      1 /*	$NetBSD: if_wm.c,v 1.553 2018/01/15 04:09:58 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.553 2018/01/15 04:09:58 knakahara Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 #include <dev/mii/ihphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
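
         /*
          * DPRINTF() takes the whole printf() argument list as a single
          * parenthesized argument, so calls need double parentheses, e.g.
          * (an illustrative sketch):
          *
          *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: link up\n",
          *	    device_xname(sc->sc_dev)));
          */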
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
     171  * The maximum number of interrupts this driver supports.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
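
         /*
          * An illustrative note on the ring arithmetic above: the ring and
          * job-queue sizes are powers of two, so the "& MASK" wraps an index
          * without a modulo.  E.g. with WM_NTXDESC(txq) == 4096,
          * WM_NTXDESC_MASK(txq) == 0xfff and WM_NEXTTX(txq, 4095) ==
          * (4096 & 0xfff) == 0.
          */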
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for normal
    218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
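
         /*
          * A sketch of how these two limits are used (see wm_txrxintr_msix()
          * and wm_handle_queue() below): the hardware interrupt handler
          * processes at most sc_rx_intr_process_limit packets before
          * deferring to the per-queue softint, which in turn processes at
          * most sc_rx_process_limit packets per invocation.  The default of
          * 0 for the former pushes all Rx processing into the softint.
          */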
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
     297 	} while (/*CONSTCOND*/0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
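
         /*
          * For illustration, WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname,
          * EVCNT_TYPE_INTR) expands to roughly:
          *
          *	snprintf((q)->txq_txdw_evcnt_name,
          *	    sizeof((q)->txq_txdw_evcnt_name), "%s%02d%s", "txq", 0, "txdw");
          *	evcnt_attach_dynamic(&(q)->txq_ev_txdw, EVCNT_TYPE_INTR, NULL,
          *	    xname, (q)->txq_txdw_evcnt_name);
          *
          * i.e. an event counter named "txq00txdw".  Note that ## does not
          * paste inside string literals, so the sizeof("qname##XX##evname")
          * in WM_Q_EVCNT_DEFINE merely reserves a buffer large enough for
          * such generated names.
          */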
    307 #endif /* WM_EVENT_COUNTERS */
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
     320 	size_t txq_descsize;		/* size of one tx descriptor */
    321 	txdescs_t *txq_descs_u;
     322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     346 	 * CPUs.  This queue mediates between them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
     351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     370 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
     376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
     395 	size_t rxq_descsize;		/* size of one rx descriptor */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     399 	int rxq_desc_rseg;		/* real number of control segments */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
     419 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    420 
     421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
     422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
     428 	int wmq_intr_idx;		/* index into the MSI-X table */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 struct wm_nvmop {
    446 	int (*acquire)(struct wm_softc *);
    447 	void (*release)(struct wm_softc *);
    448 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    449 };
    450 
    451 /*
    452  * Software state per device.
    453  */
    454 struct wm_softc {
    455 	device_t sc_dev;		/* generic device information */
    456 	bus_space_tag_t sc_st;		/* bus space tag */
    457 	bus_space_handle_t sc_sh;	/* bus space handle */
    458 	bus_size_t sc_ss;		/* bus space size */
    459 	bus_space_tag_t sc_iot;		/* I/O space tag */
    460 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    461 	bus_size_t sc_ios;		/* I/O space size */
    462 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    463 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    464 	bus_size_t sc_flashs;		/* flash registers space size */
    465 	off_t sc_flashreg_offset;	/*
    466 					 * offset to flash registers from
    467 					 * start of BAR
    468 					 */
    469 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    470 
    471 	struct ethercom sc_ethercom;	/* ethernet common data */
    472 	struct mii_data sc_mii;		/* MII/media information */
    473 
    474 	pci_chipset_tag_t sc_pc;
    475 	pcitag_t sc_pcitag;
    476 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    477 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    478 
    479 	uint16_t sc_pcidevid;		/* PCI device ID */
    480 	wm_chip_type sc_type;		/* MAC type */
    481 	int sc_rev;			/* MAC revision */
    482 	wm_phy_type sc_phytype;		/* PHY type */
    483 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    484 #define	WM_MEDIATYPE_UNKNOWN		0x00
    485 #define	WM_MEDIATYPE_FIBER		0x01
    486 #define	WM_MEDIATYPE_COPPER		0x02
    487 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    488 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    489 	int sc_flags;			/* flags; see below */
    490 	int sc_if_flags;		/* last if_flags */
    491 	int sc_flowflags;		/* 802.3x flow control flags */
    492 	int sc_align_tweak;
    493 
    494 	void *sc_ihs[WM_MAX_NINTR];	/*
    495 					 * interrupt cookie.
    496 					 * - legacy and msi use sc_ihs[0] only
    497 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    498 					 */
    499 	pci_intr_handle_t *sc_intrs;	/*
    500 					 * legacy and msi use sc_intrs[0] only
     501 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    502 					 */
    503 	int sc_nintrs;			/* number of interrupts */
    504 
     505 	int sc_link_intr_idx;		/* index into the MSI-X table */
    506 
    507 	callout_t sc_tick_ch;		/* tick callout */
    508 	bool sc_core_stopping;
    509 
    510 	int sc_nvm_ver_major;
    511 	int sc_nvm_ver_minor;
    512 	int sc_nvm_ver_build;
    513 	int sc_nvm_addrbits;		/* NVM address bits */
    514 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    515 	int sc_ich8_flash_base;
    516 	int sc_ich8_flash_bank_size;
    517 	int sc_nvm_k1_enabled;
    518 
    519 	int sc_nqueues;
    520 	struct wm_queue *sc_queue;
    521 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    522 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    523 
    524 	int sc_affinity_offset;
    525 
    526 #ifdef WM_EVENT_COUNTERS
    527 	/* Event counters. */
    528 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    529 
     530 	/* WM_T_82542_2_1 only */
    531 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    532 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    533 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    534 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    535 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    536 #endif /* WM_EVENT_COUNTERS */
    537 
     538 	/* This variable is used only on the 82547. */
    539 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    540 
    541 	uint32_t sc_ctrl;		/* prototype CTRL register */
    542 #if 0
    543 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    544 #endif
    545 	uint32_t sc_icr;		/* prototype interrupt bits */
    546 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    547 	uint32_t sc_tctl;		/* prototype TCTL register */
    548 	uint32_t sc_rctl;		/* prototype RCTL register */
    549 	uint32_t sc_txcw;		/* prototype TXCW register */
    550 	uint32_t sc_tipg;		/* prototype TIPG register */
    551 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    552 	uint32_t sc_pba;		/* prototype PBA register */
    553 
    554 	int sc_tbi_linkup;		/* TBI link status */
    555 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    556 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    557 
    558 	int sc_mchash_type;		/* multicast filter offset */
    559 
    560 	krndsource_t rnd_source;	/* random source */
    561 
    562 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    563 
    564 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    565 	kmutex_t *sc_ich_phymtx;	/*
    566 					 * 82574/82583/ICH/PCH specific PHY
    567 					 * mutex. For 82574/82583, the mutex
    568 					 * is used for both PHY and NVM.
    569 					 */
    570 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    571 
    572 	struct wm_phyop phy;
    573 	struct wm_nvmop nvm;
    574 };
    575 
    576 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    577 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    578 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
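
         /*
          * The NULL tests above make WM_CORE_LOCK()/WM_CORE_UNLOCK() no-ops,
          * and WM_CORE_LOCKED() trivially true, when no core lock has been
          * allocated.
          */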
    579 
    580 #define	WM_RXCHAIN_RESET(rxq)						\
    581 do {									\
    582 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    583 	*(rxq)->rxq_tailp = NULL;					\
    584 	(rxq)->rxq_len = 0;						\
    585 } while (/*CONSTCOND*/0)
    586 
    587 #define	WM_RXCHAIN_LINK(rxq, m)						\
    588 do {									\
    589 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    590 	(rxq)->rxq_tailp = &(m)->m_next;				\
    591 } while (/*CONSTCOND*/0)
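
         /*
          * A sketch of the tail-pointer idiom above: after WM_RXCHAIN_RESET(),
          * rxq_tailp points at rxq_head, so the first WM_RXCHAIN_LINK() stores
          * its mbuf into rxq_head itself; each later link appends in O(1) by
          * storing through rxq_tailp and then advancing rxq_tailp to the new
          * mbuf's m_next.
          */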
    592 
    593 #ifdef WM_EVENT_COUNTERS
    594 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    595 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)			\
    598 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    600 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    601 #else /* !WM_EVENT_COUNTERS */
    602 #define	WM_EVCNT_INCR(ev)	/* nothing */
    603 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    604 
    605 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    606 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    607 #endif /* !WM_EVENT_COUNTERS */
    608 
    609 #define	CSR_READ(sc, reg)						\
    610 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    611 #define	CSR_WRITE(sc, reg, val)						\
    612 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    613 #define	CSR_WRITE_FLUSH(sc)						\
    614 	(void) CSR_READ((sc), WMREG_STATUS)
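
         /*
          * CSR_WRITE_FLUSH() reads the STATUS register purely for its side
          * effect: the read forces any posted PCI writes ahead of it to reach
          * the device.  A typical (illustrative) pattern is:
          *
          *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
          *	CSR_WRITE_FLUSH(sc);
          */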
    615 
    616 #define ICH8_FLASH_READ32(sc, reg)					\
    617 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    620 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define ICH8_FLASH_READ16(sc, reg)					\
    624 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    625 	    (reg) + sc->sc_flashreg_offset)
    626 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    627 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    628 	    (reg) + sc->sc_flashreg_offset, (data))
    629 
    630 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    631 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    632 
    633 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    634 #define	WM_CDTXADDR_HI(txq, x)						\
    635 	(sizeof(bus_addr_t) == 8 ?					\
    636 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    637 
    638 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    639 #define	WM_CDRXADDR_HI(rxq, x)						\
    640 	(sizeof(bus_addr_t) == 8 ?					\
    641 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
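
         /*
          * Illustrative example: with a 64-bit bus_addr_t descriptor address
          * of 0x123456789000, WM_CDTXADDR_LO() yields 0x56789000 and
          * WM_CDTXADDR_HI() yields 0x1234.  When bus_addr_t is 32 bits wide,
          * the compile-time sizeof() test makes the HI half a constant 0,
          * and the (uint64_t) cast keeps the >> 32 well-defined.
          */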
    642 
    643 /*
     644  * Register read/write functions,
     645  * other than CSR_{READ|WRITE}().
    646  */
    647 #if 0
    648 static inline uint32_t wm_io_read(struct wm_softc *, int);
    649 #endif
    650 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    651 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    652 	uint32_t, uint32_t);
    653 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    654 
    655 /*
    656  * Descriptor sync/init functions.
    657  */
    658 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    659 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    660 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    661 
    662 /*
    663  * Device driver interface functions and commonly used functions.
    664  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    665  */
    666 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    667 static int	wm_match(device_t, cfdata_t, void *);
    668 static void	wm_attach(device_t, device_t, void *);
    669 static int	wm_detach(device_t, int);
    670 static bool	wm_suspend(device_t, const pmf_qual_t *);
    671 static bool	wm_resume(device_t, const pmf_qual_t *);
    672 static void	wm_watchdog(struct ifnet *);
    673 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    674 static void	wm_tick(void *);
    675 static int	wm_ifflags_cb(struct ethercom *);
    676 static int	wm_ioctl(struct ifnet *, u_long, void *);
    677 /* MAC address related */
    678 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    679 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    680 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    681 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    682 static void	wm_set_filter(struct wm_softc *);
    683 /* Reset and init related */
    684 static void	wm_set_vlan(struct wm_softc *);
    685 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    686 static void	wm_get_auto_rd_done(struct wm_softc *);
    687 static void	wm_lan_init_done(struct wm_softc *);
    688 static void	wm_get_cfg_done(struct wm_softc *);
    689 static void	wm_phy_post_reset(struct wm_softc *);
    690 static void	wm_write_smbus_addr(struct wm_softc *);
    691 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    692 static void	wm_initialize_hardware_bits(struct wm_softc *);
    693 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    694 static void	wm_reset_phy(struct wm_softc *);
    695 static void	wm_flush_desc_rings(struct wm_softc *);
    696 static void	wm_reset(struct wm_softc *);
    697 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    698 static void	wm_rxdrain(struct wm_rxqueue *);
    699 static void	wm_rss_getkey(uint8_t *);
    700 static void	wm_init_rss(struct wm_softc *);
    701 static void	wm_adjust_qnum(struct wm_softc *, int);
    702 static inline bool	wm_is_using_msix(struct wm_softc *);
    703 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    704 static int	wm_softint_establish(struct wm_softc *, int, int);
    705 static int	wm_setup_legacy(struct wm_softc *);
    706 static int	wm_setup_msix(struct wm_softc *);
    707 static int	wm_init(struct ifnet *);
    708 static int	wm_init_locked(struct ifnet *);
    709 static void	wm_unset_stopping_flags(struct wm_softc *);
    710 static void	wm_set_stopping_flags(struct wm_softc *);
    711 static void	wm_stop(struct ifnet *, int);
    712 static void	wm_stop_locked(struct ifnet *, int);
    713 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    714 static void	wm_82547_txfifo_stall(void *);
    715 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    716 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    717 /* DMA related */
    718 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    721 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    722     struct wm_txqueue *);
    723 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    725 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    728 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    729 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    730 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    731 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    732 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    733 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    734     struct wm_txqueue *);
    735 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    736     struct wm_rxqueue *);
    737 static int	wm_alloc_txrx_queues(struct wm_softc *);
    738 static void	wm_free_txrx_queues(struct wm_softc *);
    739 static int	wm_init_txrx_queues(struct wm_softc *);
    740 /* Start */
    741 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    742     struct wm_txsoft *, uint32_t *, uint8_t *);
    743 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    744 static void	wm_start(struct ifnet *);
    745 static void	wm_start_locked(struct ifnet *);
    746 static int	wm_transmit(struct ifnet *, struct mbuf *);
    747 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    748 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    749 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    750     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    751 static void	wm_nq_start(struct ifnet *);
    752 static void	wm_nq_start_locked(struct ifnet *);
    753 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    754 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    755 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    756 static void	wm_deferred_start_locked(struct wm_txqueue *);
    757 static void	wm_handle_queue(void *);
    758 /* Interrupt */
    759 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    761 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    762 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    763 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    764 static void	wm_linkintr(struct wm_softc *, uint32_t);
    765 static int	wm_intr_legacy(void *);
    766 static inline void	wm_txrxintr_disable(struct wm_queue *);
    767 static inline void	wm_txrxintr_enable(struct wm_queue *);
    768 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    769 static int	wm_txrxintr_msix(void *);
    770 static int	wm_linkintr_msix(void *);
    771 
    772 /*
    773  * Media related.
    774  * GMII, SGMII, TBI, SERDES and SFP.
    775  */
    776 /* Common */
    777 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    778 /* GMII related */
    779 static void	wm_gmii_reset(struct wm_softc *);
    780 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    781 static int	wm_get_phy_id_82575(struct wm_softc *);
    782 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    783 static int	wm_gmii_mediachange(struct ifnet *);
    784 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    785 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    786 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    787 static int	wm_gmii_i82543_readreg(device_t, int, int);
    788 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    789 static int	wm_gmii_mdic_readreg(device_t, int, int);
    790 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    791 static int	wm_gmii_i82544_readreg(device_t, int, int);
    792 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    793 static int	wm_gmii_i80003_readreg(device_t, int, int);
    794 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    795 static int	wm_gmii_bm_readreg(device_t, int, int);
    796 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    797 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    798 static int	wm_gmii_hv_readreg(device_t, int, int);
    799 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    800 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    801 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    802 static int	wm_gmii_82580_readreg(device_t, int, int);
    803 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    804 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    805 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    806 static void	wm_gmii_statchg(struct ifnet *);
    807 /*
     808  * Kumeran related (80003, ICH* and PCH*).
     809  * These functions are not for accessing MII registers but for accessing
     810  * Kumeran-specific registers.
    811  */
    812 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    813 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    814 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    815 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    816 /* SGMII */
    817 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    818 static int	wm_sgmii_readreg(device_t, int, int);
    819 static void	wm_sgmii_writereg(device_t, int, int, int);
    820 /* TBI related */
    821 static void	wm_tbi_mediainit(struct wm_softc *);
    822 static int	wm_tbi_mediachange(struct ifnet *);
    823 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    824 static int	wm_check_for_link(struct wm_softc *);
    825 static void	wm_tbi_tick(struct wm_softc *);
    826 /* SERDES related */
    827 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    828 static int	wm_serdes_mediachange(struct ifnet *);
    829 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    830 static void	wm_serdes_tick(struct wm_softc *);
    831 /* SFP related */
    832 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    833 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    834 
    835 /*
    836  * NVM related.
    837  * Microwire, SPI (w/wo EERD) and Flash.
    838  */
    839 /* Misc functions */
    840 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    841 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    842 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    843 /* Microwire */
    844 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    845 /* SPI */
    846 static int	wm_nvm_ready_spi(struct wm_softc *);
    847 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    848 /* Using with EERD */
    849 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    850 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    851 /* Flash */
    852 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    853     unsigned int *);
    854 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    855 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    856 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    857 	uint32_t *);
    858 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    859 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    860 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    861 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    862 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    863 /* iNVM */
    864 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    865 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     866 /* Lock, detect NVM type, validate checksum and read */
    867 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    868 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    869 static int	wm_nvm_validate_checksum(struct wm_softc *);
    870 static void	wm_nvm_version_invm(struct wm_softc *);
    871 static void	wm_nvm_version(struct wm_softc *);
    872 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    873 
    874 /*
    875  * Hardware semaphores.
     876  * Very complex...
    877  */
    878 static int	wm_get_null(struct wm_softc *);
    879 static void	wm_put_null(struct wm_softc *);
    880 static int	wm_get_eecd(struct wm_softc *);
    881 static void	wm_put_eecd(struct wm_softc *);
    882 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    883 static void	wm_put_swsm_semaphore(struct wm_softc *);
    884 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    885 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    886 static int	wm_get_nvm_80003(struct wm_softc *);
    887 static void	wm_put_nvm_80003(struct wm_softc *);
    888 static int	wm_get_nvm_82571(struct wm_softc *);
    889 static void	wm_put_nvm_82571(struct wm_softc *);
    890 static int	wm_get_phy_82575(struct wm_softc *);
    891 static void	wm_put_phy_82575(struct wm_softc *);
    892 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    893 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    894 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    895 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    896 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    897 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    898 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    899 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    900 
    901 /*
    902  * Management mode and power management related subroutines.
    903  * BMC, AMT, suspend/resume and EEE.
    904  */
    905 #if 0
    906 static int	wm_check_mng_mode(struct wm_softc *);
    907 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    908 static int	wm_check_mng_mode_82574(struct wm_softc *);
    909 static int	wm_check_mng_mode_generic(struct wm_softc *);
    910 #endif
    911 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    912 static bool	wm_phy_resetisblocked(struct wm_softc *);
    913 static void	wm_get_hw_control(struct wm_softc *);
    914 static void	wm_release_hw_control(struct wm_softc *);
    915 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    916 static void	wm_smbustopci(struct wm_softc *);
    917 static void	wm_init_manageability(struct wm_softc *);
    918 static void	wm_release_manageability(struct wm_softc *);
    919 static void	wm_get_wakeup(struct wm_softc *);
    920 static void	wm_ulp_disable(struct wm_softc *);
    921 static void	wm_enable_phy_wakeup(struct wm_softc *);
    922 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_enable_wakeup(struct wm_softc *);
    924 static void	wm_disable_aspm(struct wm_softc *);
    925 /* LPLU (Low Power Link Up) */
    926 static void	wm_lplu_d0_disable(struct wm_softc *);
    927 /* EEE */
    928 static void	wm_set_eee_i350(struct wm_softc *);
    929 
    930 /*
    931  * Workarounds (mainly PHY related).
     932  * Basically, PHY workarounds are in the PHY drivers.
    933  */
    934 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    935 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    936 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    937 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    938 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    939 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    940 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    941 static void	wm_reset_init_script_82575(struct wm_softc *);
    942 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    943 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    944 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    945 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    946 static void	wm_pll_workaround_i210(struct wm_softc *);
    947 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    948 
    949 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    950     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    951 
    952 /*
    953  * Devices supported by this driver.
    954  */
    955 static const struct wm_product {
    956 	pci_vendor_id_t		wmp_vendor;
    957 	pci_product_id_t	wmp_product;
    958 	const char		*wmp_name;
    959 	wm_chip_type		wmp_type;
    960 	uint32_t		wmp_flags;
    961 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    962 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    963 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    964 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    965 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    966 } wm_products[] = {
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    968 	  "Intel i82542 1000BASE-X Ethernet",
    969 	  WM_T_82542_2_1,	WMP_F_FIBER },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    972 	  "Intel i82543GC 1000BASE-X Ethernet",
    973 	  WM_T_82543,		WMP_F_FIBER },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    976 	  "Intel i82543GC 1000BASE-T Ethernet",
    977 	  WM_T_82543,		WMP_F_COPPER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    980 	  "Intel i82544EI 1000BASE-T Ethernet",
    981 	  WM_T_82544,		WMP_F_COPPER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    984 	  "Intel i82544EI 1000BASE-X Ethernet",
    985 	  WM_T_82544,		WMP_F_FIBER },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    988 	  "Intel i82544GC 1000BASE-T Ethernet",
    989 	  WM_T_82544,		WMP_F_COPPER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    992 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    993 	  WM_T_82544,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    996 	  "Intel i82540EM 1000BASE-T Ethernet",
    997 	  WM_T_82540,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1000 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1001 	  WM_T_82540,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1004 	  "Intel i82540EP 1000BASE-T Ethernet",
   1005 	  WM_T_82540,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1008 	  "Intel i82540EP 1000BASE-T Ethernet",
   1009 	  WM_T_82540,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1012 	  "Intel i82540EP 1000BASE-T Ethernet",
   1013 	  WM_T_82540,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1016 	  "Intel i82545EM 1000BASE-T Ethernet",
   1017 	  WM_T_82545,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1020 	  "Intel i82545GM 1000BASE-T Ethernet",
   1021 	  WM_T_82545_3,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1024 	  "Intel i82545GM 1000BASE-X Ethernet",
   1025 	  WM_T_82545_3,		WMP_F_FIBER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1028 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1029 	  WM_T_82545_3,		WMP_F_SERDES },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1032 	  "Intel i82546EB 1000BASE-T Ethernet",
   1033 	  WM_T_82546,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1036 	  "Intel i82546EB 1000BASE-T Ethernet",
   1037 	  WM_T_82546,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1040 	  "Intel i82545EM 1000BASE-X Ethernet",
   1041 	  WM_T_82545,		WMP_F_FIBER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1044 	  "Intel i82546EB 1000BASE-X Ethernet",
   1045 	  WM_T_82546,		WMP_F_FIBER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1048 	  "Intel i82546GB 1000BASE-T Ethernet",
   1049 	  WM_T_82546_3,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1052 	  "Intel i82546GB 1000BASE-X Ethernet",
   1053 	  WM_T_82546_3,		WMP_F_FIBER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1056 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1057 	  WM_T_82546_3,		WMP_F_SERDES },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1060 	  "i82546GB quad-port Gigabit Ethernet",
   1061 	  WM_T_82546_3,		WMP_F_COPPER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1064 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1065 	  WM_T_82546_3,		WMP_F_COPPER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1068 	  "Intel PRO/1000MT (82546GB)",
   1069 	  WM_T_82546_3,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1072 	  "Intel i82541EI 1000BASE-T Ethernet",
   1073 	  WM_T_82541,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1076 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1077 	  WM_T_82541,		WMP_F_COPPER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1080 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1081 	  WM_T_82541,		WMP_F_COPPER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1084 	  "Intel i82541ER 1000BASE-T Ethernet",
   1085 	  WM_T_82541_2,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1088 	  "Intel i82541GI 1000BASE-T Ethernet",
   1089 	  WM_T_82541_2,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1092 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1093 	  WM_T_82541_2,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1096 	  "Intel i82541PI 1000BASE-T Ethernet",
   1097 	  WM_T_82541_2,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1100 	  "Intel i82547EI 1000BASE-T Ethernet",
   1101 	  WM_T_82547,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1104 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1105 	  WM_T_82547,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1108 	  "Intel i82547GI 1000BASE-T Ethernet",
   1109 	  WM_T_82547_2,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1112 	  "Intel PRO/1000 PT (82571EB)",
   1113 	  WM_T_82571,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1116 	  "Intel PRO/1000 PF (82571EB)",
   1117 	  WM_T_82571,		WMP_F_FIBER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1120 	  "Intel PRO/1000 PB (82571EB)",
   1121 	  WM_T_82571,		WMP_F_SERDES },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1124 	  "Intel PRO/1000 QT (82571EB)",
   1125 	  WM_T_82571,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1128 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1129 	  WM_T_82571,		WMP_F_COPPER, },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1132 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1133 	  WM_T_82571,		WMP_F_COPPER, },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1136 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1137 	  WM_T_82571,		WMP_F_SERDES, },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1140 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1141 	  WM_T_82571,		WMP_F_SERDES, },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1144 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1145 	  WM_T_82571,		WMP_F_FIBER, },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1148 	  "Intel i82572EI 1000baseT Ethernet",
   1149 	  WM_T_82572,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1152 	  "Intel i82572EI 1000baseX Ethernet",
   1153 	  WM_T_82572,		WMP_F_FIBER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1156 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1157 	  WM_T_82572,		WMP_F_SERDES },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1160 	  "Intel i82572EI 1000baseT Ethernet",
   1161 	  WM_T_82572,		WMP_F_COPPER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1164 	  "Intel i82573E",
   1165 	  WM_T_82573,		WMP_F_COPPER },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1168 	  "Intel i82573E IAMT",
   1169 	  WM_T_82573,		WMP_F_COPPER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1172 	  "Intel i82573L Gigabit Ethernet",
   1173 	  WM_T_82573,		WMP_F_COPPER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1176 	  "Intel i82574L",
   1177 	  WM_T_82574,		WMP_F_COPPER },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1180 	  "Intel i82574L",
   1181 	  WM_T_82574,		WMP_F_COPPER },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1184 	  "Intel i82583V",
   1185 	  WM_T_82583,		WMP_F_COPPER },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1188 	  "i80003 dual 1000baseT Ethernet",
   1189 	  WM_T_80003,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1192 	  "i80003 dual 1000baseX Ethernet",
   1193 	  WM_T_80003,		WMP_F_COPPER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1196 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1197 	  WM_T_80003,		WMP_F_SERDES },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1200 	  "Intel i80003 1000baseT Ethernet",
   1201 	  WM_T_80003,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1204 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1205 	  WM_T_80003,		WMP_F_SERDES },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1208 	  "Intel i82801H (M_AMT) LAN Controller",
   1209 	  WM_T_ICH8,		WMP_F_COPPER },
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1211 	  "Intel i82801H (AMT) LAN Controller",
   1212 	  WM_T_ICH8,		WMP_F_COPPER },
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1214 	  "Intel i82801H LAN Controller",
   1215 	  WM_T_ICH8,		WMP_F_COPPER },
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1217 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1218 	  WM_T_ICH8,		WMP_F_COPPER },
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1220 	  "Intel i82801H (M) LAN Controller",
   1221 	  WM_T_ICH8,		WMP_F_COPPER },
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1223 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1224 	  WM_T_ICH8,		WMP_F_COPPER },
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1226 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1227 	  WM_T_ICH8,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1229 	  "82567V-3 LAN Controller",
   1230 	  WM_T_ICH8,		WMP_F_COPPER },
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1232 	  "82801I (AMT) LAN Controller",
   1233 	  WM_T_ICH9,		WMP_F_COPPER },
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1235 	  "82801I 10/100 LAN Controller",
   1236 	  WM_T_ICH9,		WMP_F_COPPER },
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1238 	  "82801I (G) 10/100 LAN Controller",
   1239 	  WM_T_ICH9,		WMP_F_COPPER },
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1241 	  "82801I (GT) 10/100 LAN Controller",
   1242 	  WM_T_ICH9,		WMP_F_COPPER },
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1244 	  "82801I (C) LAN Controller",
   1245 	  WM_T_ICH9,		WMP_F_COPPER },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1247 	  "82801I mobile LAN Controller",
   1248 	  WM_T_ICH9,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1250 	  "82801I mobile (V) LAN Controller",
   1251 	  WM_T_ICH9,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1253 	  "82801I mobile (AMT) LAN Controller",
   1254 	  WM_T_ICH9,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1256 	  "82567LM-4 LAN Controller",
   1257 	  WM_T_ICH9,		WMP_F_COPPER },
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1259 	  "82567LM-2 LAN Controller",
   1260 	  WM_T_ICH10,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1262 	  "82567LF-2 LAN Controller",
   1263 	  WM_T_ICH10,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1265 	  "82567LM-3 LAN Controller",
   1266 	  WM_T_ICH10,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1268 	  "82567LF-3 LAN Controller",
   1269 	  WM_T_ICH10,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1271 	  "82567V-2 LAN Controller",
   1272 	  WM_T_ICH10,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1274 	  "82567V-3? LAN Controller",
   1275 	  WM_T_ICH10,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1277 	  "HANKSVILLE LAN Controller",
   1278 	  WM_T_ICH10,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1280 	  "PCH LAN (82577LM) Controller",
   1281 	  WM_T_PCH,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1283 	  "PCH LAN (82577LC) Controller",
   1284 	  WM_T_PCH,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1286 	  "PCH LAN (82578DM) Controller",
   1287 	  WM_T_PCH,		WMP_F_COPPER },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1289 	  "PCH LAN (82578DC) Controller",
   1290 	  WM_T_PCH,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1292 	  "PCH2 LAN (82579LM) Controller",
   1293 	  WM_T_PCH2,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1295 	  "PCH2 LAN (82579V) Controller",
   1296 	  WM_T_PCH2,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1298 	  "82575EB dual-1000baseT Ethernet",
   1299 	  WM_T_82575,		WMP_F_COPPER },
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1301 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1302 	  WM_T_82575,		WMP_F_SERDES },
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1304 	  "82575GB quad-1000baseT Ethernet",
   1305 	  WM_T_82575,		WMP_F_COPPER },
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1307 	  "82575GB quad-1000baseT Ethernet (PM)",
   1308 	  WM_T_82575,		WMP_F_COPPER },
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1310 	  "82576 1000BaseT Ethernet",
   1311 	  WM_T_82576,		WMP_F_COPPER },
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1313 	  "82576 1000BaseX Ethernet",
   1314 	  WM_T_82576,		WMP_F_FIBER },
   1315 
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1317 	  "82576 gigabit Ethernet (SERDES)",
   1318 	  WM_T_82576,		WMP_F_SERDES },
   1319 
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1321 	  "82576 quad-1000BaseT Ethernet",
   1322 	  WM_T_82576,		WMP_F_COPPER },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1325 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1326 	  WM_T_82576,		WMP_F_COPPER },
   1327 
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1329 	  "82576 gigabit Ethernet",
   1330 	  WM_T_82576,		WMP_F_COPPER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1333 	  "82576 gigabit Ethernet (SERDES)",
   1334 	  WM_T_82576,		WMP_F_SERDES },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1336 	  "82576 quad-gigabit Ethernet (SERDES)",
   1337 	  WM_T_82576,		WMP_F_SERDES },
   1338 
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1340 	  "82580 1000BaseT Ethernet",
   1341 	  WM_T_82580,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1343 	  "82580 1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1347 	  "82580 1000BaseT Ethernet (SERDES)",
   1348 	  WM_T_82580,		WMP_F_SERDES },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1351 	  "82580 gigabit Ethernet (SGMII)",
   1352 	  WM_T_82580,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1354 	  "82580 dual-1000BaseT Ethernet",
   1355 	  WM_T_82580,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1358 	  "82580 quad-1000BaseX Ethernet",
   1359 	  WM_T_82580,		WMP_F_FIBER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1362 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1363 	  WM_T_82580,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1366 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1367 	  WM_T_82580,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1370 	  "DH89XXCC 1000BASE-KX Ethernet",
   1371 	  WM_T_82580,		WMP_F_SERDES },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1374 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1375 	  WM_T_82580,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1378 	  "I350 Gigabit Network Connection",
   1379 	  WM_T_I350,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1382 	  "I350 Gigabit Fiber Network Connection",
   1383 	  WM_T_I350,		WMP_F_FIBER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1386 	  "I350 Gigabit Backplane Connection",
   1387 	  WM_T_I350,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1390 	  "I350 Quad Port Gigabit Ethernet",
   1391 	  WM_T_I350,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1394 	  "I350 Gigabit Connection",
   1395 	  WM_T_I350,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1398 	  "I354 Gigabit Ethernet (KX)",
   1399 	  WM_T_I354,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1402 	  "I354 Gigabit Ethernet (SGMII)",
   1403 	  WM_T_I354,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1406 	  "I354 Gigabit Ethernet (2.5G)",
   1407 	  WM_T_I354,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1410 	  "I210-T1 Ethernet Server Adapter",
   1411 	  WM_T_I210,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1414 	  "I210 Ethernet (Copper OEM)",
   1415 	  WM_T_I210,		WMP_F_COPPER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1418 	  "I210 Ethernet (Copper IT)",
   1419 	  WM_T_I210,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1422 	  "I210 Ethernet (FLASH less)",
   1423 	  WM_T_I210,		WMP_F_COPPER },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1426 	  "I210 Gigabit Ethernet (Fiber)",
   1427 	  WM_T_I210,		WMP_F_FIBER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1430 	  "I210 Gigabit Ethernet (SERDES)",
   1431 	  WM_T_I210,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1434 	  "I210 Gigabit Ethernet (FLASH less)",
   1435 	  WM_T_I210,		WMP_F_SERDES },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1438 	  "I210 Gigabit Ethernet (SGMII)",
   1439 	  WM_T_I210,		WMP_F_COPPER },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1442 	  "I211 Ethernet (COPPER)",
   1443 	  WM_T_I211,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1445 	  "I217 V Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1448 	  "I217 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1451 	  "I218 V Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1454 	  "I218 V Ethernet Connection",
   1455 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1457 	  "I218 V Ethernet Connection",
   1458 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1460 	  "I218 LM Ethernet Connection",
   1461 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1463 	  "I218 LM Ethernet Connection",
   1464 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1466 	  "I218 LM Ethernet Connection",
   1467 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1468 #if 0
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1470 	  "I219 V Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1473 	  "I219 V Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1476 	  "I219 V Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1479 	  "I219 V Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1482 	  "I219 LM Ethernet Connection",
   1483 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1485 	  "I219 LM Ethernet Connection",
   1486 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1488 	  "I219 LM Ethernet Connection",
   1489 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1491 	  "I219 LM Ethernet Connection",
   1492 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1494 	  "I219 LM Ethernet Connection",
   1495 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1496 #endif
   1497 	{ 0,			0,
   1498 	  NULL,
   1499 	  0,			0 },
   1500 };
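
        /*
         * The all-zero entry above is a sentinel: wm_lookup() below walks
         * this table until it reaches an entry whose wmp_name is NULL.
         */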
   1501 
   1502 /*
   1503  * Register read/write functions.
   1504  * Other than CSR_{READ|WRITE}().
   1505  */
   1506 
   1507 #if 0 /* Not currently used */
   1508 static inline uint32_t
   1509 wm_io_read(struct wm_softc *sc, int reg)
   1510 {
   1511 
   1512 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1513 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1514 }
   1515 #endif
   1516 
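        /*
         * A sketch of the indirect access scheme wm_io_read()/wm_io_write()
         * implement: offset 0 of the I/O BAR is an address window and offset
         * 4 is a data window, so an access first stores the register offset
         * at 0 and then moves the data through the window at 4.
         */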
   1517 static inline void
   1518 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1519 {
   1520 
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1522 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1523 }
   1524 
   1525 static inline void
   1526 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1527     uint32_t data)
   1528 {
   1529 	uint32_t regval;
   1530 	int i;
   1531 
   1532 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1533 
   1534 	CSR_WRITE(sc, reg, regval);
   1535 
   1536 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1537 		delay(5);
   1538 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1539 			break;
   1540 	}
   1541 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1542 		aprint_error("%s: WARNING:"
   1543 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1544 		    device_xname(sc->sc_dev), reg);
   1545 	}
   1546 }
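
        /*
         * Note on the poll loop above: each iteration waits 5us, so the
         * worst-case wait before the "did not indicate ready" warning is
         * SCTL_CTL_POLL_TIMEOUT * 5 microseconds.
         */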
   1547 
   1548 static inline void
   1549 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1550 {
   1551 	wa->wa_low = htole32(v & 0xffffffffU);
   1552 	if (sizeof(bus_addr_t) == 8)
   1553 		wa->wa_high = htole32((uint64_t) v >> 32);
   1554 	else
   1555 		wa->wa_high = 0;
   1556 }
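
        /*
         * Example of the split above: with bus_addr_t v = 0x123456789,
         * wa_low is htole32(0x23456789) and wa_high is htole32(0x1); on
         * platforms with a 32-bit bus_addr_t, wa_high is simply zero.
         */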
   1557 
   1558 /*
   1559  * Descriptor sync/init functions.
   1560  */
   1561 static inline void
   1562 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1563 {
   1564 	struct wm_softc *sc = txq->txq_sc;
   1565 
   1566 	/* If it will wrap around, sync to the end of the ring. */
   1567 	if ((start + num) > WM_NTXDESC(txq)) {
   1568 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1569 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1570 		    (WM_NTXDESC(txq) - start), ops);
   1571 		num -= (WM_NTXDESC(txq) - start);
   1572 		start = 0;
   1573 	}
   1574 
   1575 	/* Now sync whatever is left. */
   1576 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1577 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1578 }
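
        /*
         * Wraparound example for the sync above: on a 256-descriptor ring,
         * start = 250 and num = 10 syncs descriptors 250-255 in the first
         * bus_dmamap_sync() call and descriptors 0-3 in the second.
         */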
   1579 
   1580 static inline void
   1581 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1582 {
   1583 	struct wm_softc *sc = rxq->rxq_sc;
   1584 
   1585 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1586 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1587 }
   1588 
   1589 static inline void
   1590 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1591 {
   1592 	struct wm_softc *sc = rxq->rxq_sc;
   1593 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1594 	struct mbuf *m = rxs->rxs_mbuf;
   1595 
   1596 	/*
   1597 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1598 	 * so that the payload after the Ethernet header is aligned
   1599 	 * to a 4-byte boundary.
   1600 	 *
   1601 	 * XXX BRAINDAMAGE ALERT!
   1602 	 * The stupid chip uses the same size for every buffer, which
   1603 	 * is set in the Receive Control register.  We are using the 2K
   1604 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1605 	 * reason, we can't "scoot" packets longer than the standard
   1606 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1607 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1608 	 * the upper layer copy the headers.
   1609 	 */
   1610 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1611 
   1612 	if (sc->sc_type == WM_T_82574) {
   1613 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1614 		rxd->erx_data.erxd_addr =
   1615 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1616 		rxd->erx_data.erxd_dd = 0;
   1617 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1618 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1619 
   1620 		rxd->nqrx_data.nrxd_paddr =
   1621 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1622 		/* Currently, split header is not supported. */
   1623 		rxd->nqrx_data.nrxd_haddr = 0;
   1624 	} else {
   1625 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1626 
   1627 		wm_set_dma_addr(&rxd->wrx_addr,
   1628 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1629 		rxd->wrx_len = 0;
   1630 		rxd->wrx_cksum = 0;
   1631 		rxd->wrx_status = 0;
   1632 		rxd->wrx_errors = 0;
   1633 		rxd->wrx_special = 0;
   1634 	}
   1635 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1636 
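        	/*
        	 * Hand the refilled slot back to the chip by advancing the
        	 * Rx descriptor tail register.
        	 */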
   1637 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1638 }
   1639 
   1640 /*
   1641  * Device driver interface functions and commonly used functions.
   1642  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1643  */
   1644 
   1645 /* Lookup supported device table */
   1646 static const struct wm_product *
   1647 wm_lookup(const struct pci_attach_args *pa)
   1648 {
   1649 	const struct wm_product *wmp;
   1650 
   1651 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1652 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1653 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1654 			return wmp;
   1655 	}
   1656 	return NULL;
   1657 }
   1658 
   1659 /* The match function (ca_match) */
   1660 static int
   1661 wm_match(device_t parent, cfdata_t cf, void *aux)
   1662 {
   1663 	struct pci_attach_args *pa = aux;
   1664 
   1665 	if (wm_lookup(pa) != NULL)
   1666 		return 1;
   1667 
   1668 	return 0;
   1669 }
   1670 
   1671 /* The attach function (ca_attach) */
   1672 static void
   1673 wm_attach(device_t parent, device_t self, void *aux)
   1674 {
   1675 	struct wm_softc *sc = device_private(self);
   1676 	struct pci_attach_args *pa = aux;
   1677 	prop_dictionary_t dict;
   1678 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1679 	pci_chipset_tag_t pc = pa->pa_pc;
   1680 	int counts[PCI_INTR_TYPE_SIZE];
   1681 	pci_intr_type_t max_type;
   1682 	const char *eetype, *xname;
   1683 	bus_space_tag_t memt;
   1684 	bus_space_handle_t memh;
   1685 	bus_size_t memsize;
   1686 	int memh_valid;
   1687 	int i, error;
   1688 	const struct wm_product *wmp;
   1689 	prop_data_t ea;
   1690 	prop_number_t pn;
   1691 	uint8_t enaddr[ETHER_ADDR_LEN];
   1692 	char buf[256];
   1693 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1694 	pcireg_t preg, memtype;
   1695 	uint16_t eeprom_data, apme_mask;
   1696 	bool force_clear_smbi;
   1697 	uint32_t link_mode;
   1698 	uint32_t reg;
   1699 
   1700 	sc->sc_dev = self;
   1701 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1702 	sc->sc_core_stopping = false;
   1703 
   1704 	wmp = wm_lookup(pa);
   1705 #ifdef DIAGNOSTIC
   1706 	if (wmp == NULL) {
   1707 		printf("\n");
   1708 		panic("wm_attach: impossible");
   1709 	}
   1710 #endif
   1711 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1712 
   1713 	sc->sc_pc = pa->pa_pc;
   1714 	sc->sc_pcitag = pa->pa_tag;
   1715 
   1716 	if (pci_dma64_available(pa))
   1717 		sc->sc_dmat = pa->pa_dmat64;
   1718 	else
   1719 		sc->sc_dmat = pa->pa_dmat;
   1720 
   1721 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1722 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1723 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1724 
   1725 	sc->sc_type = wmp->wmp_type;
   1726 
   1727 	/* Set default function pointers */
   1728 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1729 	sc->phy.release = sc->nvm.release = wm_put_null;
   1730 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1731 
   1732 	if (sc->sc_type < WM_T_82543) {
   1733 		if (sc->sc_rev < 2) {
   1734 			aprint_error_dev(sc->sc_dev,
   1735 			    "i82542 must be at least rev. 2\n");
   1736 			return;
   1737 		}
   1738 		if (sc->sc_rev < 3)
   1739 			sc->sc_type = WM_T_82542_2_0;
   1740 	}
   1741 
   1742 	/*
   1743 	 * Disable MSI for Errata:
   1744 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1745 	 *
   1746 	 *  82544: Errata 25
   1747 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1748 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1749 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1750 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1751 	 *
   1752 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1753 	 *
   1754 	 *  82571 & 82572: Errata 63
   1755 	 */
   1756 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1757 	    || (sc->sc_type == WM_T_82572))
   1758 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1759 
   1760 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1761 	    || (sc->sc_type == WM_T_82580)
   1762 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1763 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1764 		sc->sc_flags |= WM_F_NEWQUEUE;
   1765 
   1766 	/* Set device properties (mactype) */
   1767 	dict = device_properties(sc->sc_dev);
   1768 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1769 
   1770 	/*
   1771 	 * Map the device.  All devices support memory-mapped access,
   1772 	 * and it is really required for normal operation.
   1773 	 */
   1774 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1775 	switch (memtype) {
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1777 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1778 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1779 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1780 		break;
   1781 	default:
   1782 		memh_valid = 0;
   1783 		break;
   1784 	}
   1785 
   1786 	if (memh_valid) {
   1787 		sc->sc_st = memt;
   1788 		sc->sc_sh = memh;
   1789 		sc->sc_ss = memsize;
   1790 	} else {
   1791 		aprint_error_dev(sc->sc_dev,
   1792 		    "unable to map device registers\n");
   1793 		return;
   1794 	}
   1795 
   1796 	/*
   1797 	 * In addition, i82544 and later support I/O mapped indirect
   1798 	 * register access.  It is not desirable (nor supported in
   1799 	 * this driver) to use it for normal operation, though it is
   1800 	 * required to work around bugs in some chip versions.
   1801 	 */
   1802 	if (sc->sc_type >= WM_T_82544) {
   1803 		/* First we have to find the I/O BAR. */
   1804 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1805 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1806 			if (memtype == PCI_MAPREG_TYPE_IO)
   1807 				break;
   1808 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1809 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1810 				i += 4;	/* skip high bits, too */
   1811 		}
   1812 		if (i < PCI_MAPREG_END) {
   1813 			/*
   1814 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
   1815 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
   1816 			 * That's not a problem, because newer chips don't
   1817 			 * have this bug.
   1818 			 *
   1819 			 * The i8254x apparently doesn't respond when the
   1820 			 * I/O BAR is 0, which looks somewhat like it hasn't
   1821 			 * been configured.
   1822 			 */
   1823 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1824 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1825 				aprint_error_dev(sc->sc_dev,
   1826 				    "WARNING: I/O BAR at zero.\n");
   1827 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1828 					0, &sc->sc_iot, &sc->sc_ioh,
   1829 					NULL, &sc->sc_ios) == 0) {
   1830 				sc->sc_flags |= WM_F_IOH_VALID;
   1831 			} else {
   1832 				aprint_error_dev(sc->sc_dev,
   1833 				    "WARNING: unable to map I/O space\n");
   1834 			}
   1835 		}
   1836 
   1837 	}
   1838 
   1839 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1840 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1841 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1842 	if (sc->sc_type < WM_T_82542_2_1)
   1843 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1844 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1845 
   1846 	/* power up chip */
   1847 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1848 	    NULL)) && error != EOPNOTSUPP) {
   1849 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1850 		return;
   1851 	}
   1852 
   1853 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1854 	/*
   1855 	 *  Don't use MSI-X if we can use only one queue, to save
   1856 	 * interrupt resources.
   1857 	 */
   1858 	if (sc->sc_nqueues > 1) {
   1859 		max_type = PCI_INTR_TYPE_MSIX;
   1860 		/*
   1861 		 *  The 82583 has an MSI-X capability in the PCI configuration
   1862 		 * space but doesn't actually support it. At least the
   1863 		 * documentation doesn't say anything about MSI-X.
   1864 		 */
   1865 		counts[PCI_INTR_TYPE_MSIX]
   1866 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1867 	} else {
   1868 		max_type = PCI_INTR_TYPE_MSI;
   1869 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1870 	}
   1871 
   1872 	/* Allocation settings */
   1873 	counts[PCI_INTR_TYPE_MSI] = 1;
   1874 	counts[PCI_INTR_TYPE_INTX] = 1;
   1875 	/* overridden by disable flags */
   1876 	if (wm_disable_msi != 0) {
   1877 		counts[PCI_INTR_TYPE_MSI] = 0;
   1878 		if (wm_disable_msix != 0) {
   1879 			max_type = PCI_INTR_TYPE_INTX;
   1880 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1881 		}
   1882 	} else if (wm_disable_msix != 0) {
   1883 		max_type = PCI_INTR_TYPE_MSI;
   1884 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1885 	}
   1886 
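        	/*
        	 * Interrupt allocation fallback ladder: if MSI-X setup fails,
        	 * retry with MSI; if MSI setup fails, retry with INTx. The
        	 * error paths below release the vectors from the failed
        	 * attempt before jumping back here.
        	 */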
   1887 alloc_retry:
   1888 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1889 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1890 		return;
   1891 	}
   1892 
   1893 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1894 		error = wm_setup_msix(sc);
   1895 		if (error) {
   1896 			pci_intr_release(pc, sc->sc_intrs,
   1897 			    counts[PCI_INTR_TYPE_MSIX]);
   1898 
   1899 			/* Setup for MSI: Disable MSI-X */
   1900 			max_type = PCI_INTR_TYPE_MSI;
   1901 			counts[PCI_INTR_TYPE_MSI] = 1;
   1902 			counts[PCI_INTR_TYPE_INTX] = 1;
   1903 			goto alloc_retry;
   1904 		}
   1905 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1906 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1907 		error = wm_setup_legacy(sc);
   1908 		if (error) {
   1909 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1910 			    counts[PCI_INTR_TYPE_MSI]);
   1911 
   1912 			/* The next try is for INTx: Disable MSI */
   1913 			max_type = PCI_INTR_TYPE_INTX;
   1914 			counts[PCI_INTR_TYPE_INTX] = 1;
   1915 			goto alloc_retry;
   1916 		}
   1917 	} else {
   1918 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1919 		error = wm_setup_legacy(sc);
   1920 		if (error) {
   1921 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1922 			    counts[PCI_INTR_TYPE_INTX]);
   1923 			return;
   1924 		}
   1925 	}
   1926 
   1927 	/*
   1928 	 * Check the function ID (unit number of the chip).
   1929 	 */
   1930 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1931 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1932 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1933 	    || (sc->sc_type == WM_T_82580)
   1934 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1935 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1936 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1937 	else
   1938 		sc->sc_funcid = 0;
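        	/*
        	 * E.g. on a dual-port 82546 the two functions read back 0 and
        	 * 1 here; sc_funcid later selects per-port NVM words such as
        	 * NVM_OFF_CFG3_PORTA vs. NVM_OFF_CFG3_PORTB.
        	 */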
   1939 
   1940 	/*
   1941 	 * Determine a few things about the bus we're connected to.
   1942 	 */
   1943 	if (sc->sc_type < WM_T_82543) {
   1944 		/* We don't really know the bus characteristics here. */
   1945 		sc->sc_bus_speed = 33;
   1946 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1947 		/*
   1948 		 * CSA (Communication Streaming Architecture) is about as fast
   1949 		 * as a 32-bit 66MHz PCI bus.
   1950 		 */
   1951 		sc->sc_flags |= WM_F_CSA;
   1952 		sc->sc_bus_speed = 66;
   1953 		aprint_verbose_dev(sc->sc_dev,
   1954 		    "Communication Streaming Architecture\n");
   1955 		if (sc->sc_type == WM_T_82547) {
   1956 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1957 			callout_setfunc(&sc->sc_txfifo_ch,
   1958 					wm_82547_txfifo_stall, sc);
   1959 			aprint_verbose_dev(sc->sc_dev,
   1960 			    "using 82547 Tx FIFO stall work-around\n");
   1961 		}
   1962 	} else if (sc->sc_type >= WM_T_82571) {
   1963 		sc->sc_flags |= WM_F_PCIE;
   1964 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1965 		    && (sc->sc_type != WM_T_ICH10)
   1966 		    && (sc->sc_type != WM_T_PCH)
   1967 		    && (sc->sc_type != WM_T_PCH2)
   1968 		    && (sc->sc_type != WM_T_PCH_LPT)
   1969 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1970 			/* ICH* and PCH* have no PCIe capability registers */
   1971 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1972 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1973 				NULL) == 0)
   1974 				aprint_error_dev(sc->sc_dev,
   1975 				    "unable to find PCIe capability\n");
   1976 		}
   1977 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1978 	} else {
   1979 		reg = CSR_READ(sc, WMREG_STATUS);
   1980 		if (reg & STATUS_BUS64)
   1981 			sc->sc_flags |= WM_F_BUS64;
   1982 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1983 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1984 
   1985 			sc->sc_flags |= WM_F_PCIX;
   1986 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1987 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1988 				aprint_error_dev(sc->sc_dev,
   1989 				    "unable to find PCIX capability\n");
   1990 			else if (sc->sc_type != WM_T_82545_3 &&
   1991 				 sc->sc_type != WM_T_82546_3) {
   1992 				/*
   1993 				 * Work around a problem caused by the BIOS
   1994 				 * setting the max memory read byte count
   1995 				 * incorrectly.
   1996 				 */
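        				/*
        				 * The MMRBC fields encode 512 << n
        				 * bytes, so e.g. bytecnt = 3 (4096
        				 * bytes) with maxb = 1 (1024 bytes)
        				 * gets clamped down to 1024 below.
        				 */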
   1997 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1998 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1999 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2000 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2001 
   2002 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2003 				    PCIX_CMD_BYTECNT_SHIFT;
   2004 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2005 				    PCIX_STATUS_MAXB_SHIFT;
   2006 				if (bytecnt > maxb) {
   2007 					aprint_verbose_dev(sc->sc_dev,
   2008 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2009 					    512 << bytecnt, 512 << maxb);
   2010 					pcix_cmd = (pcix_cmd &
   2011 					    ~PCIX_CMD_BYTECNT_MASK) |
   2012 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2013 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2014 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2015 					    pcix_cmd);
   2016 				}
   2017 			}
   2018 		}
   2019 		/*
   2020 		 * The quad port adapter is special; it has a PCIX-PCIX
   2021 		 * bridge on the board, and can run the secondary bus at
   2022 		 * a higher speed.
   2023 		 */
   2024 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2025 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2026 								      : 66;
   2027 		} else if (sc->sc_flags & WM_F_PCIX) {
   2028 			switch (reg & STATUS_PCIXSPD_MASK) {
   2029 			case STATUS_PCIXSPD_50_66:
   2030 				sc->sc_bus_speed = 66;
   2031 				break;
   2032 			case STATUS_PCIXSPD_66_100:
   2033 				sc->sc_bus_speed = 100;
   2034 				break;
   2035 			case STATUS_PCIXSPD_100_133:
   2036 				sc->sc_bus_speed = 133;
   2037 				break;
   2038 			default:
   2039 				aprint_error_dev(sc->sc_dev,
   2040 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2041 				    reg & STATUS_PCIXSPD_MASK);
   2042 				sc->sc_bus_speed = 66;
   2043 				break;
   2044 			}
   2045 		} else
   2046 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2047 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2048 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2049 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2050 	}
   2051 
   2052 	/* Disable ASPM L0s and/or L1 for workaround */
   2053 	wm_disable_aspm(sc);
   2054 
   2055 	/* clear interesting stat counters */
   2056 	CSR_READ(sc, WMREG_COLC);
   2057 	CSR_READ(sc, WMREG_RXERRC);
   2058 
   2059 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2060 	    || (sc->sc_type >= WM_T_ICH8))
   2061 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2062 	if (sc->sc_type >= WM_T_ICH8)
   2063 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2064 
   2065 	/* Set PHY, NVM mutex related stuff */
   2066 	switch (sc->sc_type) {
   2067 	case WM_T_82542_2_0:
   2068 	case WM_T_82542_2_1:
   2069 	case WM_T_82543:
   2070 	case WM_T_82544:
   2071 		/* Microwire */
   2072 		sc->nvm.read = wm_nvm_read_uwire;
   2073 		sc->sc_nvm_wordsize = 64;
   2074 		sc->sc_nvm_addrbits = 6;
   2075 		break;
   2076 	case WM_T_82540:
   2077 	case WM_T_82545:
   2078 	case WM_T_82545_3:
   2079 	case WM_T_82546:
   2080 	case WM_T_82546_3:
   2081 		/* Microwire */
   2082 		sc->nvm.read = wm_nvm_read_uwire;
   2083 		reg = CSR_READ(sc, WMREG_EECD);
   2084 		if (reg & EECD_EE_SIZE) {
   2085 			sc->sc_nvm_wordsize = 256;
   2086 			sc->sc_nvm_addrbits = 8;
   2087 		} else {
   2088 			sc->sc_nvm_wordsize = 64;
   2089 			sc->sc_nvm_addrbits = 6;
   2090 		}
   2091 		sc->sc_flags |= WM_F_LOCK_EECD;
   2092 		sc->nvm.acquire = wm_get_eecd;
   2093 		sc->nvm.release = wm_put_eecd;
   2094 		break;
   2095 	case WM_T_82541:
   2096 	case WM_T_82541_2:
   2097 	case WM_T_82547:
   2098 	case WM_T_82547_2:
   2099 		reg = CSR_READ(sc, WMREG_EECD);
   2100 		/*
   2101 		 * wm_nvm_set_addrbits_size_eecd() itself accesses SPI only
   2102 		 * on 8254[17], so set flags and functions before calling it.
   2103 		 */
   2104 		sc->sc_flags |= WM_F_LOCK_EECD;
   2105 		sc->nvm.acquire = wm_get_eecd;
   2106 		sc->nvm.release = wm_put_eecd;
   2107 		if (reg & EECD_EE_TYPE) {
   2108 			/* SPI */
   2109 			sc->nvm.read = wm_nvm_read_spi;
   2110 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2111 			wm_nvm_set_addrbits_size_eecd(sc);
   2112 		} else {
   2113 			/* Microwire */
   2114 			sc->nvm.read = wm_nvm_read_uwire;
   2115 			if ((reg & EECD_EE_ABITS) != 0) {
   2116 				sc->sc_nvm_wordsize = 256;
   2117 				sc->sc_nvm_addrbits = 8;
   2118 			} else {
   2119 				sc->sc_nvm_wordsize = 64;
   2120 				sc->sc_nvm_addrbits = 6;
   2121 			}
   2122 		}
   2123 		break;
   2124 	case WM_T_82571:
   2125 	case WM_T_82572:
   2126 		/* SPI */
   2127 		sc->nvm.read = wm_nvm_read_eerd;
   2128 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2129 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2130 		wm_nvm_set_addrbits_size_eecd(sc);
   2131 		sc->phy.acquire = wm_get_swsm_semaphore;
   2132 		sc->phy.release = wm_put_swsm_semaphore;
   2133 		sc->nvm.acquire = wm_get_nvm_82571;
   2134 		sc->nvm.release = wm_put_nvm_82571;
   2135 		break;
   2136 	case WM_T_82573:
   2137 	case WM_T_82574:
   2138 	case WM_T_82583:
   2139 		sc->nvm.read = wm_nvm_read_eerd;
   2140 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2141 		if (sc->sc_type == WM_T_82573) {
   2142 			sc->phy.acquire = wm_get_swsm_semaphore;
   2143 			sc->phy.release = wm_put_swsm_semaphore;
   2144 			sc->nvm.acquire = wm_get_nvm_82571;
   2145 			sc->nvm.release = wm_put_nvm_82571;
   2146 		} else {
   2147 			/* Both PHY and NVM use the same semaphore. */
   2148 			sc->phy.acquire = sc->nvm.acquire
   2149 			    = wm_get_swfwhw_semaphore;
   2150 			sc->phy.release = sc->nvm.release
   2151 			    = wm_put_swfwhw_semaphore;
   2152 		}
   2153 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2154 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2155 			sc->sc_nvm_wordsize = 2048;
   2156 		} else {
   2157 			/* SPI */
   2158 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2159 			wm_nvm_set_addrbits_size_eecd(sc);
   2160 		}
   2161 		break;
   2162 	case WM_T_82575:
   2163 	case WM_T_82576:
   2164 	case WM_T_82580:
   2165 	case WM_T_I350:
   2166 	case WM_T_I354:
   2167 	case WM_T_80003:
   2168 		/* SPI */
   2169 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2170 		wm_nvm_set_addrbits_size_eecd(sc);
   2171 		if ((sc->sc_type == WM_T_80003)
   2172 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2173 			sc->nvm.read = wm_nvm_read_eerd;
   2174 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2175 		} else {
   2176 			sc->nvm.read = wm_nvm_read_spi;
   2177 			sc->sc_flags |= WM_F_LOCK_EECD;
   2178 		}
   2179 		sc->phy.acquire = wm_get_phy_82575;
   2180 		sc->phy.release = wm_put_phy_82575;
   2181 		sc->nvm.acquire = wm_get_nvm_80003;
   2182 		sc->nvm.release = wm_put_nvm_80003;
   2183 		break;
   2184 	case WM_T_ICH8:
   2185 	case WM_T_ICH9:
   2186 	case WM_T_ICH10:
   2187 	case WM_T_PCH:
   2188 	case WM_T_PCH2:
   2189 	case WM_T_PCH_LPT:
   2190 		sc->nvm.read = wm_nvm_read_ich8;
   2191 		/* FLASH */
   2192 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2193 		sc->sc_nvm_wordsize = 2048;
   2194 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2195 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2196 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2197 			aprint_error_dev(sc->sc_dev,
   2198 			    "can't map FLASH registers\n");
   2199 			goto out;
   2200 		}
   2201 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2202 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2203 		    ICH_FLASH_SECTOR_SIZE;
   2204 		sc->sc_ich8_flash_bank_size =
   2205 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2206 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2207 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2208 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
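        		/*
        		 * I.e. the flash region spans (limit + 1 - base) sectors;
        		 * the final division by 2 * sizeof(uint16_t) converts
        		 * bytes to 16-bit words and splits the region across the
        		 * two flash banks.
        		 */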
   2209 		sc->sc_flashreg_offset = 0;
   2210 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2211 		sc->phy.release = wm_put_swflag_ich8lan;
   2212 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2213 		sc->nvm.release = wm_put_nvm_ich8lan;
   2214 		break;
   2215 	case WM_T_PCH_SPT:
   2216 		sc->nvm.read = wm_nvm_read_spt;
   2217 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2218 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2219 		sc->sc_flasht = sc->sc_st;
   2220 		sc->sc_flashh = sc->sc_sh;
   2221 		sc->sc_ich8_flash_base = 0;
   2222 		sc->sc_nvm_wordsize =
   2223 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2224 			* NVM_SIZE_MULTIPLIER;
   2225 		/* That is the size in bytes; we want words */
   2226 		sc->sc_nvm_wordsize /= 2;
   2227 		/* assume 2 banks */
   2228 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
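        		/*
        		 * For example, assuming NVM_SIZE_MULTIPLIER is 4096: a
        		 * STRAP size field of 0x1F gives 32 * 4096 bytes, i.e.
        		 * 65536 words, and the two assumed banks then hold
        		 * 32768 words each.
        		 */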
   2229 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2230 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2231 		sc->phy.release = wm_put_swflag_ich8lan;
   2232 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2233 		sc->nvm.release = wm_put_nvm_ich8lan;
   2234 		break;
   2235 	case WM_T_I210:
   2236 	case WM_T_I211:
   2237 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2238 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2239 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2240 			sc->nvm.read = wm_nvm_read_eerd;
   2241 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2242 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2243 			wm_nvm_set_addrbits_size_eecd(sc);
   2244 		} else {
   2245 			sc->nvm.read = wm_nvm_read_invm;
   2246 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2247 			sc->sc_nvm_wordsize = INVM_SIZE;
   2248 		}
   2249 		sc->phy.acquire = wm_get_phy_82575;
   2250 		sc->phy.release = wm_put_phy_82575;
   2251 		sc->nvm.acquire = wm_get_nvm_80003;
   2252 		sc->nvm.release = wm_put_nvm_80003;
   2253 		break;
   2254 	default:
   2255 		break;
   2256 	}
   2257 
   2258 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2259 	switch (sc->sc_type) {
   2260 	case WM_T_82571:
   2261 	case WM_T_82572:
   2262 		reg = CSR_READ(sc, WMREG_SWSM2);
   2263 		if ((reg & SWSM2_LOCK) == 0) {
   2264 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2265 			force_clear_smbi = true;
   2266 		} else
   2267 			force_clear_smbi = false;
   2268 		break;
   2269 	case WM_T_82573:
   2270 	case WM_T_82574:
   2271 	case WM_T_82583:
   2272 		force_clear_smbi = true;
   2273 		break;
   2274 	default:
   2275 		force_clear_smbi = false;
   2276 		break;
   2277 	}
   2278 	if (force_clear_smbi) {
   2279 		reg = CSR_READ(sc, WMREG_SWSM);
   2280 		if ((reg & SWSM_SMBI) != 0)
   2281 			aprint_error_dev(sc->sc_dev,
   2282 			    "Please update the Bootagent\n");
   2283 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2284 	}
   2285 
   2286 	/*
   2287 	 * Defer printing the EEPROM type until after verifying the checksum.
   2288 	 * This allows the EEPROM type to be printed correctly in the case
   2289 	 * that no EEPROM is attached.
   2290 	 */
   2291 	/*
   2292 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2293 	 * this for later, so we can fail future reads from the EEPROM.
   2294 	 */
   2295 	if (wm_nvm_validate_checksum(sc)) {
   2296 		/*
   2297 		 * Check once more, because some PCI-e parts fail the
   2298 		 * first check due to the link being in a sleep state.
   2299 		 */
   2300 		if (wm_nvm_validate_checksum(sc))
   2301 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2302 	}
   2303 
   2304 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2305 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2306 	else {
   2307 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2308 		    sc->sc_nvm_wordsize);
   2309 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2310 			aprint_verbose("iNVM");
   2311 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2312 			aprint_verbose("FLASH(HW)");
   2313 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2314 			aprint_verbose("FLASH");
   2315 		else {
   2316 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2317 				eetype = "SPI";
   2318 			else
   2319 				eetype = "MicroWire";
   2320 			aprint_verbose("(%d address bits) %s EEPROM",
   2321 			    sc->sc_nvm_addrbits, eetype);
   2322 		}
   2323 	}
   2324 	wm_nvm_version(sc);
   2325 	aprint_verbose("\n");
   2326 
   2327 	/*
   2328 	 * XXX This is the first call of wm_gmii_setup_phytype(). The
   2329 	 * result might be incorrect.
   2330 	 */
   2331 	wm_gmii_setup_phytype(sc, 0, 0);
   2332 
   2333 	/* Reset the chip to a known state. */
   2334 	wm_reset(sc);
   2335 
   2336 	/* Check for I21[01] PLL workaround */
   2337 	if (sc->sc_type == WM_T_I210)
   2338 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2339 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2340 		/* NVM image release 3.25 has a workaround */
   2341 		if ((sc->sc_nvm_ver_major < 3)
   2342 		    || ((sc->sc_nvm_ver_major == 3)
   2343 			&& (sc->sc_nvm_ver_minor < 25))) {
   2344 			aprint_verbose_dev(sc->sc_dev,
   2345 			    "ROM image version %d.%d is older than 3.25\n",
   2346 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2347 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2348 		}
   2349 	}
   2350 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2351 		wm_pll_workaround_i210(sc);
   2352 
   2353 	wm_get_wakeup(sc);
   2354 
   2355 	/* Non-AMT based hardware can now take control from firmware */
   2356 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2357 		wm_get_hw_control(sc);
   2358 
   2359 	/*
   2360 	 * Read the Ethernet address from the EEPROM, if not first found
   2361 	 * in device properties.
   2362 	 */
   2363 	ea = prop_dictionary_get(dict, "mac-address");
   2364 	if (ea != NULL) {
   2365 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2366 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2367 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2368 	} else {
   2369 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2370 			aprint_error_dev(sc->sc_dev,
   2371 			    "unable to read Ethernet address\n");
   2372 			goto out;
   2373 		}
   2374 	}
   2375 
   2376 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2377 	    ether_sprintf(enaddr));
   2378 
   2379 	/*
   2380 	 * Read the config info from the EEPROM, and set up various
   2381 	 * bits in the control registers based on their contents.
   2382 	 */
   2383 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2384 	if (pn != NULL) {
   2385 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2386 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2387 	} else {
   2388 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2389 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2390 			goto out;
   2391 		}
   2392 	}
   2393 
   2394 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2395 	if (pn != NULL) {
   2396 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2397 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2398 	} else {
   2399 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2400 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2401 			goto out;
   2402 		}
   2403 	}
   2404 
   2405 	/* check for WM_F_WOL */
   2406 	switch (sc->sc_type) {
   2407 	case WM_T_82542_2_0:
   2408 	case WM_T_82542_2_1:
   2409 	case WM_T_82543:
   2410 		/* dummy? */
   2411 		eeprom_data = 0;
   2412 		apme_mask = NVM_CFG3_APME;
   2413 		break;
   2414 	case WM_T_82544:
   2415 		apme_mask = NVM_CFG2_82544_APM_EN;
   2416 		eeprom_data = cfg2;
   2417 		break;
   2418 	case WM_T_82546:
   2419 	case WM_T_82546_3:
   2420 	case WM_T_82571:
   2421 	case WM_T_82572:
   2422 	case WM_T_82573:
   2423 	case WM_T_82574:
   2424 	case WM_T_82583:
   2425 	case WM_T_80003:
   2426 	default:
   2427 		apme_mask = NVM_CFG3_APME;
   2428 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2429 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2430 		break;
   2431 	case WM_T_82575:
   2432 	case WM_T_82576:
   2433 	case WM_T_82580:
   2434 	case WM_T_I350:
   2435 	case WM_T_I354: /* XXX ok? */
   2436 	case WM_T_ICH8:
   2437 	case WM_T_ICH9:
   2438 	case WM_T_ICH10:
   2439 	case WM_T_PCH:
   2440 	case WM_T_PCH2:
   2441 	case WM_T_PCH_LPT:
   2442 	case WM_T_PCH_SPT:
   2443 		/* XXX The funcid should be checked on some devices */
   2444 		apme_mask = WUC_APME;
   2445 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2446 		break;
   2447 	}
   2448 
   2449 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2450 	if ((eeprom_data & apme_mask) != 0)
   2451 		sc->sc_flags |= WM_F_WOL;
   2452 
   2453 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2454 		/* Check NVM for autonegotiation */
   2455 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2456 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2457 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2458 		}
   2459 	}
   2460 
   2461 	/*
   2462 	 * XXX need special handling for some multiple port cards
   2463 	 * to disable a particular port.
   2464 	 */
   2465 
   2466 	if (sc->sc_type >= WM_T_82544) {
   2467 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2468 		if (pn != NULL) {
   2469 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2470 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2471 		} else {
   2472 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2473 				aprint_error_dev(sc->sc_dev,
   2474 				    "unable to read SWDPIN\n");
   2475 				goto out;
   2476 			}
   2477 		}
   2478 	}
   2479 
   2480 	if (cfg1 & NVM_CFG1_ILOS)
   2481 		sc->sc_ctrl |= CTRL_ILOS;
   2482 
   2483 	/*
   2484 	 * XXX
   2485 	 * This code isn't correct because pins 2 and 3 are located
   2486 	 * in different positions on newer chips. Check all the datasheets.
   2487 	 *
   2488 	 * Until this problem is resolved, only do this on chips <= 82580.
   2489 	 */
   2490 	if (sc->sc_type <= WM_T_82580) {
   2491 		if (sc->sc_type >= WM_T_82544) {
   2492 			sc->sc_ctrl |=
   2493 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2494 			    CTRL_SWDPIO_SHIFT;
   2495 			sc->sc_ctrl |=
   2496 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2497 			    CTRL_SWDPINS_SHIFT;
   2498 		} else {
   2499 			sc->sc_ctrl |=
   2500 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2501 			    CTRL_SWDPIO_SHIFT;
   2502 		}
   2503 	}
   2504 
   2505 	/* XXX For other than 82580? */
   2506 	if (sc->sc_type == WM_T_82580) {
   2507 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2508 		if (nvmword & __BIT(13))
   2509 			sc->sc_ctrl |= CTRL_ILOS;
   2510 	}
   2511 
   2512 #if 0
   2513 	if (sc->sc_type >= WM_T_82544) {
   2514 		if (cfg1 & NVM_CFG1_IPS0)
   2515 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2516 		if (cfg1 & NVM_CFG1_IPS1)
   2517 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2518 		sc->sc_ctrl_ext |=
   2519 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2520 		    CTRL_EXT_SWDPIO_SHIFT;
   2521 		sc->sc_ctrl_ext |=
   2522 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2523 		    CTRL_EXT_SWDPINS_SHIFT;
   2524 	} else {
   2525 		sc->sc_ctrl_ext |=
   2526 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2527 		    CTRL_EXT_SWDPIO_SHIFT;
   2528 	}
   2529 #endif
   2530 
   2531 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2532 #if 0
   2533 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2534 #endif
   2535 
   2536 	if (sc->sc_type == WM_T_PCH) {
   2537 		uint16_t val;
   2538 
   2539 		/* Save the NVM K1 bit setting */
   2540 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2541 
   2542 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2543 			sc->sc_nvm_k1_enabled = 1;
   2544 		else
   2545 			sc->sc_nvm_k1_enabled = 0;
   2546 	}
   2547 
   2548 	/* Determine if we're in GMII, TBI, SERDES or SGMII mode */
   2549 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2550 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2551 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2552 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2553 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2554 		/* Copper only */
   2555 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2556 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
   2557 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
   2558 	    || (sc->sc_type == WM_T_I211)) {
   2559 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2560 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2561 		switch (link_mode) {
   2562 		case CTRL_EXT_LINK_MODE_1000KX:
   2563 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2564 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2565 			break;
   2566 		case CTRL_EXT_LINK_MODE_SGMII:
   2567 			if (wm_sgmii_uses_mdio(sc)) {
   2568 				aprint_verbose_dev(sc->sc_dev,
   2569 				    "SGMII(MDIO)\n");
   2570 				sc->sc_flags |= WM_F_SGMII;
   2571 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2572 				break;
   2573 			}
   2574 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2575 			/*FALLTHROUGH*/
   2576 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2577 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2578 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2579 				if (link_mode
   2580 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2581 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2582 					sc->sc_flags |= WM_F_SGMII;
   2583 				} else {
   2584 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2585 					aprint_verbose_dev(sc->sc_dev,
   2586 					    "SERDES\n");
   2587 				}
   2588 				break;
   2589 			}
   2590 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2591 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2592 
   2593 			/* Change current link mode setting */
   2594 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2595 			switch (sc->sc_mediatype) {
   2596 			case WM_MEDIATYPE_COPPER:
   2597 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2598 				break;
   2599 			case WM_MEDIATYPE_SERDES:
   2600 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2601 				break;
   2602 			default:
   2603 				break;
   2604 			}
   2605 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2606 			break;
   2607 		case CTRL_EXT_LINK_MODE_GMII:
   2608 		default:
   2609 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2610 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2611 			break;
   2612 		}
   2613 
   2615 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2616 			reg |= CTRL_EXT_I2C_ENA;
   2617 		else
   2618 			reg &= ~CTRL_EXT_I2C_ENA;
   2619 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2620 	} else if (sc->sc_type < WM_T_82543 ||
   2621 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2622 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2623 			aprint_error_dev(sc->sc_dev,
   2624 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2625 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2626 		}
   2627 	} else {
   2628 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2629 			aprint_error_dev(sc->sc_dev,
   2630 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2631 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2632 		}
   2633 	}
   2634 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2635 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2636 
   2637 	/* Set device properties (macflags) */
   2638 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2639 
   2640 	/* Initialize the media structures accordingly. */
   2641 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2642 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2643 	else
   2644 		wm_tbi_mediainit(sc); /* All others */
   2645 
   2646 	ifp = &sc->sc_ethercom.ec_if;
   2647 	xname = device_xname(sc->sc_dev);
   2648 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2649 	ifp->if_softc = sc;
   2650 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2651 #ifdef WM_MPSAFE
   2652 	ifp->if_extflags = IFEF_MPSAFE;
   2653 #endif
   2654 	ifp->if_ioctl = wm_ioctl;
   2655 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2656 		ifp->if_start = wm_nq_start;
   2657 		/*
   2658 		 * When the number of CPUs is one and the controller can use
   2659 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   2660 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
   2661 		 * That is, wm(4) uses two interrupts: one for Tx/Rx
   2662 		 * and the other for link status changes.
   2663 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2664 		 */
   2665 		if (wm_is_using_multiqueue(sc))
   2666 			ifp->if_transmit = wm_nq_transmit;
   2667 	} else {
   2668 		ifp->if_start = wm_start;
   2669 		/*
   2670 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2671 		 */
   2672 		if (wm_is_using_multiqueue(sc))
   2673 			ifp->if_transmit = wm_transmit;
   2674 	}
   2675 	ifp->if_watchdog = wm_watchdog;
   2676 	ifp->if_init = wm_init;
   2677 	ifp->if_stop = wm_stop;
   2678 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2679 	IFQ_SET_READY(&ifp->if_snd);
   2680 
   2681 	/* Check for jumbo frame */
   2682 	switch (sc->sc_type) {
   2683 	case WM_T_82573:
   2684 		/* XXX limited to 9234 if ASPM is disabled */
   2685 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2686 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2687 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2688 		break;
   2689 	case WM_T_82571:
   2690 	case WM_T_82572:
   2691 	case WM_T_82574:
   2692 	case WM_T_82583:
   2693 	case WM_T_82575:
   2694 	case WM_T_82576:
   2695 	case WM_T_82580:
   2696 	case WM_T_I350:
   2697 	case WM_T_I354:
   2698 	case WM_T_I210:
   2699 	case WM_T_I211:
   2700 	case WM_T_80003:
   2701 	case WM_T_ICH9:
   2702 	case WM_T_ICH10:
   2703 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2704 	case WM_T_PCH_LPT:
   2705 	case WM_T_PCH_SPT:
   2706 		/* XXX limited to 9234 */
   2707 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2708 		break;
   2709 	case WM_T_PCH:
   2710 		/* XXX limited to 4096 */
   2711 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2712 		break;
   2713 	case WM_T_82542_2_0:
   2714 	case WM_T_82542_2_1:
   2715 	case WM_T_ICH8:
   2716 		/* No support for jumbo frame */
   2717 		break;
   2718 	default:
   2719 		/* ETHER_MAX_LEN_JUMBO */
   2720 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2721 		break;
   2722 	}
   2723 
   2724 	/* If we're an i82543 or greater, we can support VLANs. */
   2725 	if (sc->sc_type >= WM_T_82543)
   2726 		sc->sc_ethercom.ec_capabilities |=
   2727 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2728 
   2729 	/*
   2730 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2731 	 * on i82543 and later.
   2732 	 */
   2733 	if (sc->sc_type >= WM_T_82543) {
   2734 		ifp->if_capabilities |=
   2735 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2736 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2737 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2738 		    IFCAP_CSUM_TCPv6_Tx |
   2739 		    IFCAP_CSUM_UDPv6_Tx;
   2740 	}
   2741 
   2742 	/*
   2743 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2744 	 *
   2745 	 *	82541GI (8086:1076) ... no
   2746 	 *	82572EI (8086:10b9) ... yes
   2747 	 */
   2748 	if (sc->sc_type >= WM_T_82571) {
   2749 		ifp->if_capabilities |=
   2750 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2751 	}
   2752 
   2753 	/*
   2754 	 * If we're an i82544 or greater (except i82547), we can do
   2755 	 * TCP segmentation offload.
   2756 	 */
   2757 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2758 		ifp->if_capabilities |= IFCAP_TSOv4;
   2759 	}
   2760 
   2761 	if (sc->sc_type >= WM_T_82571) {
   2762 		ifp->if_capabilities |= IFCAP_TSOv6;
   2763 	}
   2764 
   2765 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2766 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2767 
   2768 #ifdef WM_MPSAFE
   2769 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2770 #else
   2771 	sc->sc_core_lock = NULL;
   2772 #endif
   2773 
   2774 	/* Attach the interface. */
   2775 	error = if_initialize(ifp);
   2776 	if (error != 0) {
   2777 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2778 		    error);
   2779 		return; /* Error */
   2780 	}
   2781 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2782 	ether_ifattach(ifp, enaddr);
   2783 	if_register(ifp);
   2784 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2785 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2786 			  RND_FLAG_DEFAULT);
   2787 
   2788 #ifdef WM_EVENT_COUNTERS
   2789 	/* Attach event counters. */
   2790 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2791 	    NULL, xname, "linkintr");
   2792 
   2793 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2794 	    NULL, xname, "tx_xoff");
   2795 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2796 	    NULL, xname, "tx_xon");
   2797 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2798 	    NULL, xname, "rx_xoff");
   2799 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2800 	    NULL, xname, "rx_xon");
   2801 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2802 	    NULL, xname, "rx_macctl");
   2803 #endif /* WM_EVENT_COUNTERS */
   2804 
   2805 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2806 		pmf_class_network_register(self, ifp);
   2807 	else
   2808 		aprint_error_dev(self, "couldn't establish power handler\n");
   2809 
   2810 	sc->sc_flags |= WM_F_ATTACHED;
   2811  out:
   2812 	return;
   2813 }
   2814 
   2815 /* The detach function (ca_detach) */
   2816 static int
   2817 wm_detach(device_t self, int flags __unused)
   2818 {
   2819 	struct wm_softc *sc = device_private(self);
   2820 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2821 	int i;
   2822 
   2823 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2824 		return 0;
   2825 
   2826 	/* Stop the interface. Callouts are stopped in it. */
   2827 	wm_stop(ifp, 1);
   2828 
   2829 	pmf_device_deregister(self);
   2830 
   2831 #ifdef WM_EVENT_COUNTERS
   2832 	evcnt_detach(&sc->sc_ev_linkintr);
   2833 
   2834 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2835 	evcnt_detach(&sc->sc_ev_tx_xon);
   2836 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2837 	evcnt_detach(&sc->sc_ev_rx_xon);
   2838 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2839 #endif /* WM_EVENT_COUNTERS */
   2840 
   2841 	/* Tell the firmware about the release */
   2842 	WM_CORE_LOCK(sc);
   2843 	wm_release_manageability(sc);
   2844 	wm_release_hw_control(sc);
   2845 	wm_enable_wakeup(sc);
   2846 	WM_CORE_UNLOCK(sc);
   2847 
   2848 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2849 
   2850 	/* Delete all remaining media. */
   2851 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2852 
   2853 	ether_ifdetach(ifp);
   2854 	if_detach(ifp);
   2855 	if_percpuq_destroy(sc->sc_ipq);
   2856 
   2857 	/* Unload RX dmamaps and free mbufs */
   2858 	for (i = 0; i < sc->sc_nqueues; i++) {
   2859 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2860 		mutex_enter(rxq->rxq_lock);
   2861 		wm_rxdrain(rxq);
   2862 		mutex_exit(rxq->rxq_lock);
   2863 	}
   2864 	/* Must unlock here */
   2865 
   2866 	/* Disestablish the interrupt handler */
   2867 	for (i = 0; i < sc->sc_nintrs; i++) {
   2868 		if (sc->sc_ihs[i] != NULL) {
   2869 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2870 			sc->sc_ihs[i] = NULL;
   2871 		}
   2872 	}
   2873 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2874 
   2875 	wm_free_txrx_queues(sc);
   2876 
   2877 	/* Unmap the registers */
   2878 	if (sc->sc_ss) {
   2879 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2880 		sc->sc_ss = 0;
   2881 	}
   2882 	if (sc->sc_ios) {
   2883 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2884 		sc->sc_ios = 0;
   2885 	}
   2886 	if (sc->sc_flashs) {
   2887 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2888 		sc->sc_flashs = 0;
   2889 	}
   2890 
   2891 	if (sc->sc_core_lock)
   2892 		mutex_obj_free(sc->sc_core_lock);
   2893 	if (sc->sc_ich_phymtx)
   2894 		mutex_obj_free(sc->sc_ich_phymtx);
   2895 	if (sc->sc_ich_nvmmtx)
   2896 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2897 
   2898 	return 0;
   2899 }
   2900 
   2901 static bool
   2902 wm_suspend(device_t self, const pmf_qual_t *qual)
   2903 {
   2904 	struct wm_softc *sc = device_private(self);
   2905 
   2906 	wm_release_manageability(sc);
   2907 	wm_release_hw_control(sc);
   2908 	wm_enable_wakeup(sc);
   2909 
   2910 	return true;
   2911 }
   2912 
   2913 static bool
   2914 wm_resume(device_t self, const pmf_qual_t *qual)
   2915 {
   2916 	struct wm_softc *sc = device_private(self);
   2917 
   2918 	/* Disable ASPM L0s and/or L1 for workaround */
   2919 	wm_disable_aspm(sc);
   2920 	wm_init_manageability(sc);
   2921 
   2922 	return true;
   2923 }
   2924 
   2925 /*
   2926  * wm_watchdog:		[ifnet interface function]
   2927  *
   2928  *	Watchdog timer handler.
   2929  */
   2930 static void
   2931 wm_watchdog(struct ifnet *ifp)
   2932 {
   2933 	int qid;
   2934 	struct wm_softc *sc = ifp->if_softc;
   2935 
   2936 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2937 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2938 
   2939 		wm_watchdog_txq(ifp, txq);
   2940 	}
   2941 
   2942 	/* Reset the interface. */
   2943 	(void) wm_init(ifp);
   2944 
   2945 	/*
    2946 	 * Some upper-layer processing, e.g. ALTQ or a single-CPU
    2947 	 * system, still calls ifp->if_start() directly.
   2948 	 */
   2949 	/* Try to get more packets going. */
   2950 	ifp->if_start(ifp);
   2951 }
   2952 
   2953 static void
   2954 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2955 {
   2956 	struct wm_softc *sc = ifp->if_softc;
   2957 
   2958 	/*
   2959 	 * Since we're using delayed interrupts, sweep up
   2960 	 * before we report an error.
   2961 	 */
   2962 	mutex_enter(txq->txq_lock);
   2963 	wm_txeof(sc, txq);
   2964 	mutex_exit(txq->txq_lock);
   2965 
   2966 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2967 #ifdef WM_DEBUG
   2968 		int i, j;
   2969 		struct wm_txsoft *txs;
   2970 #endif
   2971 		log(LOG_ERR,
   2972 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2973 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2974 		    txq->txq_next);
   2975 		ifp->if_oerrors++;
   2976 #ifdef WM_DEBUG
   2977 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2978 		    i = WM_NEXTTXS(txq, i)) {
   2979 		    txs = &txq->txq_soft[i];
   2980 		    printf("txs %d tx %d -> %d\n",
   2981 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2982 		    for (j = txs->txs_firstdesc; ;
   2983 			j = WM_NEXTTX(txq, j)) {
   2984 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2985 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2986 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2987 				    printf("\t %#08x%08x\n",
   2988 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2989 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2990 			    } else {
   2991 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2992 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   2993 					txq->txq_descs[j].wtx_addr.wa_low);
   2994 				    printf("\t %#04x%02x%02x%08x\n",
   2995 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   2996 					txq->txq_descs[j].wtx_fields.wtxu_options,
   2997 					txq->txq_descs[j].wtx_fields.wtxu_status,
   2998 					txq->txq_descs[j].wtx_cmdlen);
   2999 			    }
   3000 			if (j == txs->txs_lastdesc)
   3001 				break;
   3002 			}
   3003 		}
   3004 #endif
   3005 	}
   3006 }
   3007 
   3008 /*
   3009  * wm_tick:
   3010  *
   3011  *	One second timer, used to check link status, sweep up
   3012  *	completed transmit jobs, etc.
   3013  */
   3014 static void
   3015 wm_tick(void *arg)
   3016 {
   3017 	struct wm_softc *sc = arg;
   3018 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3019 #ifndef WM_MPSAFE
   3020 	int s = splnet();
   3021 #endif
   3022 
   3023 	WM_CORE_LOCK(sc);
   3024 
   3025 	if (sc->sc_core_stopping)
   3026 		goto out;
   3027 
   3028 	if (sc->sc_type >= WM_T_82542_2_1) {
   3029 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3030 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3031 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3032 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3033 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3034 	}
   3035 
   3036 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3037 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3038 	    + CSR_READ(sc, WMREG_CRCERRS)
   3039 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3040 	    + CSR_READ(sc, WMREG_SYMERRC)
   3041 	    + CSR_READ(sc, WMREG_RXERRC)
   3042 	    + CSR_READ(sc, WMREG_SEC)
   3043 	    + CSR_READ(sc, WMREG_CEXTERR)
   3044 	    + CSR_READ(sc, WMREG_RLEC);
   3045 	/*
    3046 	 * WMREG_RNBC is incremented when no receive buffers are available
    3047 	 * in host memory. It is not the number of dropped packets, because
    3048 	 * the ethernet controller can still receive packets in that case
    3049 	 * as long as there is space in the PHY's FIFO.
    3050 	 *
    3051 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
    3052 	 * instead of if_iqdrops.
   3053 	 */
   3054 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3055 
   3056 	if (sc->sc_flags & WM_F_HAS_MII)
   3057 		mii_tick(&sc->sc_mii);
   3058 	else if ((sc->sc_type >= WM_T_82575)
   3059 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3060 		wm_serdes_tick(sc);
   3061 	else
   3062 		wm_tbi_tick(sc);
   3063 
   3064 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3065 out:
   3066 	WM_CORE_UNLOCK(sc);
   3067 #ifndef WM_MPSAFE
   3068 	splx(s);
   3069 #endif
   3070 }
   3071 
   3072 static int
   3073 wm_ifflags_cb(struct ethercom *ec)
   3074 {
   3075 	struct ifnet *ifp = &ec->ec_if;
   3076 	struct wm_softc *sc = ifp->if_softc;
   3077 	int rc = 0;
   3078 
   3079 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3080 		device_xname(sc->sc_dev), __func__));
   3081 
   3082 	WM_CORE_LOCK(sc);
   3083 
   3084 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3085 	sc->sc_if_flags = ifp->if_flags;
   3086 
   3087 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3088 		rc = ENETRESET;
   3089 		goto out;
   3090 	}
   3091 
   3092 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3093 		wm_set_filter(sc);
   3094 
   3095 	wm_set_vlan(sc);
   3096 
   3097 out:
   3098 	WM_CORE_UNLOCK(sc);
   3099 
   3100 	return rc;
   3101 }
   3102 
   3103 /*
   3104  * wm_ioctl:		[ifnet interface function]
   3105  *
   3106  *	Handle control requests from the operator.
   3107  */
   3108 static int
   3109 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3110 {
   3111 	struct wm_softc *sc = ifp->if_softc;
   3112 	struct ifreq *ifr = (struct ifreq *) data;
   3113 	struct ifaddr *ifa = (struct ifaddr *)data;
   3114 	struct sockaddr_dl *sdl;
   3115 	int s, error;
   3116 
   3117 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3118 		device_xname(sc->sc_dev), __func__));
   3119 
   3120 #ifndef WM_MPSAFE
   3121 	s = splnet();
   3122 #endif
   3123 	switch (cmd) {
   3124 	case SIOCSIFMEDIA:
   3125 	case SIOCGIFMEDIA:
   3126 		WM_CORE_LOCK(sc);
   3127 		/* Flow control requires full-duplex mode. */
   3128 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3129 		    (ifr->ifr_media & IFM_FDX) == 0)
   3130 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3131 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3132 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3133 				/* We can do both TXPAUSE and RXPAUSE. */
   3134 				ifr->ifr_media |=
   3135 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3136 			}
   3137 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3138 		}
   3139 		WM_CORE_UNLOCK(sc);
   3140 #ifdef WM_MPSAFE
   3141 		s = splnet();
   3142 #endif
   3143 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3144 #ifdef WM_MPSAFE
   3145 		splx(s);
   3146 #endif
   3147 		break;
   3148 	case SIOCINITIFADDR:
   3149 		WM_CORE_LOCK(sc);
   3150 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3151 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3152 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3153 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3154 			/* unicast address is first multicast entry */
   3155 			wm_set_filter(sc);
   3156 			error = 0;
   3157 			WM_CORE_UNLOCK(sc);
   3158 			break;
   3159 		}
   3160 		WM_CORE_UNLOCK(sc);
   3161 		/*FALLTHROUGH*/
   3162 	default:
   3163 #ifdef WM_MPSAFE
   3164 		s = splnet();
   3165 #endif
   3166 		/* It may call wm_start, so unlock here */
   3167 		error = ether_ioctl(ifp, cmd, data);
   3168 #ifdef WM_MPSAFE
   3169 		splx(s);
   3170 #endif
   3171 		if (error != ENETRESET)
   3172 			break;
   3173 
   3174 		error = 0;
   3175 
   3176 		if (cmd == SIOCSIFCAP) {
   3177 			error = (*ifp->if_init)(ifp);
   3178 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3179 			;
   3180 		else if (ifp->if_flags & IFF_RUNNING) {
   3181 			/*
   3182 			 * Multicast list has changed; set the hardware filter
   3183 			 * accordingly.
   3184 			 */
   3185 			WM_CORE_LOCK(sc);
   3186 			wm_set_filter(sc);
   3187 			WM_CORE_UNLOCK(sc);
   3188 		}
   3189 		break;
   3190 	}
   3191 
   3192 #ifndef WM_MPSAFE
   3193 	splx(s);
   3194 #endif
   3195 	return error;
   3196 }
   3197 
   3198 /* MAC address related */
   3199 
   3200 /*
    3201  * Get the offset of the MAC address and return it.
    3202  * If an error occurs, return offset 0.
   3203  */
   3204 static uint16_t
   3205 wm_check_alt_mac_addr(struct wm_softc *sc)
   3206 {
   3207 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3208 	uint16_t offset = NVM_OFF_MACADDR;
   3209 
   3210 	/* Try to read alternative MAC address pointer */
   3211 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3212 		return 0;
   3213 
    3214 	/* Check whether the pointer is valid or not. */
   3215 	if ((offset == 0x0000) || (offset == 0xffff))
   3216 		return 0;
   3217 
   3218 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3219 	/*
    3220 	 * Check whether the alternative MAC address is valid or not.
    3221 	 * Some cards have a non-0xffff pointer but don't actually use
    3222 	 * an alternative MAC address.
    3223 	 *
    3224 	 * A valid address must not have the multicast (group) bit set.
   3225 	 */
   3226 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3227 		if (((myea[0] & 0xff) & 0x01) == 0)
   3228 			return offset; /* Found */
   3229 
   3230 	/* Not found */
   3231 	return 0;
   3232 }
   3233 
   3234 static int
   3235 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3236 {
   3237 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3238 	uint16_t offset = NVM_OFF_MACADDR;
   3239 	int do_invert = 0;
   3240 
   3241 	switch (sc->sc_type) {
   3242 	case WM_T_82580:
   3243 	case WM_T_I350:
   3244 	case WM_T_I354:
   3245 		/* EEPROM Top Level Partitioning */
   3246 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3247 		break;
   3248 	case WM_T_82571:
   3249 	case WM_T_82575:
   3250 	case WM_T_82576:
   3251 	case WM_T_80003:
   3252 	case WM_T_I210:
   3253 	case WM_T_I211:
   3254 		offset = wm_check_alt_mac_addr(sc);
   3255 		if (offset == 0)
   3256 			if ((sc->sc_funcid & 0x01) == 1)
   3257 				do_invert = 1;
   3258 		break;
   3259 	default:
   3260 		if ((sc->sc_funcid & 0x01) == 1)
   3261 			do_invert = 1;
   3262 		break;
   3263 	}
   3264 
   3265 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3266 		goto bad;
   3267 
   3268 	enaddr[0] = myea[0] & 0xff;
   3269 	enaddr[1] = myea[0] >> 8;
   3270 	enaddr[2] = myea[1] & 0xff;
   3271 	enaddr[3] = myea[1] >> 8;
   3272 	enaddr[4] = myea[2] & 0xff;
   3273 	enaddr[5] = myea[2] >> 8;
   3274 
   3275 	/*
   3276 	 * Toggle the LSB of the MAC address on the second port
   3277 	 * of some dual port cards.
   3278 	 */
   3279 	if (do_invert != 0)
   3280 		enaddr[5] ^= 1;
   3281 
   3282 	return 0;
   3283 
   3284  bad:
   3285 	return -1;
   3286 }
   3287 
   3288 /*
   3289  * wm_set_ral:
   3290  *
    3291  *	Set an entry in the receive address list.
   3292  */
   3293 static void
   3294 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3295 {
   3296 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3297 	uint32_t wlock_mac;
   3298 	int rv;
   3299 
   3300 	if (enaddr != NULL) {
   3301 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3302 		    (enaddr[3] << 24);
   3303 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3304 		ral_hi |= RAL_AV;
   3305 	} else {
   3306 		ral_lo = 0;
   3307 		ral_hi = 0;
   3308 	}
   3309 
   3310 	switch (sc->sc_type) {
   3311 	case WM_T_82542_2_0:
   3312 	case WM_T_82542_2_1:
   3313 	case WM_T_82543:
   3314 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3315 		CSR_WRITE_FLUSH(sc);
   3316 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3317 		CSR_WRITE_FLUSH(sc);
   3318 		break;
   3319 	case WM_T_PCH2:
   3320 	case WM_T_PCH_LPT:
   3321 	case WM_T_PCH_SPT:
   3322 		if (idx == 0) {
   3323 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3324 			CSR_WRITE_FLUSH(sc);
   3325 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3326 			CSR_WRITE_FLUSH(sc);
   3327 			return;
   3328 		}
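         		/*
         		 * Entries other than RAR[0] live in the shared receive
         		 * address registers (SHRA), some of which may be locked
         		 * by firmware (FWSM_WLOCK_MAC); write only the ones
         		 * that are not locked.
         		 */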
   3329 		if (sc->sc_type != WM_T_PCH2) {
   3330 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3331 			    FWSM_WLOCK_MAC);
   3332 			addrl = WMREG_SHRAL(idx - 1);
   3333 			addrh = WMREG_SHRAH(idx - 1);
   3334 		} else {
   3335 			wlock_mac = 0;
   3336 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3337 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3338 		}
   3339 
   3340 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3341 			rv = wm_get_swflag_ich8lan(sc);
   3342 			if (rv != 0)
   3343 				return;
   3344 			CSR_WRITE(sc, addrl, ral_lo);
   3345 			CSR_WRITE_FLUSH(sc);
   3346 			CSR_WRITE(sc, addrh, ral_hi);
   3347 			CSR_WRITE_FLUSH(sc);
   3348 			wm_put_swflag_ich8lan(sc);
   3349 		}
   3350 
   3351 		break;
   3352 	default:
   3353 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3354 		CSR_WRITE_FLUSH(sc);
   3355 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3356 		CSR_WRITE_FLUSH(sc);
   3357 		break;
   3358 	}
   3359 }
   3360 
   3361 /*
   3362  * wm_mchash:
   3363  *
   3364  *	Compute the hash of the multicast address for the 4096-bit
   3365  *	multicast filter.
   3366  */
   3367 static uint32_t
   3368 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3369 {
   3370 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3371 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3372 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3373 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
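         	/*
         	 * sc_mchash_type selects among the four shift variants above
         	 * (cf. the RCTL multicast-offset setting), i.e. which 12 bits
         	 * (10 bits on ICH/PCH) of the address form the hash.
         	 */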
   3374 	uint32_t hash;
   3375 
   3376 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3377 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3378 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3379 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3380 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3381 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3382 		return (hash & 0x3ff);
   3383 	}
   3384 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3385 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3386 
   3387 	return (hash & 0xfff);
   3388 }
   3389 
   3390 /*
   3391  * wm_set_filter:
   3392  *
   3393  *	Set up the receive filter.
   3394  */
   3395 static void
   3396 wm_set_filter(struct wm_softc *sc)
   3397 {
   3398 	struct ethercom *ec = &sc->sc_ethercom;
   3399 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3400 	struct ether_multi *enm;
   3401 	struct ether_multistep step;
   3402 	bus_addr_t mta_reg;
   3403 	uint32_t hash, reg, bit;
   3404 	int i, size, ralmax;
   3405 
   3406 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3407 		device_xname(sc->sc_dev), __func__));
   3408 
   3409 	if (sc->sc_type >= WM_T_82544)
   3410 		mta_reg = WMREG_CORDOVA_MTA;
   3411 	else
   3412 		mta_reg = WMREG_MTA;
   3413 
   3414 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3415 
   3416 	if (ifp->if_flags & IFF_BROADCAST)
   3417 		sc->sc_rctl |= RCTL_BAM;
   3418 	if (ifp->if_flags & IFF_PROMISC) {
   3419 		sc->sc_rctl |= RCTL_UPE;
   3420 		goto allmulti;
   3421 	}
   3422 
   3423 	/*
   3424 	 * Set the station address in the first RAL slot, and
   3425 	 * clear the remaining slots.
   3426 	 */
   3427 	if (sc->sc_type == WM_T_ICH8)
    3428 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3429 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3430 	    || (sc->sc_type == WM_T_PCH))
   3431 		size = WM_RAL_TABSIZE_ICH8;
   3432 	else if (sc->sc_type == WM_T_PCH2)
   3433 		size = WM_RAL_TABSIZE_PCH2;
    3434 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3435 		size = WM_RAL_TABSIZE_PCH_LPT;
   3436 	else if (sc->sc_type == WM_T_82575)
   3437 		size = WM_RAL_TABSIZE_82575;
   3438 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3439 		size = WM_RAL_TABSIZE_82576;
   3440 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3441 		size = WM_RAL_TABSIZE_I350;
   3442 	else
   3443 		size = WM_RAL_TABSIZE;
   3444 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3445 
   3446 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3447 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3448 		switch (i) {
   3449 		case 0:
   3450 			/* We can use all entries */
   3451 			ralmax = size;
   3452 			break;
   3453 		case 1:
   3454 			/* Only RAR[0] */
   3455 			ralmax = 1;
   3456 			break;
   3457 		default:
   3458 			/* available SHRA + RAR[0] */
   3459 			ralmax = i + 1;
   3460 		}
   3461 	} else
   3462 		ralmax = size;
   3463 	for (i = 1; i < size; i++) {
   3464 		if (i < ralmax)
   3465 			wm_set_ral(sc, NULL, i);
   3466 	}
   3467 
   3468 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3469 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3470 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3471 	    || (sc->sc_type == WM_T_PCH_SPT))
   3472 		size = WM_ICH8_MC_TABSIZE;
   3473 	else
   3474 		size = WM_MC_TABSIZE;
   3475 	/* Clear out the multicast table. */
   3476 	for (i = 0; i < size; i++) {
   3477 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3478 		CSR_WRITE_FLUSH(sc);
   3479 	}
   3480 
   3481 	ETHER_LOCK(ec);
   3482 	ETHER_FIRST_MULTI(step, ec, enm);
   3483 	while (enm != NULL) {
   3484 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3485 			ETHER_UNLOCK(ec);
   3486 			/*
   3487 			 * We must listen to a range of multicast addresses.
   3488 			 * For now, just accept all multicasts, rather than
   3489 			 * trying to set only those filter bits needed to match
   3490 			 * the range.  (At this time, the only use of address
   3491 			 * ranges is for IP multicast routing, for which the
   3492 			 * range is big enough to require all bits set.)
   3493 			 */
   3494 			goto allmulti;
   3495 		}
   3496 
   3497 		hash = wm_mchash(sc, enm->enm_addrlo);
   3498 
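         		/*
         		 * The upper bits of the hash select a 32-bit MTA word;
         		 * the low 5 bits select a bit within that word.
         		 */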
   3499 		reg = (hash >> 5);
   3500 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3501 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3502 		    || (sc->sc_type == WM_T_PCH2)
   3503 		    || (sc->sc_type == WM_T_PCH_LPT)
   3504 		    || (sc->sc_type == WM_T_PCH_SPT))
   3505 			reg &= 0x1f;
   3506 		else
   3507 			reg &= 0x7f;
   3508 		bit = hash & 0x1f;
   3509 
   3510 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3511 		hash |= 1U << bit;
   3512 
   3513 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3514 			/*
   3515 			 * 82544 Errata 9: Certain register cannot be written
   3516 			 * with particular alignments in PCI-X bus operation
   3517 			 * (FCAH, MTA and VFTA).
   3518 			 */
   3519 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3520 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3521 			CSR_WRITE_FLUSH(sc);
   3522 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3523 			CSR_WRITE_FLUSH(sc);
   3524 		} else {
   3525 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3526 			CSR_WRITE_FLUSH(sc);
   3527 		}
   3528 
   3529 		ETHER_NEXT_MULTI(step, enm);
   3530 	}
   3531 	ETHER_UNLOCK(ec);
   3532 
   3533 	ifp->if_flags &= ~IFF_ALLMULTI;
   3534 	goto setit;
   3535 
   3536  allmulti:
   3537 	ifp->if_flags |= IFF_ALLMULTI;
   3538 	sc->sc_rctl |= RCTL_MPE;
   3539 
   3540  setit:
   3541 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3542 }
   3543 
   3544 /* Reset and init related */
   3545 
   3546 static void
   3547 wm_set_vlan(struct wm_softc *sc)
   3548 {
   3549 
   3550 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3551 		device_xname(sc->sc_dev), __func__));
   3552 
   3553 	/* Deal with VLAN enables. */
   3554 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3555 		sc->sc_ctrl |= CTRL_VME;
   3556 	else
   3557 		sc->sc_ctrl &= ~CTRL_VME;
   3558 
   3559 	/* Write the control registers. */
   3560 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3561 }
   3562 
   3563 static void
   3564 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3565 {
   3566 	uint32_t gcr;
   3567 	pcireg_t ctrl2;
   3568 
   3569 	gcr = CSR_READ(sc, WMREG_GCR);
   3570 
   3571 	/* Only take action if timeout value is defaulted to 0 */
   3572 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3573 		goto out;
   3574 
   3575 	if ((gcr & GCR_CAP_VER2) == 0) {
   3576 		gcr |= GCR_CMPL_TMOUT_10MS;
   3577 		goto out;
   3578 	}
   3579 
   3580 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3581 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
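         	/*
         	 * PCIe capability version 2 device: set a 16ms completion
         	 * timeout through the PCIe Device Control 2 register instead.
         	 */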
   3582 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3583 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3584 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3585 
   3586 out:
   3587 	/* Disable completion timeout resend */
   3588 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3589 
   3590 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3591 }
   3592 
   3593 void
   3594 wm_get_auto_rd_done(struct wm_softc *sc)
   3595 {
   3596 	int i;
   3597 
   3598 	/* wait for eeprom to reload */
   3599 	switch (sc->sc_type) {
   3600 	case WM_T_82571:
   3601 	case WM_T_82572:
   3602 	case WM_T_82573:
   3603 	case WM_T_82574:
   3604 	case WM_T_82583:
   3605 	case WM_T_82575:
   3606 	case WM_T_82576:
   3607 	case WM_T_82580:
   3608 	case WM_T_I350:
   3609 	case WM_T_I354:
   3610 	case WM_T_I210:
   3611 	case WM_T_I211:
   3612 	case WM_T_80003:
   3613 	case WM_T_ICH8:
   3614 	case WM_T_ICH9:
   3615 		for (i = 0; i < 10; i++) {
   3616 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3617 				break;
   3618 			delay(1000);
   3619 		}
   3620 		if (i == 10) {
   3621 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3622 			    "complete\n", device_xname(sc->sc_dev));
   3623 		}
   3624 		break;
   3625 	default:
   3626 		break;
   3627 	}
   3628 }
   3629 
   3630 void
   3631 wm_lan_init_done(struct wm_softc *sc)
   3632 {
   3633 	uint32_t reg = 0;
   3634 	int i;
   3635 
   3636 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3637 		device_xname(sc->sc_dev), __func__));
   3638 
   3639 	/* Wait for eeprom to reload */
   3640 	switch (sc->sc_type) {
   3641 	case WM_T_ICH10:
   3642 	case WM_T_PCH:
   3643 	case WM_T_PCH2:
   3644 	case WM_T_PCH_LPT:
   3645 	case WM_T_PCH_SPT:
   3646 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3647 			reg = CSR_READ(sc, WMREG_STATUS);
   3648 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3649 				break;
   3650 			delay(100);
   3651 		}
   3652 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3653 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3654 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3655 		}
   3656 		break;
   3657 	default:
   3658 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3659 		    __func__);
   3660 		break;
   3661 	}
   3662 
   3663 	reg &= ~STATUS_LAN_INIT_DONE;
   3664 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3665 }
   3666 
   3667 void
   3668 wm_get_cfg_done(struct wm_softc *sc)
   3669 {
   3670 	int mask;
   3671 	uint32_t reg;
   3672 	int i;
   3673 
   3674 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3675 		device_xname(sc->sc_dev), __func__));
   3676 
   3677 	/* Wait for eeprom to reload */
   3678 	switch (sc->sc_type) {
   3679 	case WM_T_82542_2_0:
   3680 	case WM_T_82542_2_1:
   3681 		/* null */
   3682 		break;
   3683 	case WM_T_82543:
   3684 	case WM_T_82544:
   3685 	case WM_T_82540:
   3686 	case WM_T_82545:
   3687 	case WM_T_82545_3:
   3688 	case WM_T_82546:
   3689 	case WM_T_82546_3:
   3690 	case WM_T_82541:
   3691 	case WM_T_82541_2:
   3692 	case WM_T_82547:
   3693 	case WM_T_82547_2:
   3694 	case WM_T_82573:
   3695 	case WM_T_82574:
   3696 	case WM_T_82583:
   3697 		/* generic */
   3698 		delay(10*1000);
   3699 		break;
   3700 	case WM_T_80003:
   3701 	case WM_T_82571:
   3702 	case WM_T_82572:
   3703 	case WM_T_82575:
   3704 	case WM_T_82576:
   3705 	case WM_T_82580:
   3706 	case WM_T_I350:
   3707 	case WM_T_I354:
   3708 	case WM_T_I210:
   3709 	case WM_T_I211:
   3710 		if (sc->sc_type == WM_T_82571) {
   3711 			/* Only 82571 shares port 0 */
   3712 			mask = EEMNGCTL_CFGDONE_0;
   3713 		} else
   3714 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3715 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3716 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3717 				break;
   3718 			delay(1000);
   3719 		}
   3720 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3721 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3722 				device_xname(sc->sc_dev), __func__));
   3723 		}
   3724 		break;
   3725 	case WM_T_ICH8:
   3726 	case WM_T_ICH9:
   3727 	case WM_T_ICH10:
   3728 	case WM_T_PCH:
   3729 	case WM_T_PCH2:
   3730 	case WM_T_PCH_LPT:
   3731 	case WM_T_PCH_SPT:
   3732 		delay(10*1000);
   3733 		if (sc->sc_type >= WM_T_ICH10)
   3734 			wm_lan_init_done(sc);
   3735 		else
   3736 			wm_get_auto_rd_done(sc);
   3737 
   3738 		reg = CSR_READ(sc, WMREG_STATUS);
   3739 		if ((reg & STATUS_PHYRA) != 0)
   3740 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3741 		break;
   3742 	default:
   3743 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3744 		    __func__);
   3745 		break;
   3746 	}
   3747 }
   3748 
   3749 void
   3750 wm_phy_post_reset(struct wm_softc *sc)
   3751 {
   3752 	uint32_t reg;
   3753 
   3754 	/* This function is only for ICH8 and newer. */
   3755 	if (sc->sc_type < WM_T_ICH8)
   3756 		return;
   3757 
   3758 	if (wm_phy_resetisblocked(sc)) {
   3759 		/* XXX */
   3760 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3761 		return;
   3762 	}
   3763 
   3764 	/* Allow time for h/w to get to quiescent state after reset */
   3765 	delay(10*1000);
   3766 
   3767 	/* Perform any necessary post-reset workarounds */
   3768 	if (sc->sc_type == WM_T_PCH)
   3769 		wm_hv_phy_workaround_ich8lan(sc);
   3770 	if (sc->sc_type == WM_T_PCH2)
   3771 		wm_lv_phy_workaround_ich8lan(sc);
   3772 
    3773 	/* Clear the host wakeup bit after LCD reset */
   3774 	if (sc->sc_type >= WM_T_PCH) {
   3775 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3776 		    BM_PORT_GEN_CFG);
   3777 		reg &= ~BM_WUC_HOST_WU_BIT;
   3778 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3779 		    BM_PORT_GEN_CFG, reg);
   3780 	}
   3781 
   3782 	/* Configure the LCD with the extended configuration region in NVM */
   3783 	wm_init_lcd_from_nvm(sc);
   3784 
   3785 	/* Configure the LCD with the OEM bits in NVM */
   3786 }
   3787 
   3788 /* Only for PCH and newer */
   3789 static void
   3790 wm_write_smbus_addr(struct wm_softc *sc)
   3791 {
   3792 	uint32_t strap, freq;
   3793 	uint32_t phy_data;
   3794 
   3795 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3796 		device_xname(sc->sc_dev), __func__));
   3797 
   3798 	strap = CSR_READ(sc, WMREG_STRAP);
   3799 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3800 
   3801 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3802 
   3803 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3804 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3805 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3806 
   3807 	if (sc->sc_phytype == WMPHY_I217) {
    3808 		/* Restore SMBus frequency (strap value 0 = unsupported) */
    3809 		if (freq--) {
   3810 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3811 			    | HV_SMB_ADDR_FREQ_HIGH);
   3812 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3813 			    HV_SMB_ADDR_FREQ_LOW);
   3814 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3815 			    HV_SMB_ADDR_FREQ_HIGH);
   3816 		} else {
   3817 			DPRINTF(WM_DEBUG_INIT,
   3818 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3819 				device_xname(sc->sc_dev), __func__));
   3820 		}
   3821 	}
   3822 
   3823 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3824 }
   3825 
   3826 void
   3827 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3828 {
   3829 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3830 	uint16_t phy_page = 0;
   3831 
   3832 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3833 		device_xname(sc->sc_dev), __func__));
   3834 
   3835 	switch (sc->sc_type) {
   3836 	case WM_T_ICH8:
   3837 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3838 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3839 			return;
   3840 
   3841 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3842 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3843 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3844 			break;
   3845 		}
   3846 		/* FALLTHROUGH */
   3847 	case WM_T_PCH:
   3848 	case WM_T_PCH2:
   3849 	case WM_T_PCH_LPT:
   3850 	case WM_T_PCH_SPT:
   3851 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3852 		break;
   3853 	default:
   3854 		return;
   3855 	}
   3856 
   3857 	sc->phy.acquire(sc);
   3858 
   3859 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3860 	if ((reg & sw_cfg_mask) == 0)
   3861 		goto release;
   3862 
   3863 	/*
   3864 	 * Make sure HW does not configure LCD from PHY extended configuration
   3865 	 * before SW configuration
   3866 	 */
   3867 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3868 	if ((sc->sc_type < WM_T_PCH2)
   3869 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3870 		goto release;
   3871 
   3872 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3873 		device_xname(sc->sc_dev), __func__));
   3874 	/* word_addr is in DWORD */
   3875 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3876 
   3877 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3878 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3879 
   3880 	if (((sc->sc_type == WM_T_PCH)
   3881 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3882 	    || (sc->sc_type > WM_T_PCH)) {
   3883 		/*
   3884 		 * HW configures the SMBus address and LEDs when the OEM and
   3885 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3886 		 * are cleared, SW will configure them instead.
   3887 		 */
   3888 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3889 			device_xname(sc->sc_dev), __func__));
   3890 		wm_write_smbus_addr(sc);
   3891 
   3892 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3893 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3894 	}
   3895 
   3896 	/* Configure LCD from extended configuration region. */
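         	/* Each entry in the region is a (data, register address) word pair. */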
   3897 	for (i = 0; i < cnf_size; i++) {
   3898 		uint16_t reg_data, reg_addr;
   3899 
   3900 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3901 			goto release;
   3902 
    3903 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3904 			goto release;
   3905 
   3906 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3907 			phy_page = reg_data;
   3908 
   3909 		reg_addr &= IGPHY_MAXREGADDR;
   3910 		reg_addr |= phy_page;
   3911 
   3912 		sc->phy.release(sc); /* XXX */
   3913 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3914 		sc->phy.acquire(sc); /* XXX */
   3915 	}
   3916 
   3917 release:
   3918 	sc->phy.release(sc);
   3919 	return;
   3920 }
   3921 
   3922 
   3923 /* Init hardware bits */
   3924 void
   3925 wm_initialize_hardware_bits(struct wm_softc *sc)
   3926 {
   3927 	uint32_t tarc0, tarc1, reg;
   3928 
   3929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3930 		device_xname(sc->sc_dev), __func__));
   3931 
   3932 	/* For 82571 variant, 80003 and ICHs */
   3933 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3934 	    || (sc->sc_type >= WM_T_80003)) {
   3935 
   3936 		/* Transmit Descriptor Control 0 */
   3937 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3938 		reg |= TXDCTL_COUNT_DESC;
   3939 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3940 
   3941 		/* Transmit Descriptor Control 1 */
   3942 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3943 		reg |= TXDCTL_COUNT_DESC;
   3944 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3945 
   3946 		/* TARC0 */
   3947 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3948 		switch (sc->sc_type) {
   3949 		case WM_T_82571:
   3950 		case WM_T_82572:
   3951 		case WM_T_82573:
   3952 		case WM_T_82574:
   3953 		case WM_T_82583:
   3954 		case WM_T_80003:
   3955 			/* Clear bits 30..27 */
   3956 			tarc0 &= ~__BITS(30, 27);
   3957 			break;
   3958 		default:
   3959 			break;
   3960 		}
   3961 
   3962 		switch (sc->sc_type) {
   3963 		case WM_T_82571:
   3964 		case WM_T_82572:
   3965 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3966 
   3967 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3968 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3969 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3970 			/* 8257[12] Errata No.7 */
    3971 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3972 
   3973 			/* TARC1 bit 28 */
   3974 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3975 				tarc1 &= ~__BIT(28);
   3976 			else
   3977 				tarc1 |= __BIT(28);
   3978 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3979 
   3980 			/*
   3981 			 * 8257[12] Errata No.13
    3982 			 * Disable Dynamic Clock Gating.
   3983 			 */
   3984 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3985 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3986 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3987 			break;
   3988 		case WM_T_82573:
   3989 		case WM_T_82574:
   3990 		case WM_T_82583:
   3991 			if ((sc->sc_type == WM_T_82574)
   3992 			    || (sc->sc_type == WM_T_82583))
   3993 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3994 
   3995 			/* Extended Device Control */
   3996 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3997 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3998 			reg |= __BIT(22);	/* Set bit 22 */
   3999 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4000 
   4001 			/* Device Control */
   4002 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4003 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4004 
   4005 			/* PCIe Control Register */
   4006 			/*
   4007 			 * 82573 Errata (unknown).
   4008 			 *
   4009 			 * 82574 Errata 25 and 82583 Errata 12
   4010 			 * "Dropped Rx Packets":
   4011 			 *   NVM Image Version 2.1.4 and newer has no this bug.
    4012 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4013 			reg = CSR_READ(sc, WMREG_GCR);
   4014 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4015 			CSR_WRITE(sc, WMREG_GCR, reg);
   4016 
   4017 			if ((sc->sc_type == WM_T_82574)
   4018 			    || (sc->sc_type == WM_T_82583)) {
   4019 				/*
   4020 				 * Document says this bit must be set for
   4021 				 * proper operation.
   4022 				 */
   4023 				reg = CSR_READ(sc, WMREG_GCR);
   4024 				reg |= __BIT(22);
   4025 				CSR_WRITE(sc, WMREG_GCR, reg);
   4026 
   4027 				/*
    4028 				 * Apply a workaround for a hardware erratum
    4029 				 * documented in the errata docs: some
    4030 				 * error-prone or unreliable PCIe completions
    4031 				 * occur, particularly with ASPM enabled.
    4032 				 * Without the fix, the issue can cause Tx
    4033 				 * timeouts.
   4034 				 */
   4035 				reg = CSR_READ(sc, WMREG_GCR2);
   4036 				reg |= __BIT(0);
   4037 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4038 			}
   4039 			break;
   4040 		case WM_T_80003:
   4041 			/* TARC0 */
   4042 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4043 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4044 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4045 
   4046 			/* TARC1 bit 28 */
   4047 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4048 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4049 				tarc1 &= ~__BIT(28);
   4050 			else
   4051 				tarc1 |= __BIT(28);
   4052 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4053 			break;
   4054 		case WM_T_ICH8:
   4055 		case WM_T_ICH9:
   4056 		case WM_T_ICH10:
   4057 		case WM_T_PCH:
   4058 		case WM_T_PCH2:
   4059 		case WM_T_PCH_LPT:
   4060 		case WM_T_PCH_SPT:
   4061 			/* TARC0 */
   4062 			if (sc->sc_type == WM_T_ICH8) {
   4063 				/* Set TARC0 bits 29 and 28 */
   4064 				tarc0 |= __BITS(29, 28);
   4065 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4066 				tarc0 |= __BIT(29);
   4067 				/*
   4068 				 *  Drop bit 28. From Linux.
   4069 				 * See I218/I219 spec update
   4070 				 * "5. Buffer Overrun While the I219 is
   4071 				 * Processing DMA Transactions"
   4072 				 */
   4073 				tarc0 &= ~__BIT(28);
   4074 			}
   4075 			/* Set TARC0 bits 23,24,26,27 */
   4076 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4077 
   4078 			/* CTRL_EXT */
   4079 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4080 			reg |= __BIT(22);	/* Set bit 22 */
   4081 			/*
   4082 			 * Enable PHY low-power state when MAC is at D3
   4083 			 * w/o WoL
   4084 			 */
   4085 			if (sc->sc_type >= WM_T_PCH)
   4086 				reg |= CTRL_EXT_PHYPDEN;
   4087 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4088 
   4089 			/* TARC1 */
   4090 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4091 			/* bit 28 */
   4092 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4093 				tarc1 &= ~__BIT(28);
   4094 			else
   4095 				tarc1 |= __BIT(28);
   4096 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4097 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4098 
   4099 			/* Device Status */
   4100 			if (sc->sc_type == WM_T_ICH8) {
   4101 				reg = CSR_READ(sc, WMREG_STATUS);
   4102 				reg &= ~__BIT(31);
   4103 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4104 
   4105 			}
   4106 
   4107 			/* IOSFPC */
   4108 			if (sc->sc_type == WM_T_PCH_SPT) {
   4109 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4110 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4111 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4112 			}
   4113 			/*
    4114 			 * To work around a descriptor data corruption issue
    4115 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4116 			 * filtering capability.
   4117 			 */
   4118 			reg = CSR_READ(sc, WMREG_RFCTL);
   4119 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4120 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4121 			break;
   4122 		default:
   4123 			break;
   4124 		}
   4125 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4126 
   4127 		switch (sc->sc_type) {
   4128 		/*
   4129 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4130 		 * Avoid RSS Hash Value bug.
   4131 		 */
   4132 		case WM_T_82571:
   4133 		case WM_T_82572:
   4134 		case WM_T_82573:
   4135 		case WM_T_80003:
   4136 		case WM_T_ICH8:
   4137 			reg = CSR_READ(sc, WMREG_RFCTL);
    4138 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4139 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4140 			break;
   4141 		case WM_T_82574:
    4142 			/* Use extended Rx descriptors. */
   4143 			reg = CSR_READ(sc, WMREG_RFCTL);
   4144 			reg |= WMREG_RFCTL_EXSTEN;
   4145 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4146 			break;
   4147 		default:
   4148 			break;
   4149 		}
   4150 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4151 		/*
   4152 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4153 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4154 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4155 		 * Correctly by the Device"
   4156 		 *
   4157 		 * I354(C2000) Errata AVR53:
   4158 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4159 		 * Hang"
   4160 		 */
   4161 		reg = CSR_READ(sc, WMREG_RFCTL);
   4162 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4163 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4164 	}
   4165 }
   4166 
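         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate a raw RXPBS register value into a packet buffer
          *	size using the 82580 lookup table.
          */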
   4167 static uint32_t
   4168 wm_rxpbs_adjust_82580(uint32_t val)
   4169 {
   4170 	uint32_t rv = 0;
   4171 
   4172 	if (val < __arraycount(wm_82580_rxpbs_table))
   4173 		rv = wm_82580_rxpbs_table[val];
   4174 
   4175 	return rv;
   4176 }
   4177 
   4178 /*
   4179  * wm_reset_phy:
   4180  *
   4181  *	generic PHY reset function.
   4182  *	Same as e1000_phy_hw_reset_generic()
   4183  */
   4184 static void
   4185 wm_reset_phy(struct wm_softc *sc)
   4186 {
   4187 	uint32_t reg;
   4188 
   4189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4190 		device_xname(sc->sc_dev), __func__));
   4191 	if (wm_phy_resetisblocked(sc))
   4192 		return;
   4193 
   4194 	sc->phy.acquire(sc);
   4195 
   4196 	reg = CSR_READ(sc, WMREG_CTRL);
   4197 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4198 	CSR_WRITE_FLUSH(sc);
   4199 
   4200 	delay(sc->phy.reset_delay_us);
   4201 
   4202 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4203 	CSR_WRITE_FLUSH(sc);
   4204 
   4205 	delay(150);
   4206 
   4207 	sc->phy.release(sc);
   4208 
   4209 	wm_get_cfg_done(sc);
   4210 	wm_phy_post_reset(sc);
   4211 }
   4212 
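
         /*
          * wm_flush_desc_rings:
          *
          *	Flush the TX and RX descriptor rings before a reset if the
          *	hardware reports that a flush is required
          *	(WM_PCI_DESCRING_STATUS). Apparently a workaround for
          *	PCH_SPT (I219) devices.
          */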
   4213 static void
   4214 wm_flush_desc_rings(struct wm_softc *sc)
   4215 {
   4216 	pcireg_t preg;
   4217 	uint32_t reg;
   4218 	struct wm_txqueue *txq;
   4219 	wiseman_txdesc_t *txd;
   4220 	int nexttx;
   4221 	uint32_t rctl;
   4222 
   4223 	/* First, disable MULR fix in FEXTNVM11 */
   4224 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4225 	reg |= FEXTNVM11_DIS_MULRFIX;
   4226 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4227 
   4228 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4229 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4230 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4231 		return;
   4232 
   4233 	/* TX */
   4234 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4235 	    device_xname(sc->sc_dev), preg, reg);
   4236 	reg = CSR_READ(sc, WMREG_TCTL);
   4237 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4238 
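         	/*
         	 * Queue a single dummy descriptor and advance the tail
         	 * pointer so that the hardware drains the TX ring.
         	 */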
   4239 	txq = &sc->sc_queue[0].wmq_txq;
   4240 	nexttx = txq->txq_next;
   4241 	txd = &txq->txq_descs[nexttx];
   4242 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4243 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4244 	txd->wtx_fields.wtxu_status = 0;
   4245 	txd->wtx_fields.wtxu_options = 0;
   4246 	txd->wtx_fields.wtxu_vlan = 0;
   4247 
   4248 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4249 	    BUS_SPACE_BARRIER_WRITE);
   4250 
   4251 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4252 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4253 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4254 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4255 	delay(250);
   4256 
   4257 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4258 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4259 		return;
   4260 
   4261 	/* RX */
   4262 	printf("%s: Need RX flush (reg = %08x)\n",
   4263 	    device_xname(sc->sc_dev), preg);
   4264 	rctl = CSR_READ(sc, WMREG_RCTL);
   4265 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4266 	CSR_WRITE_FLUSH(sc);
   4267 	delay(150);
   4268 
   4269 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4270 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4271 	reg &= 0xffffc000;
   4272 	/*
   4273 	 * update thresholds: prefetch threshold to 31, host threshold
   4274 	 * to 1 and make sure the granularity is "descriptors" and not
   4275 	 * "cache lines"
   4276 	 */
   4277 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4278 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4279 
   4280 	/*
   4281 	 * momentarily enable the RX ring for the changes to take
   4282 	 * effect
   4283 	 */
   4284 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4285 	CSR_WRITE_FLUSH(sc);
   4286 	delay(150);
   4287 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4288 }
   4289 
   4290 /*
   4291  * wm_reset:
   4292  *
   4293  *	Reset the i82542 chip.
   4294  */
   4295 static void
   4296 wm_reset(struct wm_softc *sc)
   4297 {
   4298 	int phy_reset = 0;
   4299 	int i, error = 0;
   4300 	uint32_t reg;
   4301 	uint16_t kmreg;
   4302 	int rv;
   4303 
   4304 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4305 		device_xname(sc->sc_dev), __func__));
   4306 	KASSERT(sc->sc_type != 0);
   4307 
   4308 	/*
   4309 	 * Allocate on-chip memory according to the MTU size.
   4310 	 * The Packet Buffer Allocation register must be written
   4311 	 * before the chip is reset.
   4312 	 */
   4313 	switch (sc->sc_type) {
   4314 	case WM_T_82547:
   4315 	case WM_T_82547_2:
   4316 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4317 		    PBA_22K : PBA_30K;
   4318 		for (i = 0; i < sc->sc_nqueues; i++) {
   4319 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4320 			txq->txq_fifo_head = 0;
   4321 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4322 			txq->txq_fifo_size =
   4323 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4324 			txq->txq_fifo_stall = 0;
   4325 		}
   4326 		break;
   4327 	case WM_T_82571:
   4328 	case WM_T_82572:
    4329 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4330 	case WM_T_80003:
   4331 		sc->sc_pba = PBA_32K;
   4332 		break;
   4333 	case WM_T_82573:
   4334 		sc->sc_pba = PBA_12K;
   4335 		break;
   4336 	case WM_T_82574:
   4337 	case WM_T_82583:
   4338 		sc->sc_pba = PBA_20K;
   4339 		break;
   4340 	case WM_T_82576:
   4341 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4342 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4343 		break;
   4344 	case WM_T_82580:
   4345 	case WM_T_I350:
   4346 	case WM_T_I354:
   4347 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4348 		break;
   4349 	case WM_T_I210:
   4350 	case WM_T_I211:
   4351 		sc->sc_pba = PBA_34K;
   4352 		break;
   4353 	case WM_T_ICH8:
   4354 		/* Workaround for a bit corruption issue in FIFO memory */
   4355 		sc->sc_pba = PBA_8K;
   4356 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4357 		break;
   4358 	case WM_T_ICH9:
   4359 	case WM_T_ICH10:
   4360 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4361 		    PBA_14K : PBA_10K;
   4362 		break;
   4363 	case WM_T_PCH:
   4364 	case WM_T_PCH2:
   4365 	case WM_T_PCH_LPT:
   4366 	case WM_T_PCH_SPT:
   4367 		sc->sc_pba = PBA_26K;
   4368 		break;
   4369 	default:
   4370 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4371 		    PBA_40K : PBA_48K;
   4372 		break;
   4373 	}
   4374 	/*
    4375 	 * Only old or non-multiqueue devices have the PBA register.
   4376 	 * XXX Need special handling for 82575.
   4377 	 */
   4378 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4379 	    || (sc->sc_type == WM_T_82575))
   4380 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4381 
   4382 	/* Prevent the PCI-E bus from sticking */
   4383 	if (sc->sc_flags & WM_F_PCIE) {
   4384 		int timeout = 800;
   4385 
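         		/*
         		 * Set GIO Master Disable and wait until in-flight
         		 * master requests have drained (STATUS_GIO_M_ENA
         		 * clears).
         		 */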
   4386 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4387 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4388 
   4389 		while (timeout--) {
   4390 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4391 			    == 0)
   4392 				break;
   4393 			delay(100);
   4394 		}
   4395 		if (timeout == 0)
   4396 			device_printf(sc->sc_dev,
   4397 			    "failed to disable busmastering\n");
   4398 	}
   4399 
   4400 	/* Set the completion timeout for interface */
   4401 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4402 	    || (sc->sc_type == WM_T_82580)
   4403 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4404 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4405 		wm_set_pcie_completion_timeout(sc);
   4406 
   4407 	/* Clear interrupt */
   4408 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4409 	if (wm_is_using_msix(sc)) {
   4410 		if (sc->sc_type != WM_T_82574) {
   4411 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4412 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4413 		} else {
   4414 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4415 		}
   4416 	}
   4417 
   4418 	/* Stop the transmit and receive processes. */
   4419 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4420 	sc->sc_rctl &= ~RCTL_EN;
   4421 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4422 	CSR_WRITE_FLUSH(sc);
   4423 
   4424 	/* XXX set_tbi_sbp_82543() */
   4425 
   4426 	delay(10*1000);
   4427 
   4428 	/* Must acquire the MDIO ownership before MAC reset */
   4429 	switch (sc->sc_type) {
   4430 	case WM_T_82573:
   4431 	case WM_T_82574:
   4432 	case WM_T_82583:
   4433 		error = wm_get_hw_semaphore_82573(sc);
   4434 		break;
   4435 	default:
   4436 		break;
   4437 	}
   4438 
   4439 	/*
   4440 	 * 82541 Errata 29? & 82547 Errata 28?
   4441 	 * See also the description about PHY_RST bit in CTRL register
   4442 	 * in 8254x_GBe_SDM.pdf.
   4443 	 */
   4444 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4445 		CSR_WRITE(sc, WMREG_CTRL,
   4446 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4447 		CSR_WRITE_FLUSH(sc);
   4448 		delay(5000);
   4449 	}
   4450 
   4451 	switch (sc->sc_type) {
   4452 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4453 	case WM_T_82541:
   4454 	case WM_T_82541_2:
   4455 	case WM_T_82547:
   4456 	case WM_T_82547_2:
   4457 		/*
   4458 		 * On some chipsets, a reset through a memory-mapped write
   4459 		 * cycle can cause the chip to reset before completing the
   4460 		 * write cycle.  This causes major headache that can be
   4461 		 * avoided by issuing the reset via indirect register writes
   4462 		 * through I/O space.
   4463 		 *
   4464 		 * So, if we successfully mapped the I/O BAR at attach time,
   4465 		 * use that.  Otherwise, try our luck with a memory-mapped
   4466 		 * reset.
   4467 		 */
   4468 		if (sc->sc_flags & WM_F_IOH_VALID)
   4469 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4470 		else
   4471 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4472 		break;
   4473 	case WM_T_82545_3:
   4474 	case WM_T_82546_3:
   4475 		/* Use the shadow control register on these chips. */
   4476 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4477 		break;
   4478 	case WM_T_80003:
   4479 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4480 		sc->phy.acquire(sc);
   4481 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4482 		sc->phy.release(sc);
   4483 		break;
   4484 	case WM_T_ICH8:
   4485 	case WM_T_ICH9:
   4486 	case WM_T_ICH10:
   4487 	case WM_T_PCH:
   4488 	case WM_T_PCH2:
   4489 	case WM_T_PCH_LPT:
   4490 	case WM_T_PCH_SPT:
   4491 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4492 		if (wm_phy_resetisblocked(sc) == false) {
   4493 			/*
   4494 			 * Gate automatic PHY configuration by hardware on
   4495 			 * non-managed 82579
   4496 			 */
   4497 			if ((sc->sc_type == WM_T_PCH2)
   4498 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4499 				== 0))
   4500 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4501 
   4502 			reg |= CTRL_PHY_RESET;
   4503 			phy_reset = 1;
   4504 		} else
    4505 			device_printf(sc->sc_dev, "XXX reset is blocked\n");
   4506 		sc->phy.acquire(sc);
   4507 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4508 		/* Don't insert a completion barrier while resetting */
   4509 		delay(20*1000);
   4510 		mutex_exit(sc->sc_ich_phymtx);
   4511 		break;
   4512 	case WM_T_82580:
   4513 	case WM_T_I350:
   4514 	case WM_T_I354:
   4515 	case WM_T_I210:
   4516 	case WM_T_I211:
   4517 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4518 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4519 			CSR_WRITE_FLUSH(sc);
   4520 		delay(5000);
   4521 		break;
   4522 	case WM_T_82542_2_0:
   4523 	case WM_T_82542_2_1:
   4524 	case WM_T_82543:
   4525 	case WM_T_82540:
   4526 	case WM_T_82545:
   4527 	case WM_T_82546:
   4528 	case WM_T_82571:
   4529 	case WM_T_82572:
   4530 	case WM_T_82573:
   4531 	case WM_T_82574:
   4532 	case WM_T_82575:
   4533 	case WM_T_82576:
   4534 	case WM_T_82583:
   4535 	default:
   4536 		/* Everything else can safely use the documented method. */
   4537 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4538 		break;
   4539 	}
   4540 
   4541 	/* Must release the MDIO ownership after MAC reset */
   4542 	switch (sc->sc_type) {
   4543 	case WM_T_82573:
   4544 	case WM_T_82574:
   4545 	case WM_T_82583:
   4546 		if (error == 0)
   4547 			wm_put_hw_semaphore_82573(sc);
   4548 		break;
   4549 	default:
   4550 		break;
   4551 	}
   4552 
   4553 	if (phy_reset != 0)
   4554 		wm_get_cfg_done(sc);
   4555 
   4556 	/* reload EEPROM */
   4557 	switch (sc->sc_type) {
   4558 	case WM_T_82542_2_0:
   4559 	case WM_T_82542_2_1:
   4560 	case WM_T_82543:
   4561 	case WM_T_82544:
   4562 		delay(10);
   4563 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4564 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4565 		CSR_WRITE_FLUSH(sc);
   4566 		delay(2000);
   4567 		break;
   4568 	case WM_T_82540:
   4569 	case WM_T_82545:
   4570 	case WM_T_82545_3:
   4571 	case WM_T_82546:
   4572 	case WM_T_82546_3:
   4573 		delay(5*1000);
   4574 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4575 		break;
   4576 	case WM_T_82541:
   4577 	case WM_T_82541_2:
   4578 	case WM_T_82547:
   4579 	case WM_T_82547_2:
   4580 		delay(20000);
   4581 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4582 		break;
   4583 	case WM_T_82571:
   4584 	case WM_T_82572:
   4585 	case WM_T_82573:
   4586 	case WM_T_82574:
   4587 	case WM_T_82583:
   4588 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4589 			delay(10);
   4590 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4591 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4592 			CSR_WRITE_FLUSH(sc);
   4593 		}
   4594 		/* check EECD_EE_AUTORD */
   4595 		wm_get_auto_rd_done(sc);
   4596 		/*
   4597 		 * PHY configuration from the NVM starts only after
   4598 		 * EECD_AUTO_RD is set.
   4599 		 */
   4600 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4601 		    || (sc->sc_type == WM_T_82583))
   4602 			delay(25*1000);
   4603 		break;
   4604 	case WM_T_82575:
   4605 	case WM_T_82576:
   4606 	case WM_T_82580:
   4607 	case WM_T_I350:
   4608 	case WM_T_I354:
   4609 	case WM_T_I210:
   4610 	case WM_T_I211:
   4611 	case WM_T_80003:
   4612 		/* check EECD_EE_AUTORD */
   4613 		wm_get_auto_rd_done(sc);
   4614 		break;
   4615 	case WM_T_ICH8:
   4616 	case WM_T_ICH9:
   4617 	case WM_T_ICH10:
   4618 	case WM_T_PCH:
   4619 	case WM_T_PCH2:
   4620 	case WM_T_PCH_LPT:
   4621 	case WM_T_PCH_SPT:
   4622 		break;
   4623 	default:
   4624 		panic("%s: unknown type\n", __func__);
   4625 	}
   4626 
   4627 	/* Check whether EEPROM is present or not */
   4628 	switch (sc->sc_type) {
   4629 	case WM_T_82575:
   4630 	case WM_T_82576:
   4631 	case WM_T_82580:
   4632 	case WM_T_I350:
   4633 	case WM_T_I354:
   4634 	case WM_T_ICH8:
   4635 	case WM_T_ICH9:
   4636 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4637 			/* Not found */
   4638 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4639 			if (sc->sc_type == WM_T_82575)
   4640 				wm_reset_init_script_82575(sc);
   4641 		}
   4642 		break;
   4643 	default:
   4644 		break;
   4645 	}
   4646 
   4647 	if (phy_reset != 0)
   4648 		wm_phy_post_reset(sc);
   4649 
   4650 	if ((sc->sc_type == WM_T_82580)
   4651 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4652 		/* clear global device reset status bit */
   4653 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4654 	}
   4655 
   4656 	/* Clear any pending interrupt events. */
   4657 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4658 	reg = CSR_READ(sc, WMREG_ICR);
   4659 	if (wm_is_using_msix(sc)) {
   4660 		if (sc->sc_type != WM_T_82574) {
   4661 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4662 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4663 		} else
   4664 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4665 	}
   4666 
   4667 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4668 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4669 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4670 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4671 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4672 		reg |= KABGTXD_BGSQLBIAS;
   4673 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4674 	}
   4675 
   4676 	/* reload sc_ctrl */
   4677 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4678 
   4679 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4680 		wm_set_eee_i350(sc);
   4681 
   4682 	/*
   4683 	 * For PCH, this write will make sure that any noise will be detected
   4684 	 * as a CRC error and be dropped rather than show up as a bad packet
   4685 	 * to the DMA engine.
   4686 	 */
   4687 	if (sc->sc_type == WM_T_PCH)
   4688 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4689 
   4690 	if (sc->sc_type >= WM_T_82544)
   4691 		CSR_WRITE(sc, WMREG_WUC, 0);
   4692 
   4693 	wm_reset_mdicnfg_82580(sc);
   4694 
   4695 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4696 		wm_pll_workaround_i210(sc);
   4697 
   4698 	if (sc->sc_type == WM_T_80003) {
   4699 		/* default to TRUE to enable the MDIC W/A */
   4700 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4701 
   4702 		rv = wm_kmrn_readreg(sc,
   4703 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4704 		if (rv == 0) {
   4705 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4706 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4707 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4708 			else
   4709 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4710 		}
   4711 	}
   4712 }
   4713 
   4714 /*
   4715  * wm_add_rxbuf:
   4716  *
   4717  *	Add a receive buffer to the indicated descriptor.
   4718  */
   4719 static int
   4720 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4721 {
   4722 	struct wm_softc *sc = rxq->rxq_sc;
   4723 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4724 	struct mbuf *m;
   4725 	int error;
   4726 
   4727 	KASSERT(mutex_owned(rxq->rxq_lock));
   4728 
   4729 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4730 	if (m == NULL)
   4731 		return ENOBUFS;
   4732 
   4733 	MCLGET(m, M_DONTWAIT);
   4734 	if ((m->m_flags & M_EXT) == 0) {
   4735 		m_freem(m);
   4736 		return ENOBUFS;
   4737 	}
   4738 
   4739 	if (rxs->rxs_mbuf != NULL)
   4740 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4741 
   4742 	rxs->rxs_mbuf = m;
   4743 
   4744 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4745 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4746 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4747 	if (error) {
   4748 		/* XXX XXX XXX */
   4749 		aprint_error_dev(sc->sc_dev,
   4750 		    "unable to load rx DMA map %d, error = %d\n",
   4751 		    idx, error);
   4752 		panic("wm_add_rxbuf");
   4753 	}
   4754 
   4755 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4756 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4757 
   4758 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4759 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4760 			wm_init_rxdesc(rxq, idx);
   4761 	} else
   4762 		wm_init_rxdesc(rxq, idx);
   4763 
   4764 	return 0;
   4765 }
   4766 
   4767 /*
   4768  * wm_rxdrain:
   4769  *
   4770  *	Drain the receive queue.
   4771  */
   4772 static void
   4773 wm_rxdrain(struct wm_rxqueue *rxq)
   4774 {
   4775 	struct wm_softc *sc = rxq->rxq_sc;
   4776 	struct wm_rxsoft *rxs;
   4777 	int i;
   4778 
   4779 	KASSERT(mutex_owned(rxq->rxq_lock));
   4780 
   4781 	for (i = 0; i < WM_NRXDESC; i++) {
   4782 		rxs = &rxq->rxq_soft[i];
   4783 		if (rxs->rxs_mbuf != NULL) {
   4784 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4785 			m_freem(rxs->rxs_mbuf);
   4786 			rxs->rxs_mbuf = NULL;
   4787 		}
   4788 	}
   4789 }
   4790 
   4791 
   4792 /*
   4793  * XXX copy from FreeBSD's sys/net/rss_config.c
   4794  */
   4795 /*
   4796  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4797  * effectiveness may be limited by algorithm choice and available entropy
   4798  * during the boot.
   4799  *
   4800  * XXXRW: And that we don't randomize it yet!
   4801  *
   4802  * This is the default Microsoft RSS specification key which is also
   4803  * the Chelsio T5 firmware default key.
   4804  */
   4805 #define RSS_KEYSIZE 40
   4806 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4807 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4808 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4809 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4810 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4811 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4812 };
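/*
 * Note, derived from the sizes here rather than from any datasheet text:
 * the 40-byte key above fills RSSRK_NUM_REGS 32-bit RSSRK registers
 * (40 / 4 = 10), which the CTASSERT in wm_init_rss() double-checks.
 */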
   4813 
   4814 /*
   4815  * The caller must pass an array of size sizeof(wm_rss_key).
   4816  *
   4817  * XXX
   4818  * As if_ixgbe may use this function, it should not be an
   4819  * if_wm-specific function.
   4820  */
   4821 static void
   4822 wm_rss_getkey(uint8_t *key)
   4823 {
   4824 
   4825 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4826 }
   4827 
   4828 /*
   4829  * Set up registers for RSS.
   4830  *
   4831  * XXX VMDq is not yet supported.
   4832  */
   4833 static void
   4834 wm_init_rss(struct wm_softc *sc)
   4835 {
   4836 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4837 	int i;
   4838 
   4839 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4840 
   4841 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4842 		int qid, reta_ent;
   4843 
   4844 		qid  = i % sc->sc_nqueues;
   4845 		switch (sc->sc_type) {
   4846 		case WM_T_82574:
   4847 			reta_ent = __SHIFTIN(qid,
   4848 			    RETA_ENT_QINDEX_MASK_82574);
   4849 			break;
   4850 		case WM_T_82575:
   4851 			reta_ent = __SHIFTIN(qid,
   4852 			    RETA_ENT_QINDEX1_MASK_82575);
   4853 			break;
   4854 		default:
   4855 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4856 			break;
   4857 		}
   4858 
   4859 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4860 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4861 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4862 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4863 	}
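	/*
	 * Illustration (example values, not from the datasheet): with
	 * sc_nqueues = 4, the redirection table entries cycle
	 * 0, 1, 2, 3, 0, 1, ..., so hashed flows spread evenly across
	 * the four queues.
	 */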
   4864 
   4865 	wm_rss_getkey((uint8_t *)rss_key);
   4866 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4867 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4868 
   4869 	if (sc->sc_type == WM_T_82574)
   4870 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4871 	else
   4872 		mrqc = MRQC_ENABLE_RSS_MQ;
   4873 
   4874 	/*
   4875 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4876 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4877 	 */
   4878 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4879 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4880 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4881 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4882 
   4883 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4884 }
   4885 
   4886 /*
   4887  * Adjust TX and RX queue numbers which the system actulally uses.
   4888  * Adjust the TX and RX queue numbers which the system actually uses.
   4889  *
   4890  * The numbers are affected by the parameters below:
   4891  *     - The number of hardware queues
   4892  *     - ncpu
   4893  */
   4894 static void
   4895 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4896 {
   4897 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4898 
   4899 	if (nvectors < 2) {
   4900 		sc->sc_nqueues = 1;
   4901 		return;
   4902 	}
   4903 
   4904 	switch (sc->sc_type) {
   4905 	case WM_T_82572:
   4906 		hw_ntxqueues = 2;
   4907 		hw_nrxqueues = 2;
   4908 		break;
   4909 	case WM_T_82574:
   4910 		hw_ntxqueues = 2;
   4911 		hw_nrxqueues = 2;
   4912 		break;
   4913 	case WM_T_82575:
   4914 		hw_ntxqueues = 4;
   4915 		hw_nrxqueues = 4;
   4916 		break;
   4917 	case WM_T_82576:
   4918 		hw_ntxqueues = 16;
   4919 		hw_nrxqueues = 16;
   4920 		break;
   4921 	case WM_T_82580:
   4922 	case WM_T_I350:
   4923 	case WM_T_I354:
   4924 		hw_ntxqueues = 8;
   4925 		hw_nrxqueues = 8;
   4926 		break;
   4927 	case WM_T_I210:
   4928 		hw_ntxqueues = 4;
   4929 		hw_nrxqueues = 4;
   4930 		break;
   4931 	case WM_T_I211:
   4932 		hw_ntxqueues = 2;
   4933 		hw_nrxqueues = 2;
   4934 		break;
   4935 		/*
   4936 		 * As the ethernet controllers below do not support MSI-X,
   4937 		 * this driver does not use multiqueue on them.
   4938 		 *     - WM_T_80003
   4939 		 *     - WM_T_ICH8
   4940 		 *     - WM_T_ICH9
   4941 		 *     - WM_T_ICH10
   4942 		 *     - WM_T_PCH
   4943 		 *     - WM_T_PCH2
   4944 		 *     - WM_T_PCH_LPT
   4945 		 */
   4946 	default:
   4947 		hw_ntxqueues = 1;
   4948 		hw_nrxqueues = 1;
   4949 		break;
   4950 	}
   4951 
   4952 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4953 
   4954 	/*
   4955 	 * As more queues than MSI-X vectors cannot improve scaling, we
   4956 	 * limit the number of queues actually used.
   4957 	 */
   4958 	if (nvectors < hw_nqueues + 1) {
   4959 		sc->sc_nqueues = nvectors - 1;
   4960 	} else {
   4961 		sc->sc_nqueues = hw_nqueues;
   4962 	}
   4963 
   4964 	/*
   4965 	 * As more queues than CPUs cannot improve scaling, we limit
   4966 	 * the number of queues actually used.
   4967 	 */
   4968 	if (ncpu < sc->sc_nqueues)
   4969 		sc->sc_nqueues = ncpu;
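	/*
	 * Worked example (illustrative values): an 82576 (16 hardware
	 * queues) given nvectors = 5 on an 8-CPU machine ends up with
	 * sc_nqueues = min(16, 5 - 1, 8) = 4.
	 */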
   4970 }
   4971 
   4972 static inline bool
   4973 wm_is_using_msix(struct wm_softc *sc)
   4974 {
   4975 
   4976 	return (sc->sc_nintrs > 1);
   4977 }
   4978 
   4979 static inline bool
   4980 wm_is_using_multiqueue(struct wm_softc *sc)
   4981 {
   4982 
   4983 	return (sc->sc_nqueues > 1);
   4984 }
   4985 
   4986 static int
   4987 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4988 {
   4989 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4990 	wmq->wmq_id = qidx;
   4991 	wmq->wmq_intr_idx = intr_idx;
   4992 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4993 #ifdef WM_MPSAFE
   4994 	    | SOFTINT_MPSAFE
   4995 #endif
   4996 	    , wm_handle_queue, wmq);
   4997 	if (wmq->wmq_si != NULL)
   4998 		return 0;
   4999 
   5000 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5001 	    wmq->wmq_id);
   5002 
   5003 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5004 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5005 	return ENOMEM;
   5006 }
   5007 
   5008 /*
   5009  * Both single-interrupt MSI and INTx can use this function.
   5010  */
   5011 static int
   5012 wm_setup_legacy(struct wm_softc *sc)
   5013 {
   5014 	pci_chipset_tag_t pc = sc->sc_pc;
   5015 	const char *intrstr = NULL;
   5016 	char intrbuf[PCI_INTRSTR_LEN];
   5017 	int error;
   5018 
   5019 	error = wm_alloc_txrx_queues(sc);
   5020 	if (error) {
   5021 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5022 		    error);
   5023 		return ENOMEM;
   5024 	}
   5025 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5026 	    sizeof(intrbuf));
   5027 #ifdef WM_MPSAFE
   5028 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5029 #endif
   5030 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5031 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5032 	if (sc->sc_ihs[0] == NULL) {
   5033 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5034 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5035 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5036 		return ENOMEM;
   5037 	}
   5038 
   5039 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5040 	sc->sc_nintrs = 1;
   5041 
   5042 	return wm_softint_establish(sc, 0, 0);
   5043 }
   5044 
   5045 static int
   5046 wm_setup_msix(struct wm_softc *sc)
   5047 {
   5048 	void *vih;
   5049 	kcpuset_t *affinity;
   5050 	int qidx, error, intr_idx, txrx_established;
   5051 	pci_chipset_tag_t pc = sc->sc_pc;
   5052 	const char *intrstr = NULL;
   5053 	char intrbuf[PCI_INTRSTR_LEN];
   5054 	char intr_xname[INTRDEVNAMEBUF];
   5055 
   5056 	if (sc->sc_nqueues < ncpu) {
   5057 		/*
   5058 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5059 		 * interrupts starts from CPU#1.
   5060 		 */
   5061 		sc->sc_affinity_offset = 1;
   5062 	} else {
   5063 		/*
   5064 		 * In this case, this device uses all CPUs, so we unify the
   5065 		 * affinity cpu_index with the MSI-X vector number for readability.
   5066 		 */
   5067 		sc->sc_affinity_offset = 0;
   5068 	}
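	/*
	 * Example (illustrative): with 4 queues on an 8-CPU machine,
	 * sc_affinity_offset = 1, so the queue interrupts land on
	 * CPU#1..CPU#4 and CPU#0 is left for other devices' interrupts.
	 */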
   5069 
   5070 	error = wm_alloc_txrx_queues(sc);
   5071 	if (error) {
   5072 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5073 		    error);
   5074 		return ENOMEM;
   5075 	}
   5076 
   5077 	kcpuset_create(&affinity, false);
   5078 	intr_idx = 0;
   5079 
   5080 	/*
   5081 	 * TX and RX
   5082 	 */
   5083 	txrx_established = 0;
   5084 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5085 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5086 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5087 
   5088 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5089 		    sizeof(intrbuf));
   5090 #ifdef WM_MPSAFE
   5091 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5092 		    PCI_INTR_MPSAFE, true);
   5093 #endif
   5094 		memset(intr_xname, 0, sizeof(intr_xname));
   5095 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5096 		    device_xname(sc->sc_dev), qidx);
   5097 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5098 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5099 		if (vih == NULL) {
   5100 			aprint_error_dev(sc->sc_dev,
   5101 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5102 			    intrstr ? " at " : "",
   5103 			    intrstr ? intrstr : "");
   5104 
   5105 			goto fail;
   5106 		}
   5107 		kcpuset_zero(affinity);
   5108 		/* Round-robin affinity */
   5109 		kcpuset_set(affinity, affinity_to);
   5110 		error = interrupt_distribute(vih, affinity, NULL);
   5111 		if (error == 0) {
   5112 			aprint_normal_dev(sc->sc_dev,
   5113 			    "for TX and RX interrupting at %s affinity to %u\n",
   5114 			    intrstr, affinity_to);
   5115 		} else {
   5116 			aprint_normal_dev(sc->sc_dev,
   5117 			    "for TX and RX interrupting at %s\n", intrstr);
   5118 		}
   5119 		sc->sc_ihs[intr_idx] = vih;
   5120 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5121 			goto fail;
   5122 		txrx_established++;
   5123 		intr_idx++;
   5124 	}
   5125 
   5126 	/*
   5127 	 * LINK
   5128 	 */
   5129 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5130 	    sizeof(intrbuf));
   5131 #ifdef WM_MPSAFE
   5132 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5133 #endif
   5134 	memset(intr_xname, 0, sizeof(intr_xname));
   5135 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5136 	    device_xname(sc->sc_dev));
   5137 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5138 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5139 	if (vih == NULL) {
   5140 		aprint_error_dev(sc->sc_dev,
   5141 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5142 		    intrstr ? " at " : "",
   5143 		    intrstr ? intrstr : "");
   5144 
   5145 		goto fail;
   5146 	}
   5147 	/* Keep the default affinity for the LINK interrupt. */
   5148 	aprint_normal_dev(sc->sc_dev,
   5149 	    "for LINK interrupting at %s\n", intrstr);
   5150 	sc->sc_ihs[intr_idx] = vih;
   5151 	sc->sc_link_intr_idx = intr_idx;
   5152 
   5153 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5154 	kcpuset_destroy(affinity);
   5155 	return 0;
   5156 
   5157  fail:
   5158 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5159 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5160 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5161 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5162 	}
   5163 
   5164 	kcpuset_destroy(affinity);
   5165 	return ENOMEM;
   5166 }
   5167 
   5168 static void
   5169 wm_unset_stopping_flags(struct wm_softc *sc)
   5170 {
   5171 	int i;
   5172 
   5173 	KASSERT(WM_CORE_LOCKED(sc));
   5174 
   5175 	/*
   5176 	 * Must unset the stopping flags in ascending order.
   5177 	 */
   5178 	for (i = 0; i < sc->sc_nqueues; i++) {
   5179 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5180 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5181 
   5182 		mutex_enter(txq->txq_lock);
   5183 		txq->txq_stopping = false;
   5184 		mutex_exit(txq->txq_lock);
   5185 
   5186 		mutex_enter(rxq->rxq_lock);
   5187 		rxq->rxq_stopping = false;
   5188 		mutex_exit(rxq->rxq_lock);
   5189 	}
   5190 
   5191 	sc->sc_core_stopping = false;
   5192 }
   5193 
   5194 static void
   5195 wm_set_stopping_flags(struct wm_softc *sc)
   5196 {
   5197 	int i;
   5198 
   5199 	KASSERT(WM_CORE_LOCKED(sc));
   5200 
   5201 	sc->sc_core_stopping = true;
   5202 
   5203 	/*
   5204 	 * Must set the stopping flags in ascending order.
   5205 	 */
   5206 	for (i = 0; i < sc->sc_nqueues; i++) {
   5207 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5208 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5209 
   5210 		mutex_enter(rxq->rxq_lock);
   5211 		rxq->rxq_stopping = true;
   5212 		mutex_exit(rxq->rxq_lock);
   5213 
   5214 		mutex_enter(txq->txq_lock);
   5215 		txq->txq_stopping = true;
   5216 		mutex_exit(txq->txq_lock);
   5217 	}
   5218 }
   5219 
   5220 /*
   5221  * Write the interrupt interval value to the ITR or EITR register.
   5222  */
   5223 static void
   5224 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5225 {
   5226 
   5227 	if (!wmq->wmq_set_itr)
   5228 		return;
   5229 
   5230 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5231 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5232 
   5233 		/*
   5234 		 * The 82575 doesn't have the CNT_INGR field,
   5235 		 * so overwrite the counter field in software.
   5236 		 */
   5237 		if (sc->sc_type == WM_T_82575)
   5238 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5239 		else
   5240 			eitr |= EITR_CNT_INGR;
   5241 
   5242 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5243 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5244 		/*
   5245 		 * The 82574 has both ITR and EITR. Set EITR when we use
   5246 		 * the multiqueue function with MSI-X.
   5247 		 */
   5248 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5249 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5250 	} else {
   5251 		KASSERT(wmq->wmq_id == 0);
   5252 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5253 	}
   5254 
   5255 	wmq->wmq_set_itr = false;
   5256 }
   5257 
   5258 /*
   5259  * TODO
   5260  * The dynamic ITR calculation below is almost the same as Linux's igb,
   5261  * but it does not fit wm(4). So, we keep AIM disabled until we find an
   5262  * appropriate ITR calculation.
   5263  */
   5264 /*
   5265  * Calculate the interrupt interval value to be written to the register by
   5266  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5267  */
   5268 static void
   5269 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5270 {
   5271 #ifdef NOTYET
   5272 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5273 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5274 	uint32_t avg_size = 0;
   5275 	uint32_t new_itr;
   5276 
   5277 	if (rxq->rxq_packets)
   5278 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5279 	if (txq->txq_packets)
   5280 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5281 
   5282 	if (avg_size == 0) {
   5283 		new_itr = 450; /* restore default value */
   5284 		goto out;
   5285 	}
   5286 
   5287 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5288 	avg_size += 24;
   5289 
   5290 	/* Don't starve jumbo frames */
   5291 	avg_size = min(avg_size, 3000);
   5292 
   5293 	/* Give a little boost to mid-size frames */
   5294 	if ((avg_size > 300) && (avg_size < 1200))
   5295 		new_itr = avg_size / 3;
   5296 	else
   5297 		new_itr = avg_size / 2;
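	/*
	 * Worked example (illustrative): avg_size = 600 bytes is
	 * mid-size, so new_itr = 600 / 3 = 200; the scaling below then
	 * yields 800 on everything except the 82575.
	 */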
   5298 
   5299 out:
   5300 	/*
   5301 	 * The usage of the 82574's and 82575's EITR differs from other NEWQUEUE
   5302 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5303 	 */
   5304 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5305 		new_itr *= 4;
   5306 
   5307 	if (new_itr != wmq->wmq_itr) {
   5308 		wmq->wmq_itr = new_itr;
   5309 		wmq->wmq_set_itr = true;
   5310 	} else
   5311 		wmq->wmq_set_itr = false;
   5312 
   5313 	rxq->rxq_packets = 0;
   5314 	rxq->rxq_bytes = 0;
   5315 	txq->txq_packets = 0;
   5316 	txq->txq_bytes = 0;
   5317 #endif
   5318 }
   5319 
   5320 /*
   5321  * wm_init:		[ifnet interface function]
   5322  *
   5323  *	Initialize the interface.
   5324  */
   5325 static int
   5326 wm_init(struct ifnet *ifp)
   5327 {
   5328 	struct wm_softc *sc = ifp->if_softc;
   5329 	int ret;
   5330 
   5331 	WM_CORE_LOCK(sc);
   5332 	ret = wm_init_locked(ifp);
   5333 	WM_CORE_UNLOCK(sc);
   5334 
   5335 	return ret;
   5336 }
   5337 
   5338 static int
   5339 wm_init_locked(struct ifnet *ifp)
   5340 {
   5341 	struct wm_softc *sc = ifp->if_softc;
   5342 	int i, j, trynum, error = 0;
   5343 	uint32_t reg;
   5344 
   5345 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5346 		device_xname(sc->sc_dev), __func__));
   5347 	KASSERT(WM_CORE_LOCKED(sc));
   5348 
   5349 	/*
   5350 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   5351 	 * There is a small but measurable benefit to avoiding the adjustment
   5352 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5353 	 * on such platforms.  One possibility is that the DMA itself is
   5354 	 * slightly more efficient if the front of the entire packet (instead
   5355 	 * of the front of the headers) is aligned.
   5356 	 *
   5357 	 * Note we must always set align_tweak to 0 if we are using
   5358 	 * jumbo frames.
   5359 	 */
   5360 #ifdef __NO_STRICT_ALIGNMENT
   5361 	sc->sc_align_tweak = 0;
   5362 #else
   5363 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5364 		sc->sc_align_tweak = 0;
   5365 	else
   5366 		sc->sc_align_tweak = 2;
   5367 #endif /* __NO_STRICT_ALIGNMENT */
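	/*
	 * Illustration (the usual ETHER_ALIGN reasoning, assumed here):
	 * with sc_align_tweak = 2, the 14-byte Ethernet header ends on
	 * a 4-byte boundary, so the IP header that follows is naturally
	 * aligned on strict-alignment platforms.
	 */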
   5368 
   5369 	/* Cancel any pending I/O. */
   5370 	wm_stop_locked(ifp, 0);
   5371 
   5372 	/* update statistics before reset */
   5373 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5374 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5375 
   5376 	/* PCH_SPT hardware workaround */
   5377 	if (sc->sc_type == WM_T_PCH_SPT)
   5378 		wm_flush_desc_rings(sc);
   5379 
   5380 	/* Reset the chip to a known state. */
   5381 	wm_reset(sc);
   5382 
   5383 	/*
   5384 	 * AMT based hardware can now take control from firmware
   5385 	 * Do this after reset.
   5386 	 */
   5387 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5388 		wm_get_hw_control(sc);
   5389 
   5390 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5391 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5392 		wm_legacy_irq_quirk_spt(sc);
   5393 
   5394 	/* Init hardware bits */
   5395 	wm_initialize_hardware_bits(sc);
   5396 
   5397 	/* Reset the PHY. */
   5398 	if (sc->sc_flags & WM_F_HAS_MII)
   5399 		wm_gmii_reset(sc);
   5400 
   5401 	/* Calculate (E)ITR value */
   5402 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5403 		/*
   5404 		 * For NEWQUEUE's EITR (except for 82575).
   5405 		 * For NEWQUEUE's EITR (except for the 82575). The 82575's
   5406 		 * EITR should be set to the same throttling value as the
   5407 		 * older controllers' ITR because the interrupts/sec
   5408 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
   5409 		 *
   5410 		 * The 82574's EITR should be set to the same throttling value
   5411 		 * as the ITR. For N interrupts/sec, set this value to
   5412 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5413 		 */
   5414 		sc->sc_itr_init = 450;
   5415 	} else if (sc->sc_type >= WM_T_82543) {
   5416 		/*
   5417 		 * Set up the interrupt throttling register (units of 256ns)
   5418 		 * Note that a footnote in Intel's documentation says this
   5419 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5420 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5421 		 * that that is also true for the 1024ns units of the other
   5422 		 * interrupt-related timer registers -- so, really, we ought
   5423 		 * to divide this value by 4 when the link speed is low.
   5424 		 *
   5425 		 * XXX implement this division at link speed change!
   5426 		 */
   5427 
   5428 		/*
   5429 		 * For N interrupts/sec, set this value to:
   5430 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5431 		 * absolute and packet timer values to this value
   5432 		 * divided by 4 to get "simple timer" behavior.
   5433 		 */
   5434 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
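		/*
		 * Arithmetic check for the value above:
		 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec,
		 * matching the annotation.
		 */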
   5435 	}
   5436 
   5437 	error = wm_init_txrx_queues(sc);
   5438 	if (error)
   5439 		goto out;
   5440 
   5441 	/*
   5442 	 * Clear out the VLAN table -- we don't use it (yet).
   5443 	 */
   5444 	CSR_WRITE(sc, WMREG_VET, 0);
   5445 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5446 		trynum = 10; /* Due to hw errata */
   5447 	else
   5448 		trynum = 1;
   5449 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5450 		for (j = 0; j < trynum; j++)
   5451 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5452 
   5453 	/*
   5454 	 * Set up flow-control parameters.
   5455 	 *
   5456 	 * XXX Values could probably stand some tuning.
   5457 	 */
   5458 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5459 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5460 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5461 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5462 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5463 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5464 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5465 	}
   5466 
   5467 	sc->sc_fcrtl = FCRTL_DFLT;
   5468 	if (sc->sc_type < WM_T_82543) {
   5469 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5470 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5471 	} else {
   5472 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5473 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5474 	}
   5475 
   5476 	if (sc->sc_type == WM_T_80003)
   5477 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5478 	else
   5479 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5480 
   5481 	/* Writes the control register. */
   5482 	wm_set_vlan(sc);
   5483 
   5484 	if (sc->sc_flags & WM_F_HAS_MII) {
   5485 		uint16_t kmreg;
   5486 
   5487 		switch (sc->sc_type) {
   5488 		case WM_T_80003:
   5489 		case WM_T_ICH8:
   5490 		case WM_T_ICH9:
   5491 		case WM_T_ICH10:
   5492 		case WM_T_PCH:
   5493 		case WM_T_PCH2:
   5494 		case WM_T_PCH_LPT:
   5495 		case WM_T_PCH_SPT:
   5496 			/*
   5497 			 * Set the MAC to wait the maximum time between each
   5498 			 * iteration and increase the max iterations when
   5499 			 * polling the phy; this fixes erroneous timeouts at
   5500 			 * 10Mbps.
   5501 			 */
   5502 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5503 			    0xFFFF);
   5504 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5505 			    &kmreg);
   5506 			kmreg |= 0x3F;
   5507 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5508 			    kmreg);
   5509 			break;
   5510 		default:
   5511 			break;
   5512 		}
   5513 
   5514 		if (sc->sc_type == WM_T_80003) {
   5515 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5516 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5517 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5518 
   5519 			/* Bypass RX and TX FIFO's */
   5520 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5521 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5522 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5523 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5524 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5525 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5526 		}
   5527 	}
   5528 #if 0
   5529 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5530 #endif
   5531 
   5532 	/* Set up checksum offload parameters. */
   5533 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5534 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5535 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5536 		reg |= RXCSUM_IPOFL;
   5537 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5538 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5539 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5540 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5541 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5542 
   5543 	/* Set registers about MSI-X */
   5544 	if (wm_is_using_msix(sc)) {
   5545 		uint32_t ivar;
   5546 		struct wm_queue *wmq;
   5547 		int qid, qintr_idx;
   5548 
   5549 		if (sc->sc_type == WM_T_82575) {
   5550 			/* Interrupt control */
   5551 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5552 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5553 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5554 
   5555 			/* TX and RX */
   5556 			for (i = 0; i < sc->sc_nqueues; i++) {
   5557 				wmq = &sc->sc_queue[i];
   5558 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5559 				    EITR_TX_QUEUE(wmq->wmq_id)
   5560 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5561 			}
   5562 			/* Link status */
   5563 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5564 			    EITR_OTHER);
   5565 		} else if (sc->sc_type == WM_T_82574) {
   5566 			/* Interrupt control */
   5567 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5568 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5569 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5570 
   5571 			/*
   5572 			 * Work around an issue with spurious interrupts
   5573 			 * in MSI-X mode.
   5574 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   5575 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5576 			 */
   5577 			reg = CSR_READ(sc, WMREG_RFCTL);
   5578 			reg |= WMREG_RFCTL_ACKDIS;
   5579 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5580 
   5581 			ivar = 0;
   5582 			/* TX and RX */
   5583 			for (i = 0; i < sc->sc_nqueues; i++) {
   5584 				wmq = &sc->sc_queue[i];
   5585 				qid = wmq->wmq_id;
   5586 				qintr_idx = wmq->wmq_intr_idx;
   5587 
   5588 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5589 				    IVAR_TX_MASK_Q_82574(qid));
   5590 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5591 				    IVAR_RX_MASK_Q_82574(qid));
   5592 			}
   5593 			/* Link status */
   5594 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5595 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5596 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5597 		} else {
   5598 			/* Interrupt control */
   5599 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5600 			    | GPIE_EIAME | GPIE_PBA);
   5601 
   5602 			switch (sc->sc_type) {
   5603 			case WM_T_82580:
   5604 			case WM_T_I350:
   5605 			case WM_T_I354:
   5606 			case WM_T_I210:
   5607 			case WM_T_I211:
   5608 				/* TX and RX */
   5609 				for (i = 0; i < sc->sc_nqueues; i++) {
   5610 					wmq = &sc->sc_queue[i];
   5611 					qid = wmq->wmq_id;
   5612 					qintr_idx = wmq->wmq_intr_idx;
   5613 
   5614 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5615 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5616 					ivar |= __SHIFTIN((qintr_idx
   5617 						| IVAR_VALID),
   5618 					    IVAR_TX_MASK_Q(qid));
   5619 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5620 					ivar |= __SHIFTIN((qintr_idx
   5621 						| IVAR_VALID),
   5622 					    IVAR_RX_MASK_Q(qid));
   5623 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5624 				}
   5625 				break;
   5626 			case WM_T_82576:
   5627 				/* TX and RX */
   5628 				for (i = 0; i < sc->sc_nqueues; i++) {
   5629 					wmq = &sc->sc_queue[i];
   5630 					qid = wmq->wmq_id;
   5631 					qintr_idx = wmq->wmq_intr_idx;
   5632 
   5633 					ivar = CSR_READ(sc,
   5634 					    WMREG_IVAR_Q_82576(qid));
   5635 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5636 					ivar |= __SHIFTIN((qintr_idx
   5637 						| IVAR_VALID),
   5638 					    IVAR_TX_MASK_Q_82576(qid));
   5639 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5640 					ivar |= __SHIFTIN((qintr_idx
   5641 						| IVAR_VALID),
   5642 					    IVAR_RX_MASK_Q_82576(qid));
   5643 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5644 					    ivar);
   5645 				}
   5646 				break;
   5647 			default:
   5648 				break;
   5649 			}
   5650 
   5651 			/* Link status */
   5652 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5653 			    IVAR_MISC_OTHER);
   5654 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5655 		}
   5656 
   5657 		if (wm_is_using_multiqueue(sc)) {
   5658 			wm_init_rss(sc);
   5659 
   5660 			/*
   5661 			 * NOTE: Receive Full-Packet Checksum Offload is
   5662 			 * mutually exclusive with Multiqueue; however, this
   5663 			 * is not the same as the TCP/IP checksums, which
   5664 			 * still work.
   5665 			 */
   5666 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5667 			reg |= RXCSUM_PCSD;
   5668 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5669 		}
   5670 	}
   5671 
   5672 	/* Set up the interrupt registers. */
   5673 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5674 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5675 	    ICR_RXO | ICR_RXT0;
   5676 	if (wm_is_using_msix(sc)) {
   5677 		uint32_t mask;
   5678 		struct wm_queue *wmq;
   5679 
   5680 		switch (sc->sc_type) {
   5681 		case WM_T_82574:
   5682 			mask = 0;
   5683 			for (i = 0; i < sc->sc_nqueues; i++) {
   5684 				wmq = &sc->sc_queue[i];
   5685 				mask |= ICR_TXQ(wmq->wmq_id);
   5686 				mask |= ICR_RXQ(wmq->wmq_id);
   5687 			}
   5688 			mask |= ICR_OTHER;
   5689 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5690 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5691 			break;
   5692 		default:
   5693 			if (sc->sc_type == WM_T_82575) {
   5694 				mask = 0;
   5695 				for (i = 0; i < sc->sc_nqueues; i++) {
   5696 					wmq = &sc->sc_queue[i];
   5697 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5698 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5699 				}
   5700 				mask |= EITR_OTHER;
   5701 			} else {
   5702 				mask = 0;
   5703 				for (i = 0; i < sc->sc_nqueues; i++) {
   5704 					wmq = &sc->sc_queue[i];
   5705 					mask |= 1 << wmq->wmq_intr_idx;
   5706 				}
   5707 				mask |= 1 << sc->sc_link_intr_idx;
   5708 			}
   5709 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5710 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5711 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5712 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5713 			break;
   5714 		}
   5715 	} else
   5716 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5717 
   5718 	/* Set up the inter-packet gap. */
   5719 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5720 
   5721 	if (sc->sc_type >= WM_T_82543) {
   5722 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5723 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5724 			wm_itrs_writereg(sc, wmq);
   5725 		}
   5726 		/*
   5727 		 * Link interrupts occur much less frequently than TX
   5728 		 * and RX interrupts. So, we don't tune the
   5729 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
   5730 		 * FreeBSD's if_igb does.
   5731 		 */
   5732 	}
   5733 
   5734 	/* Set the VLAN ethernetype. */
   5735 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5736 
   5737 	/*
   5738 	 * Set up the transmit control register; we start out with
   5739 	 * a collision distance suitable for FDX, but update it when
   5740 	 * we resolve the media type.
   5741 	 */
   5742 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5743 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5744 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5745 	if (sc->sc_type >= WM_T_82571)
   5746 		sc->sc_tctl |= TCTL_MULR;
   5747 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5748 
   5749 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5750 		/* Write TDT after TCTL.EN is set. See the document. */
   5751 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5752 	}
   5753 
   5754 	if (sc->sc_type == WM_T_80003) {
   5755 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5756 		reg &= ~TCTL_EXT_GCEX_MASK;
   5757 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5758 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5759 	}
   5760 
   5761 	/* Set the media. */
   5762 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5763 		goto out;
   5764 
   5765 	/* Configure for OS presence */
   5766 	wm_init_manageability(sc);
   5767 
   5768 	/*
   5769 	 * Set up the receive control register; we actually program
   5770 	 * the register when we set the receive filter.  Use multicast
   5771 	 * address offset type 0.
   5772 	 *
   5773 	 * Only the i82544 has the ability to strip the incoming
   5774 	 * CRC, so we don't enable that feature.
   5775 	 */
   5776 	sc->sc_mchash_type = 0;
   5777 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5778 	    | RCTL_MO(sc->sc_mchash_type);
   5779 
   5780 	/*
   5781 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5782 	 */
   5783 	if (sc->sc_type == WM_T_82574)
   5784 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5785 
   5786 	/*
   5787 	 * The I350 has a bug where it always strips the CRC whether
   5788 	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
   5789 	 */
   5790 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5791 	    || (sc->sc_type == WM_T_I210))
   5792 		sc->sc_rctl |= RCTL_SECRC;
   5793 
   5794 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5795 	    && (ifp->if_mtu > ETHERMTU)) {
   5796 		sc->sc_rctl |= RCTL_LPE;
   5797 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5798 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5799 	}
   5800 
   5801 	if (MCLBYTES == 2048) {
   5802 		sc->sc_rctl |= RCTL_2k;
   5803 	} else {
   5804 		if (sc->sc_type >= WM_T_82543) {
   5805 			switch (MCLBYTES) {
   5806 			case 4096:
   5807 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5808 				break;
   5809 			case 8192:
   5810 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5811 				break;
   5812 			case 16384:
   5813 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5814 				break;
   5815 			default:
   5816 				panic("wm_init: MCLBYTES %d unsupported",
   5817 				    MCLBYTES);
   5818 				break;
   5819 			}
   5820 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5821 	}
   5822 
   5823 	/* Enable ECC */
   5824 	switch (sc->sc_type) {
   5825 	case WM_T_82571:
   5826 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5827 		reg |= PBA_ECC_CORR_EN;
   5828 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5829 		break;
   5830 	case WM_T_PCH_LPT:
   5831 	case WM_T_PCH_SPT:
   5832 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5833 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5834 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5835 
   5836 		sc->sc_ctrl |= CTRL_MEHE;
   5837 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5838 		break;
   5839 	default:
   5840 		break;
   5841 	}
   5842 
   5843 	/*
   5844 	 * Set the receive filter.
   5845 	 *
   5846 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5847 	 * the setting of RCTL.EN in wm_set_filter()
   5848 	 */
   5849 	wm_set_filter(sc);
   5850 
   5851 	/* On 575 and later set RDT only if RX enabled */
   5852 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5853 		int qidx;
   5854 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5855 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5856 			for (i = 0; i < WM_NRXDESC; i++) {
   5857 				mutex_enter(rxq->rxq_lock);
   5858 				wm_init_rxdesc(rxq, i);
   5859 				mutex_exit(rxq->rxq_lock);
   5860 
   5861 			}
   5862 		}
   5863 	}
   5864 
   5865 	wm_unset_stopping_flags(sc);
   5866 
   5867 	/* Start the one second link check clock. */
   5868 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5869 
   5870 	/* ...all done! */
   5871 	ifp->if_flags |= IFF_RUNNING;
   5872 	ifp->if_flags &= ~IFF_OACTIVE;
   5873 
   5874  out:
   5875 	sc->sc_if_flags = ifp->if_flags;
   5876 	if (error)
   5877 		log(LOG_ERR, "%s: interface not running\n",
   5878 		    device_xname(sc->sc_dev));
   5879 	return error;
   5880 }
   5881 
   5882 /*
   5883  * wm_stop:		[ifnet interface function]
   5884  *
   5885  *	Stop transmission on the interface.
   5886  */
   5887 static void
   5888 wm_stop(struct ifnet *ifp, int disable)
   5889 {
   5890 	struct wm_softc *sc = ifp->if_softc;
   5891 
   5892 	WM_CORE_LOCK(sc);
   5893 	wm_stop_locked(ifp, disable);
   5894 	WM_CORE_UNLOCK(sc);
   5895 }
   5896 
   5897 static void
   5898 wm_stop_locked(struct ifnet *ifp, int disable)
   5899 {
   5900 	struct wm_softc *sc = ifp->if_softc;
   5901 	struct wm_txsoft *txs;
   5902 	int i, qidx;
   5903 
   5904 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5905 		device_xname(sc->sc_dev), __func__));
   5906 	KASSERT(WM_CORE_LOCKED(sc));
   5907 
   5908 	wm_set_stopping_flags(sc);
   5909 
   5910 	/* Stop the one second clock. */
   5911 	callout_stop(&sc->sc_tick_ch);
   5912 
   5913 	/* Stop the 82547 Tx FIFO stall check timer. */
   5914 	if (sc->sc_type == WM_T_82547)
   5915 		callout_stop(&sc->sc_txfifo_ch);
   5916 
   5917 	if (sc->sc_flags & WM_F_HAS_MII) {
   5918 		/* Down the MII. */
   5919 		mii_down(&sc->sc_mii);
   5920 	} else {
   5921 #if 0
   5922 		/* Should we clear PHY's status properly? */
   5923 		wm_reset(sc);
   5924 #endif
   5925 	}
   5926 
   5927 	/* Stop the transmit and receive processes. */
   5928 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5929 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5930 	sc->sc_rctl &= ~RCTL_EN;
   5931 
   5932 	/*
   5933 	 * Clear the interrupt mask to ensure the device cannot assert its
   5934 	 * interrupt line.
   5935 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5936 	 * service any currently pending or shared interrupt.
   5937 	 */
   5938 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5939 	sc->sc_icr = 0;
   5940 	if (wm_is_using_msix(sc)) {
   5941 		if (sc->sc_type != WM_T_82574) {
   5942 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5943 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5944 		} else
   5945 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5946 	}
   5947 
   5948 	/* Release any queued transmit buffers. */
   5949 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5950 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5951 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5952 		mutex_enter(txq->txq_lock);
   5953 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5954 			txs = &txq->txq_soft[i];
   5955 			if (txs->txs_mbuf != NULL) {
   5956 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5957 				m_freem(txs->txs_mbuf);
   5958 				txs->txs_mbuf = NULL;
   5959 			}
   5960 		}
   5961 		mutex_exit(txq->txq_lock);
   5962 	}
   5963 
   5964 	/* Mark the interface as down and cancel the watchdog timer. */
   5965 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5966 	ifp->if_timer = 0;
   5967 
   5968 	if (disable) {
   5969 		for (i = 0; i < sc->sc_nqueues; i++) {
   5970 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5971 			mutex_enter(rxq->rxq_lock);
   5972 			wm_rxdrain(rxq);
   5973 			mutex_exit(rxq->rxq_lock);
   5974 		}
   5975 	}
   5976 
   5977 #if 0 /* notyet */
   5978 	if (sc->sc_type >= WM_T_82544)
   5979 		CSR_WRITE(sc, WMREG_WUC, 0);
   5980 #endif
   5981 }
   5982 
   5983 static void
   5984 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5985 {
   5986 	struct mbuf *m;
   5987 	int i;
   5988 
   5989 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5990 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5991 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5992 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5993 		    m->m_data, m->m_len, m->m_flags);
   5994 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5995 	    i, i == 1 ? "" : "s");
   5996 }
   5997 
   5998 /*
   5999  * wm_82547_txfifo_stall:
   6000  *
   6001  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6002  *	reset the FIFO pointers, and restart packet transmission.
   6003  */
   6004 static void
   6005 wm_82547_txfifo_stall(void *arg)
   6006 {
   6007 	struct wm_softc *sc = arg;
   6008 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6009 
   6010 	mutex_enter(txq->txq_lock);
   6011 
   6012 	if (txq->txq_stopping)
   6013 		goto out;
   6014 
   6015 	if (txq->txq_fifo_stall) {
   6016 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6017 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6018 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6019 			/*
   6020 			 * Packets have drained.  Stop transmitter, reset
   6021 			 * FIFO pointers, restart transmitter, and kick
   6022 			 * the packet queue.
   6023 			 */
   6024 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6025 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6026 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6027 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6028 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6029 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6030 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6031 			CSR_WRITE_FLUSH(sc);
   6032 
   6033 			txq->txq_fifo_head = 0;
   6034 			txq->txq_fifo_stall = 0;
   6035 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6036 		} else {
   6037 			/*
   6038 			 * Still waiting for packets to drain; try again in
   6039 			 * another tick.
   6040 			 */
   6041 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6042 		}
   6043 	}
   6044 
   6045 out:
   6046 	mutex_exit(txq->txq_lock);
   6047 }
   6048 
   6049 /*
   6050  * wm_82547_txfifo_bugchk:
   6051  *
   6052  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6053  *	prevent enqueueing a packet that would wrap around the end
   6054  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6055  *
   6056  *	We do this by checking the amount of space before the end
   6057  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6058  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6059  *	the internal FIFO pointers to the beginning, and restart
   6060  *	transmission on the interface.
   6061  */
   6062 #define	WM_FIFO_HDR		0x10
   6063 #define	WM_82547_PAD_LEN	0x3e0
   6064 static int
   6065 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6066 {
   6067 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6068 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6069 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6070 
   6071 	/* Just return if already stalled. */
   6072 	if (txq->txq_fifo_stall)
   6073 		return 1;
   6074 
   6075 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6076 		/* Stall only occurs in half-duplex mode. */
   6077 		goto send_packet;
   6078 	}
   6079 
   6080 	if (len >= WM_82547_PAD_LEN + space) {
   6081 		txq->txq_fifo_stall = 1;
   6082 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6083 		return 1;
   6084 	}
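	/*
	 * Worked example (illustrative FIFO numbers): with
	 * txq_fifo_size = 0x8000 and txq_fifo_head = 0x7e00, space is
	 * 0x200; a 1514-byte packet rounds up to len = 0x600, and
	 * 0x600 >= WM_82547_PAD_LEN + 0x200 = 0x5e0, so we stall rather
	 * than let the packet wrap the FIFO.
	 */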
   6085 
   6086  send_packet:
   6087 	txq->txq_fifo_head += len;
   6088 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6089 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6090 
   6091 	return 0;
   6092 }
   6093 
   6094 static int
   6095 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6096 {
   6097 	int error;
   6098 
   6099 	/*
   6100 	 * Allocate the control data structures, and create and load the
   6101 	 * DMA map for it.
   6102 	 *
   6103 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6104 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6105 	 * both sets within the same 4G segment.
   6106 	 */
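	/*
	 * How the 4G constraint is enforced (our reading of the calls
	 * below): the (bus_size_t)0x100000000ULL boundary argument to
	 * bus_dmamem_alloc() requests memory that does not cross a 4GB
	 * boundary, keeping all descriptors within one 4G segment.
	 */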
   6107 	if (sc->sc_type < WM_T_82544)
   6108 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6109 	else
   6110 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6111 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6112 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6113 	else
   6114 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6115 
   6116 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6117 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6118 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6119 		aprint_error_dev(sc->sc_dev,
   6120 		    "unable to allocate TX control data, error = %d\n",
   6121 		    error);
   6122 		goto fail_0;
   6123 	}
   6124 
   6125 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6126 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6127 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6128 		aprint_error_dev(sc->sc_dev,
   6129 		    "unable to map TX control data, error = %d\n", error);
   6130 		goto fail_1;
   6131 	}
   6132 
   6133 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6134 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6135 		aprint_error_dev(sc->sc_dev,
   6136 		    "unable to create TX control data DMA map, error = %d\n",
   6137 		    error);
   6138 		goto fail_2;
   6139 	}
   6140 
   6141 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6142 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6143 		aprint_error_dev(sc->sc_dev,
   6144 		    "unable to load TX control data DMA map, error = %d\n",
   6145 		    error);
   6146 		goto fail_3;
   6147 	}
   6148 
   6149 	return 0;
   6150 
   6151  fail_3:
   6152 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6153  fail_2:
   6154 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6155 	    WM_TXDESCS_SIZE(txq));
   6156  fail_1:
   6157 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6158  fail_0:
   6159 	return error;
   6160 }
   6161 
   6162 static void
   6163 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6164 {
   6165 
   6166 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6167 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6168 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6169 	    WM_TXDESCS_SIZE(txq));
   6170 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6171 }
   6172 
   6173 static int
   6174 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6175 {
   6176 	int error;
   6177 	size_t rxq_descs_size;
   6178 
   6179 	/*
   6180 	 * Allocate the control data structures, and create and load the
   6181 	 * DMA map for it.
   6182 	 *
   6183 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6184 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6185 	 * both sets within the same 4G segment.
   6186 	 */
   6187 	rxq->rxq_ndesc = WM_NRXDESC;
   6188 	if (sc->sc_type == WM_T_82574)
   6189 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6190 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6191 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6192 	else
   6193 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6194 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6195 
   6196 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6197 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6198 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6199 		aprint_error_dev(sc->sc_dev,
   6200 		    "unable to allocate RX control data, error = %d\n",
   6201 		    error);
   6202 		goto fail_0;
   6203 	}
   6204 
   6205 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6206 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6207 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6208 		aprint_error_dev(sc->sc_dev,
   6209 		    "unable to map RX control data, error = %d\n", error);
   6210 		goto fail_1;
   6211 	}
   6212 
   6213 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6214 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6215 		aprint_error_dev(sc->sc_dev,
   6216 		    "unable to create RX control data DMA map, error = %d\n",
   6217 		    error);
   6218 		goto fail_2;
   6219 	}
   6220 
   6221 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6222 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6223 		aprint_error_dev(sc->sc_dev,
   6224 		    "unable to load RX control data DMA map, error = %d\n",
   6225 		    error);
   6226 		goto fail_3;
   6227 	}
   6228 
   6229 	return 0;
   6230 
   6231  fail_3:
   6232 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6233  fail_2:
   6234 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6235 	    rxq_descs_size);
   6236  fail_1:
   6237 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6238  fail_0:
   6239 	return error;
   6240 }
   6241 
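        /*
         * wm_free_rx_descs:
         *
         *	Undo wm_alloc_rx_descs(): unload and destroy the descriptor
         *	DMA map, then unmap and free the descriptor memory itself.
         */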
   6242 static void
   6243 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6244 {
   6245 
   6246 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6247 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6248 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6249 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6250 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6251 }
   6252 
   6253 
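        /*
         * wm_alloc_tx_buffer:
         *
         *	Create one DMA map per transmit job.  The 82547 variants
         *	use their own queue length limit (WM_TXQUEUELEN_MAX_82547).
         */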
   6254 static int
   6255 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6256 {
   6257 	int i, error;
   6258 
   6259 	/* Create the transmit buffer DMA maps. */
   6260 	WM_TXQUEUELEN(txq) =
   6261 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6262 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6263 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6264 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6265 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6266 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6267 			aprint_error_dev(sc->sc_dev,
   6268 			    "unable to create Tx DMA map %d, error = %d\n",
   6269 			    i, error);
   6270 			goto fail;
   6271 		}
   6272 	}
   6273 
   6274 	return 0;
   6275 
   6276  fail:
   6277 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6278 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6279 			bus_dmamap_destroy(sc->sc_dmat,
   6280 			    txq->txq_soft[i].txs_dmamap);
   6281 	}
   6282 	return error;
   6283 }
   6284 
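        /*
         * wm_free_tx_buffer:
         *
         *	Destroy the per-job transmit DMA maps created by
         *	wm_alloc_tx_buffer().
         */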
   6285 static void
   6286 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6287 {
   6288 	int i;
   6289 
   6290 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6291 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6292 			bus_dmamap_destroy(sc->sc_dmat,
   6293 			    txq->txq_soft[i].txs_dmamap);
   6294 	}
   6295 }
   6296 
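        /*
         * wm_alloc_rx_buffer:
         *
         *	Create one single-segment DMA map per receive descriptor,
         *	each sized to hold one mbuf cluster (MCLBYTES).
         */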
   6297 static int
   6298 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6299 {
   6300 	int i, error;
   6301 
   6302 	/* Create the receive buffer DMA maps. */
   6303 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6304 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6305 			    MCLBYTES, 0, 0,
   6306 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6307 			aprint_error_dev(sc->sc_dev,
   6308 			    "unable to create Rx DMA map %d error = %d\n",
   6309 			    i, error);
   6310 			goto fail;
   6311 		}
   6312 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6313 	}
   6314 
   6315 	return 0;
   6316 
   6317  fail:
   6318 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6319 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6320 			bus_dmamap_destroy(sc->sc_dmat,
   6321 			    rxq->rxq_soft[i].rxs_dmamap);
   6322 	}
   6323 	return error;
   6324 }
   6325 
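        /*
         * wm_free_rx_buffer:
         *
         *	Destroy the per-descriptor receive DMA maps created by
         *	wm_alloc_rx_buffer().
         */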
   6326 static void
   6327 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6328 {
   6329 	int i;
   6330 
   6331 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6332 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6333 			bus_dmamap_destroy(sc->sc_dmat,
   6334 			    rxq->rxq_soft[i].rxs_dmamap);
   6335 	}
   6336 }
   6337 
   6338 /*
   6339  * wm_alloc_txrx_queues:
   6340  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6341  */
   6342 static int
   6343 wm_alloc_txrx_queues(struct wm_softc *sc)
   6344 {
   6345 	int i, error, tx_done, rx_done;
   6346 
   6347 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6348 	    KM_SLEEP);
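        	/*
        	 * Note: with KM_SLEEP, kmem_zalloc() does not return NULL,
        	 * so the check below is defensive only.
        	 */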
   6349 	if (sc->sc_queue == NULL) {
   6350 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6351 		error = ENOMEM;
   6352 		goto fail_0;
   6353 	}
   6354 
   6355 	/*
   6356 	 * For transmission
   6357 	 */
   6358 	error = 0;
   6359 	tx_done = 0;
   6360 	for (i = 0; i < sc->sc_nqueues; i++) {
   6361 #ifdef WM_EVENT_COUNTERS
   6362 		int j;
   6363 		const char *xname;
   6364 #endif
   6365 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6366 		txq->txq_sc = sc;
   6367 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6368 
   6369 		error = wm_alloc_tx_descs(sc, txq);
   6370 		if (error)
   6371 			break;
   6372 		error = wm_alloc_tx_buffer(sc, txq);
   6373 		if (error) {
   6374 			wm_free_tx_descs(sc, txq);
   6375 			break;
   6376 		}
   6377 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6378 		if (txq->txq_interq == NULL) {
   6379 			wm_free_tx_descs(sc, txq);
   6380 			wm_free_tx_buffer(sc, txq);
   6381 			error = ENOMEM;
   6382 			break;
   6383 		}
   6384 
   6385 #ifdef WM_EVENT_COUNTERS
   6386 		xname = device_xname(sc->sc_dev);
   6387 
   6388 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6389 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6390 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6391 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6392 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6393 
   6394 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6395 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6396 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6397 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6398 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6399 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6400 
   6401 		for (j = 0; j < WM_NTXSEGS; j++) {
   6402 			snprintf(txq->txq_txseg_evcnt_names[j],
   6403 			    sizeof(txq->txq_txseg_evcnt_names[j]),
        			    "txq%02dtxseg%d", i, j);
   6404 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
   6405 			    EVCNT_TYPE_MISC, NULL, xname,
        			    txq->txq_txseg_evcnt_names[j]);
   6406 		}
   6407 
   6408 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6409 
   6410 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6411 #endif /* WM_EVENT_COUNTERS */
   6412 
   6413 		tx_done++;
   6414 	}
   6415 	if (error)
   6416 		goto fail_1;
   6417 
   6418 	/*
   6419 	 * For receive
   6420 	 */
   6421 	error = 0;
   6422 	rx_done = 0;
   6423 	for (i = 0; i < sc->sc_nqueues; i++) {
   6424 #ifdef WM_EVENT_COUNTERS
   6425 		const char *xname;
   6426 #endif
   6427 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6428 		rxq->rxq_sc = sc;
   6429 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6430 
   6431 		error = wm_alloc_rx_descs(sc, rxq);
   6432 		if (error)
   6433 			break;
   6434 
   6435 		error = wm_alloc_rx_buffer(sc, rxq);
   6436 		if (error) {
   6437 			wm_free_rx_descs(sc, rxq);
   6438 			break;
   6439 		}
   6440 
   6441 #ifdef WM_EVENT_COUNTERS
   6442 		xname = device_xname(sc->sc_dev);
   6443 
   6444 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6445 
   6446 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6447 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6448 #endif /* WM_EVENT_COUNTERS */
   6449 
   6450 		rx_done++;
   6451 	}
   6452 	if (error)
   6453 		goto fail_2;
   6454 
   6455 	return 0;
   6456 
   6457  fail_2:
   6458 	for (i = 0; i < rx_done; i++) {
   6459 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6460 		wm_free_rx_buffer(sc, rxq);
   6461 		wm_free_rx_descs(sc, rxq);
   6462 		if (rxq->rxq_lock)
   6463 			mutex_obj_free(rxq->rxq_lock);
   6464 	}
   6465  fail_1:
   6466 	for (i = 0; i < tx_done; i++) {
   6467 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6468 		pcq_destroy(txq->txq_interq);
   6469 		wm_free_tx_buffer(sc, txq);
   6470 		wm_free_tx_descs(sc, txq);
   6471 		if (txq->txq_lock)
   6472 			mutex_obj_free(txq->txq_lock);
   6473 	}
   6474 
   6475 	kmem_free(sc->sc_queue,
   6476 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6477  fail_0:
   6478 	return error;
   6479 }
   6480 
   6481 /*
   6482  * wm_free_txrx_queues:
   6483  *	Free {tx,rx}descs and {tx,rx} buffers
   6484  */
   6485 static void
   6486 wm_free_txrx_queues(struct wm_softc *sc)
   6487 {
   6488 	int i;
   6489 
   6490 	for (i = 0; i < sc->sc_nqueues; i++) {
   6491 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6492 
   6493 #ifdef WM_EVENT_COUNTERS
   6494 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6495 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6496 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6497 #endif /* WM_EVENT_COUNTERS */
   6498 
   6499 		wm_free_rx_buffer(sc, rxq);
   6500 		wm_free_rx_descs(sc, rxq);
   6501 		if (rxq->rxq_lock)
   6502 			mutex_obj_free(rxq->rxq_lock);
   6503 	}
   6504 
   6505 	for (i = 0; i < sc->sc_nqueues; i++) {
   6506 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6507 		struct mbuf *m;
   6508 #ifdef WM_EVENT_COUNTERS
   6509 		int j;
   6510 
   6511 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6512 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6513 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6514 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6515 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6516 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6517 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6518 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6519 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6520 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6521 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6522 
   6523 		for (j = 0; j < WM_NTXSEGS; j++)
   6524 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6525 
   6526 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6527 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6528 #endif /* WM_EVENT_COUNTERS */
   6529 
   6530 		/* drain txq_interq */
   6531 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6532 			m_freem(m);
   6533 		pcq_destroy(txq->txq_interq);
   6534 
   6535 		wm_free_tx_buffer(sc, txq);
   6536 		wm_free_tx_descs(sc, txq);
   6537 		if (txq->txq_lock)
   6538 			mutex_obj_free(txq->txq_lock);
   6539 	}
   6540 
   6541 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6542 }
   6543 
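        /*
         * wm_init_tx_descs:
         *
         *	Clear the transmit descriptor ring and reset the free count
         *	and the next-descriptor index.  Called with txq_lock held.
         */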
   6544 static void
   6545 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6546 {
   6547 
   6548 	KASSERT(mutex_owned(txq->txq_lock));
   6549 
   6550 	/* Initialize the transmit descriptor ring. */
   6551 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6552 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6553 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6554 	txq->txq_free = WM_NTXDESC(txq);
   6555 	txq->txq_next = 0;
   6556 }
   6557 
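        /*
         * wm_init_tx_regs:
         *
         *	Program the transmit ring registers (base address, length,
         *	head/tail) and the interrupt moderation settings for one
         *	queue.  Called with txq_lock held.
         */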
   6558 static void
   6559 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6560     struct wm_txqueue *txq)
   6561 {
   6562 
   6563 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6564 		device_xname(sc->sc_dev), __func__));
   6565 	KASSERT(mutex_owned(txq->txq_lock));
   6566 
   6567 	if (sc->sc_type < WM_T_82543) {
   6568 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6569 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6570 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6571 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6572 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6573 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6574 	} else {
   6575 		int qid = wmq->wmq_id;
   6576 
   6577 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6578 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6579 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6580 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6581 
   6582 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6583 			/*
   6584 			 * Don't write TDT before TCTL.EN is set.
   6585 			 * See the documentation.
   6586 			 */
   6587 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6588 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6589 			    | TXDCTL_WTHRESH(0));
   6590 		else {
   6591 			/* XXX should update with AIM? */
   6592 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6593 			if (sc->sc_type >= WM_T_82540) {
   6594 				/* should be same */
   6595 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6596 			}
   6597 
   6598 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6599 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6600 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6601 		}
   6602 	}
   6603 }
   6604 
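        /*
         * wm_init_tx_buffer:
         *
         *	Reset the software state of the transmit job descriptors.
         *	Called with txq_lock held.
         */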
   6605 static void
   6606 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6607 {
   6608 	int i;
   6609 
   6610 	KASSERT(mutex_owned(txq->txq_lock));
   6611 
   6612 	/* Initialize the transmit job descriptors. */
   6613 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6614 		txq->txq_soft[i].txs_mbuf = NULL;
   6615 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6616 	txq->txq_snext = 0;
   6617 	txq->txq_sdirty = 0;
   6618 }
   6619 
   6620 static void
   6621 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6622     struct wm_txqueue *txq)
   6623 {
   6624 
   6625 	KASSERT(mutex_owned(txq->txq_lock));
   6626 
   6627 	/*
   6628 	 * Set up some register offsets that are different between
   6629 	 * the i82542 and the i82543 and later chips.
   6630 	 */
   6631 	if (sc->sc_type < WM_T_82543)
   6632 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6633 	else
   6634 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6635 
   6636 	wm_init_tx_descs(sc, txq);
   6637 	wm_init_tx_regs(sc, wmq, txq);
   6638 	wm_init_tx_buffer(sc, txq);
   6639 }
   6640 
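        /*
         * wm_init_rx_regs:
         *
         *	Program the receive ring registers (base address, length,
         *	head/tail) plus the buffer size and interrupt moderation
         *	settings for one queue.  Called with rxq_lock held.
         */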
   6641 static void
   6642 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6643     struct wm_rxqueue *rxq)
   6644 {
   6645 
   6646 	KASSERT(mutex_owned(rxq->rxq_lock));
   6647 
   6648 	/*
   6649 	 * Initialize the receive descriptor and receive job
   6650 	 * descriptor rings.
   6651 	 */
   6652 	if (sc->sc_type < WM_T_82543) {
   6653 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6654 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6655 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6656 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6657 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6658 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6659 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6660 
   6661 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6662 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6663 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6664 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6665 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6666 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6667 	} else {
   6668 		int qid = wmq->wmq_id;
   6669 
   6670 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6671 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6672 		CSR_WRITE(sc, WMREG_RDLEN(qid),
        		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6673 
   6674 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6675 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6676 				panic("%s: MCLBYTES %d unsupported for "
        				    "82575 or higher\n", __func__, MCLBYTES);
   6677 
   6678 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported now. */
   6679 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
        			    SRRCTL_DESCTYPE_ADV_ONEBUF
   6680 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6681 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6682 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6683 			    | RXDCTL_WTHRESH(1));
   6684 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6685 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6686 		} else {
   6687 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6688 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6689 			/* XXX should update with AIM? */
   6690 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6691 			/* MUST be same */
   6692 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6693 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6694 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6695 		}
   6696 	}
   6697 }
   6698 
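        /*
         * wm_init_rx_buffer:
         *
         *	Make sure every receive slot has an mbuf cluster loaded;
         *	descriptors are initialized here except on NEWQUEUE chips,
         *	where that must happen after RCTL.EN is set.
         */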
   6699 static int
   6700 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6701 {
   6702 	struct wm_rxsoft *rxs;
   6703 	int error, i;
   6704 
   6705 	KASSERT(mutex_owned(rxq->rxq_lock));
   6706 
   6707 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6708 		rxs = &rxq->rxq_soft[i];
   6709 		if (rxs->rxs_mbuf == NULL) {
   6710 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6711 				log(LOG_ERR, "%s: unable to allocate or map "
   6712 				    "rx buffer %d, error = %d\n",
   6713 				    device_xname(sc->sc_dev), i, error);
   6714 				/*
   6715 				 * XXX Should attempt to run with fewer receive
   6716 				 * XXX buffers instead of just failing.
   6717 				 */
   6718 				wm_rxdrain(rxq);
   6719 				return ENOMEM;
   6720 			}
   6721 		} else {
   6722 			/*
   6723 			 * For 82575 and 82576, the RX descriptors must be
   6724 			 * initialized after the setting of RCTL.EN in
   6725 			 * wm_set_filter()
   6726 			 */
   6727 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6728 				wm_init_rxdesc(rxq, i);
   6729 		}
   6730 	}
   6731 	rxq->rxq_ptr = 0;
   6732 	rxq->rxq_discard = 0;
   6733 	WM_RXCHAIN_RESET(rxq);
   6734 
   6735 	return 0;
   6736 }
   6737 
   6738 static int
   6739 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6740     struct wm_rxqueue *rxq)
   6741 {
   6742 
   6743 	KASSERT(mutex_owned(rxq->rxq_lock));
   6744 
   6745 	/*
   6746 	 * Set up some register offsets that are different between
   6747 	 * the i82542 and the i82543 and later chips.
   6748 	 */
   6749 	if (sc->sc_type < WM_T_82543)
   6750 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6751 	else
   6752 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6753 
   6754 	wm_init_rx_regs(sc, wmq, rxq);
   6755 	return wm_init_rx_buffer(sc, rxq);
   6756 }
   6757 
   6758 /*
   6759  * wm_init_txrx_queues:
   6760  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6761  */
   6762 static int
   6763 wm_init_txrx_queues(struct wm_softc *sc)
   6764 {
   6765 	int i, error = 0;
   6766 
   6767 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6768 		device_xname(sc->sc_dev), __func__));
   6769 
   6770 	for (i = 0; i < sc->sc_nqueues; i++) {
   6771 		struct wm_queue *wmq = &sc->sc_queue[i];
   6772 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6773 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6774 
   6775 		/*
   6776 		 * TODO
   6777 		 * Currently, we use a constant value instead of AIM.
   6778 		 * Furthermore, the interrupt interval of multiqueue, which
   6779 		 * uses polling mode, is shorter than the default value.
   6780 		 * More tuning and AIM support are required.
   6781 		 */
   6782 		if (wm_is_using_multiqueue(sc))
   6783 			wmq->wmq_itr = 50;
   6784 		else
   6785 			wmq->wmq_itr = sc->sc_itr_init;
   6786 		wmq->wmq_set_itr = true;
   6787 
   6788 		mutex_enter(txq->txq_lock);
   6789 		wm_init_tx_queue(sc, wmq, txq);
   6790 		mutex_exit(txq->txq_lock);
   6791 
   6792 		mutex_enter(rxq->rxq_lock);
   6793 		error = wm_init_rx_queue(sc, wmq, rxq);
   6794 		mutex_exit(rxq->rxq_lock);
   6795 		if (error)
   6796 			break;
   6797 	}
   6798 
   6799 	return error;
   6800 }
   6801 
   6802 /*
   6803  * wm_tx_offload:
   6804  *
   6805  *	Set up TCP/IP checksumming parameters for the
   6806  *	specified packet.
   6807  */
   6808 static int
   6809 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6810     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6811 {
   6812 	struct mbuf *m0 = txs->txs_mbuf;
   6813 	struct livengood_tcpip_ctxdesc *t;
   6814 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6815 	uint32_t ipcse;
   6816 	struct ether_header *eh;
   6817 	int offset, iphl;
   6818 	uint8_t fields;
   6819 
   6820 	/*
   6821 	 * XXX It would be nice if the mbuf pkthdr had offset
   6822 	 * fields for the protocol headers.
   6823 	 */
   6824 
   6825 	eh = mtod(m0, struct ether_header *);
   6826 	switch (htons(eh->ether_type)) {
   6827 	case ETHERTYPE_IP:
   6828 	case ETHERTYPE_IPV6:
   6829 		offset = ETHER_HDR_LEN;
   6830 		break;
   6831 
   6832 	case ETHERTYPE_VLAN:
   6833 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6834 		break;
   6835 
   6836 	default:
   6837 		/*
   6838 		 * Don't support this protocol or encapsulation.
   6839 		 */
   6840 		*fieldsp = 0;
   6841 		*cmdp = 0;
   6842 		return 0;
   6843 	}
   6844 
   6845 	if ((m0->m_pkthdr.csum_flags &
   6846 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6847 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6848 	} else {
   6849 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6850 	}
   6851 	ipcse = offset + iphl - 1;
   6852 
   6853 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6854 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6855 	seg = 0;
   6856 	fields = 0;
   6857 
   6858 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6859 		int hlen = offset + iphl;
   6860 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6861 
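        		/*
        		 * For TSO, the TCP checksum is seeded below with the
        		 * pseudo-header checksum computed over a zero length;
        		 * the controller inserts the proper length for each
        		 * segment it generates, so the length must not be
        		 * folded in here.
        		 */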
   6862 		if (__predict_false(m0->m_len <
   6863 				    (hlen + sizeof(struct tcphdr)))) {
   6864 			/*
   6865 			 * TCP/IP headers are not in the first mbuf; we need
   6866 			 * to do this the slow and painful way.  Let's just
   6867 			 * hope this doesn't happen very often.
   6868 			 */
   6869 			struct tcphdr th;
   6870 
   6871 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6872 
   6873 			m_copydata(m0, hlen, sizeof(th), &th);
   6874 			if (v4) {
   6875 				struct ip ip;
   6876 
   6877 				m_copydata(m0, offset, sizeof(ip), &ip);
   6878 				ip.ip_len = 0;
   6879 				m_copyback(m0,
   6880 				    offset + offsetof(struct ip, ip_len),
   6881 				    sizeof(ip.ip_len), &ip.ip_len);
   6882 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6883 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6884 			} else {
   6885 				struct ip6_hdr ip6;
   6886 
   6887 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6888 				ip6.ip6_plen = 0;
   6889 				m_copyback(m0,
   6890 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6891 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6892 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6893 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6894 			}
   6895 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6896 			    sizeof(th.th_sum), &th.th_sum);
   6897 
   6898 			hlen += th.th_off << 2;
   6899 		} else {
   6900 			/*
   6901 			 * TCP/IP headers are in the first mbuf; we can do
   6902 			 * this the easy way.
   6903 			 */
   6904 			struct tcphdr *th;
   6905 
   6906 			if (v4) {
   6907 				struct ip *ip =
   6908 				    (void *)(mtod(m0, char *) + offset);
   6909 				th = (void *)(mtod(m0, char *) + hlen);
   6910 
   6911 				ip->ip_len = 0;
   6912 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6913 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6914 			} else {
   6915 				struct ip6_hdr *ip6 =
   6916 				    (void *)(mtod(m0, char *) + offset);
   6917 				th = (void *)(mtod(m0, char *) + hlen);
   6918 
   6919 				ip6->ip6_plen = 0;
   6920 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6921 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6922 			}
   6923 			hlen += th->th_off << 2;
   6924 		}
   6925 
   6926 		if (v4) {
   6927 			WM_Q_EVCNT_INCR(txq, txtso);
   6928 			cmdlen |= WTX_TCPIP_CMD_IP;
   6929 		} else {
   6930 			WM_Q_EVCNT_INCR(txq, txtso6);
   6931 			ipcse = 0;
   6932 		}
   6933 		cmd |= WTX_TCPIP_CMD_TSE;
   6934 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6935 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6936 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6937 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6938 	}
   6939 
   6940 	/*
   6941 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6942 	 * offload feature, if we load the context descriptor, we
   6943 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6944 	 */
   6945 
   6946 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6947 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6948 	    WTX_TCPIP_IPCSE(ipcse);
   6949 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6950 		WM_Q_EVCNT_INCR(txq, txipsum);
   6951 		fields |= WTX_IXSM;
   6952 	}
   6953 
   6954 	offset += iphl;
   6955 
   6956 	if (m0->m_pkthdr.csum_flags &
   6957 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6958 		WM_Q_EVCNT_INCR(txq, txtusum);
   6959 		fields |= WTX_TXSM;
   6960 		tucs = WTX_TCPIP_TUCSS(offset) |
   6961 		    WTX_TCPIP_TUCSO(offset +
   6962 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6963 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6964 	} else if ((m0->m_pkthdr.csum_flags &
   6965 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6966 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6967 		fields |= WTX_TXSM;
   6968 		tucs = WTX_TCPIP_TUCSS(offset) |
   6969 		    WTX_TCPIP_TUCSO(offset +
   6970 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6971 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6972 	} else {
   6973 		/* Just initialize it to a valid TCP context. */
   6974 		tucs = WTX_TCPIP_TUCSS(offset) |
   6975 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6976 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6977 	}
   6978 
   6979 	/*
   6980 	 * We don't have to write context descriptor for every packet
   6981 	 * We don't have to write a context descriptor for every packet,
   6982 	 * except on the 82574: there, a context descriptor must be
   6983 	 * written for every packet when two descriptor queues are used.
   6984 	 * Writing a context descriptor for every packet adds overhead,
   6985 	 * but it does not cause problems.
   6986 	/* Fill in the context descriptor. */
   6987 	t = (struct livengood_tcpip_ctxdesc *)
   6988 	    &txq->txq_descs[txq->txq_next];
   6989 	t->tcpip_ipcs = htole32(ipcs);
   6990 	t->tcpip_tucs = htole32(tucs);
   6991 	t->tcpip_cmdlen = htole32(cmdlen);
   6992 	t->tcpip_seg = htole32(seg);
   6993 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6994 
   6995 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6996 	txs->txs_ndesc++;
   6997 
   6998 	*cmdp = cmd;
   6999 	*fieldsp = fields;
   7000 
   7001 	return 0;
   7002 }
   7003 
   7004 static inline int
   7005 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7006 {
   7007 	struct wm_softc *sc = ifp->if_softc;
   7008 	u_int cpuid = cpu_index(curcpu());
   7009 
   7010 	/*
   7011 	 * Currently, a simple distribution strategy.
   7012 	 * TODO:
   7013 	 * Distribute by flowid (RSS hash value).
   7014 	 */
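        	/*
        	 * For example (illustrative values only): with ncpu = 8,
        	 * sc_affinity_offset = 2 and sc_nqueues = 4, the thread
        	 * running on CPU 5 maps to queue (5 + 8 - 2) % 4 = 3.
        	 */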
   7015 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7016 }
   7017 
   7018 /*
   7019  * wm_start:		[ifnet interface function]
   7020  *
   7021  *	Start packet transmission on the interface.
   7022  */
   7023 static void
   7024 wm_start(struct ifnet *ifp)
   7025 {
   7026 	struct wm_softc *sc = ifp->if_softc;
   7027 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7028 
   7029 #ifdef WM_MPSAFE
   7030 	KASSERT(if_is_mpsafe(ifp));
   7031 #endif
   7032 	/*
   7033 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7034 	 */
   7035 
   7036 	mutex_enter(txq->txq_lock);
   7037 	if (!txq->txq_stopping)
   7038 		wm_start_locked(ifp);
   7039 	mutex_exit(txq->txq_lock);
   7040 }
   7041 
   7042 static void
   7043 wm_start_locked(struct ifnet *ifp)
   7044 {
   7045 	struct wm_softc *sc = ifp->if_softc;
   7046 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7047 
   7048 	wm_send_common_locked(ifp, txq, false);
   7049 }
   7050 
   7051 static int
   7052 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7053 {
   7054 	int qid;
   7055 	struct wm_softc *sc = ifp->if_softc;
   7056 	struct wm_txqueue *txq;
   7057 
   7058 	qid = wm_select_txqueue(ifp, m);
   7059 	txq = &sc->sc_queue[qid].wmq_txq;
   7060 
   7061 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7062 		m_freem(m);
   7063 		WM_Q_EVCNT_INCR(txq, txdrop);
   7064 		return ENOBUFS;
   7065 	}
   7066 
   7067 	/*
   7068 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7069 	 */
   7070 	ifp->if_obytes += m->m_pkthdr.len;
   7071 	if (m->m_flags & M_MCAST)
   7072 		ifp->if_omcasts++;
   7073 
   7074 	if (mutex_tryenter(txq->txq_lock)) {
   7075 		if (!txq->txq_stopping)
   7076 			wm_transmit_locked(ifp, txq);
   7077 		mutex_exit(txq->txq_lock);
   7078 	}
   7079 
   7080 	return 0;
   7081 }
   7082 
   7083 static void
   7084 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7085 {
   7086 
   7087 	wm_send_common_locked(ifp, txq, true);
   7088 }
   7089 
   7090 static void
   7091 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7092     bool is_transmit)
   7093 {
   7094 	struct wm_softc *sc = ifp->if_softc;
   7095 	struct mbuf *m0;
   7096 	struct wm_txsoft *txs;
   7097 	bus_dmamap_t dmamap;
   7098 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7099 	bus_addr_t curaddr;
   7100 	bus_size_t seglen, curlen;
   7101 	uint32_t cksumcmd;
   7102 	uint8_t cksumfields;
   7103 
   7104 	KASSERT(mutex_owned(txq->txq_lock));
   7105 
   7106 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7107 		return;
   7108 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7109 		return;
   7110 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7111 		return;
   7112 
   7113 	/* Remember the previous number of free descriptors. */
   7114 	ofree = txq->txq_free;
   7115 
   7116 	/*
   7117 	 * Loop through the send queue, setting up transmit descriptors
   7118 	 * until we drain the queue, or use up all available transmit
   7119 	 * descriptors.
   7120 	 */
   7121 	for (;;) {
   7122 		m0 = NULL;
   7123 
   7124 		/* Get a work queue entry. */
   7125 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7126 			wm_txeof(sc, txq);
   7127 			if (txq->txq_sfree == 0) {
   7128 				DPRINTF(WM_DEBUG_TX,
   7129 				    ("%s: TX: no free job descriptors\n",
   7130 					device_xname(sc->sc_dev)));
   7131 				WM_Q_EVCNT_INCR(txq, txsstall);
   7132 				break;
   7133 			}
   7134 		}
   7135 
   7136 		/* Grab a packet off the queue. */
   7137 		if (is_transmit)
   7138 			m0 = pcq_get(txq->txq_interq);
   7139 		else
   7140 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7141 		if (m0 == NULL)
   7142 			break;
   7143 
   7144 		DPRINTF(WM_DEBUG_TX,
   7145 		    ("%s: TX: have packet to transmit: %p\n",
   7146 		    device_xname(sc->sc_dev), m0));
   7147 
   7148 		txs = &txq->txq_soft[txq->txq_snext];
   7149 		dmamap = txs->txs_dmamap;
   7150 
   7151 		use_tso = (m0->m_pkthdr.csum_flags &
   7152 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7153 
   7154 		/*
   7155 		 * So says the Linux driver:
   7156 		 * The controller does a simple calculation to make sure
   7157 		 * there is enough room in the FIFO before initiating the
   7158 		 * DMA for each buffer.  The calc is:
   7159 		 *	4 = ceil(buffer len / MSS)
   7160 		 * To make sure we don't overrun the FIFO, adjust the max
   7161 		 * buffer len if the MSS drops.
   7162 		 */
   7163 		dmamap->dm_maxsegsz =
   7164 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7165 		    ? m0->m_pkthdr.segsz << 2
   7166 		    : WTX_MAX_LEN;
   7167 
   7168 		/*
   7169 		 * Load the DMA map.  If this fails, the packet either
   7170 		 * didn't fit in the allotted number of segments, or we
   7171 		 * were short on resources.  For the too-many-segments
   7172 		 * case, we simply report an error and drop the packet,
   7173 		 * since we can't sanely copy a jumbo packet to a single
   7174 		 * buffer.
   7175 		 */
   7176 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7177 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7178 		if (error) {
   7179 			if (error == EFBIG) {
   7180 				WM_Q_EVCNT_INCR(txq, txdrop);
   7181 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7182 				    "DMA segments, dropping...\n",
   7183 				    device_xname(sc->sc_dev));
   7184 				wm_dump_mbuf_chain(sc, m0);
   7185 				m_freem(m0);
   7186 				continue;
   7187 			}
   7188 			/*  Short on resources, just stop for now. */
   7189 			DPRINTF(WM_DEBUG_TX,
   7190 			    ("%s: TX: dmamap load failed: %d\n",
   7191 			    device_xname(sc->sc_dev), error));
   7192 			break;
   7193 		}
   7194 
   7195 		segs_needed = dmamap->dm_nsegs;
   7196 		if (use_tso) {
   7197 			/* For sentinel descriptor; see below. */
   7198 			segs_needed++;
   7199 		}
   7200 
   7201 		/*
   7202 		 * Ensure we have enough descriptors free to describe
   7203 		 * the packet.  Note, we always reserve one descriptor
   7204 		 * at the end of the ring due to the semantics of the
   7205 		 * TDT register, plus one more in the event we need
   7206 		 * to load offload context.
   7207 		 */
   7208 		if (segs_needed > txq->txq_free - 2) {
   7209 			/*
   7210 			 * Not enough free descriptors to transmit this
   7211 			 * packet.  We haven't committed anything yet,
   7212 			 * so just unload the DMA map, put the packet
   7213 			 * pack on the queue, and punt.  Notify the upper
   7214 			 * back on the queue, and punt.  Notify the upper
   7215 			 */
   7216 			DPRINTF(WM_DEBUG_TX,
   7217 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7218 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7219 			    segs_needed, txq->txq_free - 1));
   7220 			if (!is_transmit)
   7221 				ifp->if_flags |= IFF_OACTIVE;
   7222 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7223 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7224 			WM_Q_EVCNT_INCR(txq, txdstall);
   7225 			break;
   7226 		}
   7227 
   7228 		/*
   7229 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7230 		 * once we know we can transmit the packet, since we
   7231 		 * do some internal FIFO space accounting here.
   7232 		 */
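        		/*
        		 * (Roughly: the 82547 can hang when a packet wraps the
        		 * end of its Tx FIFO; wm_82547_txfifo_bugchk() detects
        		 * that case and stalls output until the FIFO drains.)
        		 */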
   7233 		if (sc->sc_type == WM_T_82547 &&
   7234 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7235 			DPRINTF(WM_DEBUG_TX,
   7236 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7237 			    device_xname(sc->sc_dev)));
   7238 			if (!is_transmit)
   7239 				ifp->if_flags |= IFF_OACTIVE;
   7240 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7241 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7242 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7243 			break;
   7244 		}
   7245 
   7246 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7247 
   7248 		DPRINTF(WM_DEBUG_TX,
   7249 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7250 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7251 
   7252 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7253 
   7254 		/*
   7255 		 * Store a pointer to the packet so that we can free it
   7256 		 * later.
   7257 		 *
   7258 		 * Initially, we consider the number of descriptors the
   7259 		 * packet uses the number of DMA segments.  This may be
   7260 		 * incremented by 1 if we do checksum offload (a descriptor
   7261 		 * is used to set the checksum context).
   7262 		 */
   7263 		txs->txs_mbuf = m0;
   7264 		txs->txs_firstdesc = txq->txq_next;
   7265 		txs->txs_ndesc = segs_needed;
   7266 
   7267 		/* Set up offload parameters for this packet. */
   7268 		if (m0->m_pkthdr.csum_flags &
   7269 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7270 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7271 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7272 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7273 					  &cksumfields) != 0) {
   7274 				/* Error message already displayed. */
   7275 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7276 				continue;
   7277 			}
   7278 		} else {
   7279 			cksumcmd = 0;
   7280 			cksumfields = 0;
   7281 		}
   7282 
   7283 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7284 
   7285 		/* Sync the DMA map. */
   7286 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7287 		    BUS_DMASYNC_PREWRITE);
   7288 
   7289 		/* Initialize the transmit descriptor. */
   7290 		for (nexttx = txq->txq_next, seg = 0;
   7291 		     seg < dmamap->dm_nsegs; seg++) {
   7292 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7293 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7294 			     seglen != 0;
   7295 			     curaddr += curlen, seglen -= curlen,
   7296 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7297 				curlen = seglen;
   7298 
   7299 				/*
   7300 				 * So says the Linux driver:
   7301 				 * Work around for premature descriptor
   7302 				 * write-backs in TSO mode.  Append a
   7303 				 * 4-byte sentinel descriptor.
   7304 				 */
   7305 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7306 				    curlen > 8)
   7307 					curlen -= 4;
   7308 
   7309 				wm_set_dma_addr(
   7310 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7311 				txq->txq_descs[nexttx].wtx_cmdlen
   7312 				    = htole32(cksumcmd | curlen);
   7313 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7314 				    = 0;
   7315 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7316 				    = cksumfields;
   7317 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7318 				lasttx = nexttx;
   7319 
   7320 				DPRINTF(WM_DEBUG_TX,
   7321 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7322 				     "len %#04zx\n",
   7323 				    device_xname(sc->sc_dev), nexttx,
   7324 				    (uint64_t)curaddr, curlen));
   7325 			}
   7326 		}
   7327 
   7328 		KASSERT(lasttx != -1);
   7329 
   7330 		/*
   7331 		 * Set up the command byte on the last descriptor of
   7332 		 * the packet.  If we're in the interrupt delay window,
   7333 		 * delay the interrupt.
   7334 		 */
   7335 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7336 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7337 
   7338 		/*
   7339 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7340 		 * up the descriptor to encapsulate the packet for us.
   7341 		 *
   7342 		 * This is only valid on the last descriptor of the packet.
   7343 		 */
   7344 		if (vlan_has_tag(m0)) {
   7345 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7346 			    htole32(WTX_CMD_VLE);
   7347 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7348 			    = htole16(vlan_get_tag(m0));
   7349 		}
   7350 
   7351 		txs->txs_lastdesc = lasttx;
   7352 
   7353 		DPRINTF(WM_DEBUG_TX,
   7354 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7355 		    device_xname(sc->sc_dev),
   7356 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7357 
   7358 		/* Sync the descriptors we're using. */
   7359 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7360 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7361 
   7362 		/* Give the packet to the chip. */
   7363 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7364 
   7365 		DPRINTF(WM_DEBUG_TX,
   7366 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7367 
   7368 		DPRINTF(WM_DEBUG_TX,
   7369 		    ("%s: TX: finished transmitting packet, job %d\n",
   7370 		    device_xname(sc->sc_dev), txq->txq_snext));
   7371 
   7372 		/* Advance the tx pointer. */
   7373 		txq->txq_free -= txs->txs_ndesc;
   7374 		txq->txq_next = nexttx;
   7375 
   7376 		txq->txq_sfree--;
   7377 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7378 
   7379 		/* Pass the packet to any BPF listeners. */
   7380 		bpf_mtap(ifp, m0);
   7381 	}
   7382 
   7383 	if (m0 != NULL) {
   7384 		if (!is_transmit)
   7385 			ifp->if_flags |= IFF_OACTIVE;
   7386 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7387 		WM_Q_EVCNT_INCR(txq, txdrop);
   7388 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7389 			__func__));
   7390 		m_freem(m0);
   7391 	}
   7392 
   7393 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7394 		/* No more slots; notify upper layer. */
   7395 		if (!is_transmit)
   7396 			ifp->if_flags |= IFF_OACTIVE;
   7397 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7398 	}
   7399 
   7400 	if (txq->txq_free != ofree) {
   7401 		/* Set a watchdog timer in case the chip flakes out. */
   7402 		ifp->if_timer = 5;
   7403 	}
   7404 }
   7405 
   7406 /*
   7407  * wm_nq_tx_offload:
   7408  *
   7409  *	Set up TCP/IP checksumming parameters for the
   7410  *	specified packet, for NEWQUEUE devices
   7411  */
   7412 static int
   7413 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7414     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7415 {
   7416 	struct mbuf *m0 = txs->txs_mbuf;
   7417 	uint32_t vl_len, mssidx, cmdc;
   7418 	struct ether_header *eh;
   7419 	int offset, iphl;
   7420 
   7421 	/*
   7422 	 * XXX It would be nice if the mbuf pkthdr had offset
   7423 	 * fields for the protocol headers.
   7424 	 */
   7425 	*cmdlenp = 0;
   7426 	*fieldsp = 0;
   7427 
   7428 	eh = mtod(m0, struct ether_header *);
   7429 	switch (htons(eh->ether_type)) {
   7430 	case ETHERTYPE_IP:
   7431 	case ETHERTYPE_IPV6:
   7432 		offset = ETHER_HDR_LEN;
   7433 		break;
   7434 
   7435 	case ETHERTYPE_VLAN:
   7436 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7437 		break;
   7438 
   7439 	default:
   7440 		/* Don't support this protocol or encapsulation. */
   7441 		*do_csum = false;
   7442 		return 0;
   7443 	}
   7444 	*do_csum = true;
   7445 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7446 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7447 
   7448 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7449 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7450 
   7451 	if ((m0->m_pkthdr.csum_flags &
   7452 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7453 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7454 	} else {
   7455 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7456 	}
   7457 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7458 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7459 
   7460 	if (vlan_has_tag(m0)) {
   7461 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7462 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7463 		*cmdlenp |= NQTX_CMD_VLE;
   7464 	}
   7465 
   7466 	mssidx = 0;
   7467 
   7468 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7469 		int hlen = offset + iphl;
   7470 		int tcp_hlen;
   7471 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7472 
   7473 		if (__predict_false(m0->m_len <
   7474 				    (hlen + sizeof(struct tcphdr)))) {
   7475 			/*
   7476 			 * TCP/IP headers are not in the first mbuf; we need
   7477 			 * to do this the slow and painful way.  Let's just
   7478 			 * hope this doesn't happen very often.
   7479 			 */
   7480 			struct tcphdr th;
   7481 
   7482 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7483 
   7484 			m_copydata(m0, hlen, sizeof(th), &th);
   7485 			if (v4) {
   7486 				struct ip ip;
   7487 
   7488 				m_copydata(m0, offset, sizeof(ip), &ip);
   7489 				ip.ip_len = 0;
   7490 				m_copyback(m0,
   7491 				    offset + offsetof(struct ip, ip_len),
   7492 				    sizeof(ip.ip_len), &ip.ip_len);
   7493 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7494 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7495 			} else {
   7496 				struct ip6_hdr ip6;
   7497 
   7498 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7499 				ip6.ip6_plen = 0;
   7500 				m_copyback(m0,
   7501 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7502 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7503 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7504 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7505 			}
   7506 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7507 			    sizeof(th.th_sum), &th.th_sum);
   7508 
   7509 			tcp_hlen = th.th_off << 2;
   7510 		} else {
   7511 			/*
   7512 			 * TCP/IP headers are in the first mbuf; we can do
   7513 			 * this the easy way.
   7514 			 */
   7515 			struct tcphdr *th;
   7516 
   7517 			if (v4) {
   7518 				struct ip *ip =
   7519 				    (void *)(mtod(m0, char *) + offset);
   7520 				th = (void *)(mtod(m0, char *) + hlen);
   7521 
   7522 				ip->ip_len = 0;
   7523 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7524 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7525 			} else {
   7526 				struct ip6_hdr *ip6 =
   7527 				    (void *)(mtod(m0, char *) + offset);
   7528 				th = (void *)(mtod(m0, char *) + hlen);
   7529 
   7530 				ip6->ip6_plen = 0;
   7531 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7532 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7533 			}
   7534 			tcp_hlen = th->th_off << 2;
   7535 		}
   7536 		hlen += tcp_hlen;
   7537 		*cmdlenp |= NQTX_CMD_TSE;
   7538 
   7539 		if (v4) {
   7540 			WM_Q_EVCNT_INCR(txq, txtso);
   7541 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7542 		} else {
   7543 			WM_Q_EVCNT_INCR(txq, txtso6);
   7544 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7545 		}
   7546 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
        		    << NQTXD_FIELDS_PAYLEN_SHIFT);
   7547 		KASSERT(((m0->m_pkthdr.len - hlen)
        		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7548 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7549 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7550 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7551 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7552 	} else {
   7553 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7554 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7555 	}
   7556 
   7557 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7558 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7559 		cmdc |= NQTXC_CMD_IP4;
   7560 	}
   7561 
   7562 	if (m0->m_pkthdr.csum_flags &
   7563 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7564 		WM_Q_EVCNT_INCR(txq, txtusum);
   7565 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7566 			cmdc |= NQTXC_CMD_TCP;
   7567 		} else {
   7568 			cmdc |= NQTXC_CMD_UDP;
   7569 		}
   7570 		cmdc |= NQTXC_CMD_IP4;
   7571 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7572 	}
   7573 	if (m0->m_pkthdr.csum_flags &
   7574 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7575 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7576 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7577 			cmdc |= NQTXC_CMD_TCP;
   7578 		} else {
   7579 			cmdc |= NQTXC_CMD_UDP;
   7580 		}
   7581 		cmdc |= NQTXC_CMD_IP6;
   7582 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7583 	}
   7584 
   7585 	/*
   7586 	 * We don't have to write a context descriptor for every packet on
   7587 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
   7588 	 * I354, I210 and I211; writing one context descriptor per Tx queue
   7589 	 * is enough for these controllers.
   7590 	 * Writing a context descriptor for every packet adds overhead,
   7591 	 * but it does not cause problems.
   7592 	 */
   7593 	/* Fill in the context descriptor. */
   7594 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7595 	    htole32(vl_len);
   7596 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7597 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7598 	    htole32(cmdc);
   7599 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7600 	    htole32(mssidx);
   7601 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7602 	DPRINTF(WM_DEBUG_TX,
   7603 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7604 	    txq->txq_next, 0, vl_len));
   7605 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7606 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7607 	txs->txs_ndesc++;
   7608 	return 0;
   7609 }
   7610 
   7611 /*
   7612  * wm_nq_start:		[ifnet interface function]
   7613  *
   7614  *	Start packet transmission on the interface for NEWQUEUE devices
   7615  */
   7616 static void
   7617 wm_nq_start(struct ifnet *ifp)
   7618 {
   7619 	struct wm_softc *sc = ifp->if_softc;
   7620 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7621 
   7622 #ifdef WM_MPSAFE
   7623 	KASSERT(if_is_mpsafe(ifp));
   7624 #endif
   7625 	/*
   7626 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7627 	 */
   7628 
   7629 	mutex_enter(txq->txq_lock);
   7630 	if (!txq->txq_stopping)
   7631 		wm_nq_start_locked(ifp);
   7632 	mutex_exit(txq->txq_lock);
   7633 }
   7634 
   7635 static void
   7636 wm_nq_start_locked(struct ifnet *ifp)
   7637 {
   7638 	struct wm_softc *sc = ifp->if_softc;
   7639 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7640 
   7641 	wm_nq_send_common_locked(ifp, txq, false);
   7642 }
   7643 
   7644 static int
   7645 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7646 {
   7647 	int qid;
   7648 	struct wm_softc *sc = ifp->if_softc;
   7649 	struct wm_txqueue *txq;
   7650 
   7651 	qid = wm_select_txqueue(ifp, m);
   7652 	txq = &sc->sc_queue[qid].wmq_txq;
   7653 
   7654 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7655 		m_freem(m);
   7656 		WM_Q_EVCNT_INCR(txq, txdrop);
   7657 		return ENOBUFS;
   7658 	}
   7659 
   7660 	/*
   7661 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7662 	 */
   7663 	ifp->if_obytes += m->m_pkthdr.len;
   7664 	if (m->m_flags & M_MCAST)
   7665 		ifp->if_omcasts++;
   7666 
   7667 	/*
   7668 	 * There are two situations in which this mutex_tryenter() can
   7669 	 * fail at run time:
   7670 	 *     (1) contention with the interrupt handler
   7671 	 *         (wm_txrxintr_msix())
   7672 	 *     (2) contention with the deferred if_start softint
   7673 	 *         (wm_handle_queue())
   7674 	 * In either case, the last packet enqueued to txq->txq_interq is
   7675 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   7676 	 */
   7677 	if (mutex_tryenter(txq->txq_lock)) {
   7678 		if (!txq->txq_stopping)
   7679 			wm_nq_transmit_locked(ifp, txq);
   7680 		mutex_exit(txq->txq_lock);
   7681 	}
   7682 
   7683 	return 0;
   7684 }
   7685 
   7686 static void
   7687 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7688 {
   7689 
   7690 	wm_nq_send_common_locked(ifp, txq, true);
   7691 }
   7692 
   7693 static void
   7694 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7695     bool is_transmit)
   7696 {
   7697 	struct wm_softc *sc = ifp->if_softc;
   7698 	struct mbuf *m0;
   7699 	struct wm_txsoft *txs;
   7700 	bus_dmamap_t dmamap;
   7701 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7702 	bool do_csum, sent;
   7703 
   7704 	KASSERT(mutex_owned(txq->txq_lock));
   7705 
   7706 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7707 		return;
   7708 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7709 		return;
   7710 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7711 		return;
   7712 
   7713 	sent = false;
   7714 
   7715 	/*
   7716 	 * Loop through the send queue, setting up transmit descriptors
   7717 	 * until we drain the queue, or use up all available transmit
   7718 	 * descriptors.
   7719 	 */
   7720 	for (;;) {
   7721 		m0 = NULL;
   7722 
   7723 		/* Get a work queue entry. */
   7724 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7725 			wm_txeof(sc, txq);
   7726 			if (txq->txq_sfree == 0) {
   7727 				DPRINTF(WM_DEBUG_TX,
   7728 				    ("%s: TX: no free job descriptors\n",
   7729 					device_xname(sc->sc_dev)));
   7730 				WM_Q_EVCNT_INCR(txq, txsstall);
   7731 				break;
   7732 			}
   7733 		}
   7734 
   7735 		/* Grab a packet off the queue. */
   7736 		if (is_transmit)
   7737 			m0 = pcq_get(txq->txq_interq);
   7738 		else
   7739 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7740 		if (m0 == NULL)
   7741 			break;
   7742 
   7743 		DPRINTF(WM_DEBUG_TX,
   7744 		    ("%s: TX: have packet to transmit: %p\n",
   7745 		    device_xname(sc->sc_dev), m0));
   7746 
   7747 		txs = &txq->txq_soft[txq->txq_snext];
   7748 		dmamap = txs->txs_dmamap;
   7749 
   7750 		/*
   7751 		 * Load the DMA map.  If this fails, the packet either
   7752 		 * didn't fit in the allotted number of segments, or we
   7753 		 * were short on resources.  For the too-many-segments
   7754 		 * case, we simply report an error and drop the packet,
   7755 		 * since we can't sanely copy a jumbo packet to a single
   7756 		 * buffer.
   7757 		 */
   7758 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7759 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7760 		if (error) {
   7761 			if (error == EFBIG) {
   7762 				WM_Q_EVCNT_INCR(txq, txdrop);
   7763 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7764 				    "DMA segments, dropping...\n",
   7765 				    device_xname(sc->sc_dev));
   7766 				wm_dump_mbuf_chain(sc, m0);
   7767 				m_freem(m0);
   7768 				continue;
   7769 			}
   7770 			/* Short on resources, just stop for now. */
   7771 			DPRINTF(WM_DEBUG_TX,
   7772 			    ("%s: TX: dmamap load failed: %d\n",
   7773 			    device_xname(sc->sc_dev), error));
   7774 			break;
   7775 		}
   7776 
   7777 		segs_needed = dmamap->dm_nsegs;
   7778 
   7779 		/*
   7780 		 * Ensure we have enough descriptors free to describe
   7781 		 * the packet.  Note, we always reserve one descriptor
   7782 		 * at the end of the ring due to the semantics of the
   7783 		 * TDT register, plus one more in the event we need
   7784 		 * to load offload context.
   7785 		 */
   7786 		if (segs_needed > txq->txq_free - 2) {
   7787 			/*
   7788 			 * Not enough free descriptors to transmit this
   7789 			 * packet.  We haven't committed anything yet,
   7790 			 * so just unload the DMA map, put the packet
   7791 			 * back on the queue, and punt.  Notify the upper
   7792 			 * layer that there are no more slots left.
   7793 			 */
   7794 			DPRINTF(WM_DEBUG_TX,
   7795 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7796 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7797 			    segs_needed, txq->txq_free - 1));
   7798 			if (!is_transmit)
   7799 				ifp->if_flags |= IFF_OACTIVE;
   7800 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7801 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7802 			WM_Q_EVCNT_INCR(txq, txdstall);
   7803 			break;
   7804 		}
   7805 
   7806 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7807 
   7808 		DPRINTF(WM_DEBUG_TX,
   7809 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7810 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7811 
   7812 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7813 
   7814 		/*
   7815 		 * Store a pointer to the packet so that we can free it
   7816 		 * later.
   7817 		 *
   7818 		 * Initially, we consider the number of descriptors the
   7819 		 * packet uses the number of DMA segments.  This may be
   7820 		 * incremented by 1 if we do checksum offload (a descriptor
   7821 		 * is used to set the checksum context).
   7822 		 */
   7823 		txs->txs_mbuf = m0;
   7824 		txs->txs_firstdesc = txq->txq_next;
   7825 		txs->txs_ndesc = segs_needed;
   7826 
   7827 		/* Set up offload parameters for this packet. */
   7828 		uint32_t cmdlen, fields, dcmdlen;
   7829 		if (m0->m_pkthdr.csum_flags &
   7830 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7831 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7832 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7833 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7834 			    &do_csum) != 0) {
   7835 				/* Error message already displayed. */
   7836 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7837 				continue;
   7838 			}
   7839 		} else {
   7840 			do_csum = false;
   7841 			cmdlen = 0;
   7842 			fields = 0;
   7843 		}
   7844 
   7845 		/* Sync the DMA map. */
   7846 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7847 		    BUS_DMASYNC_PREWRITE);
   7848 
   7849 		/* Initialize the first transmit descriptor. */
   7850 		nexttx = txq->txq_next;
   7851 		if (!do_csum) {
   7852 			/* Set up a legacy descriptor. */
   7853 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7854 			    dmamap->dm_segs[0].ds_addr);
   7855 			txq->txq_descs[nexttx].wtx_cmdlen =
   7856 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7857 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7858 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7859 			if (vlan_has_tag(m0)) {
   7860 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7861 				    htole32(WTX_CMD_VLE);
   7862 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7863 				    htole16(vlan_get_tag(m0));
   7864 			} else {
   7865 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7866 			}
   7867 			dcmdlen = 0;
   7868 		} else {
   7869 			/* Set up an advanced data descriptor. */
   7870 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7871 			    htole64(dmamap->dm_segs[0].ds_addr);
   7872 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7873 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7874 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7875 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7876 			    htole32(fields);
   7877 			DPRINTF(WM_DEBUG_TX,
   7878 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7879 			    device_xname(sc->sc_dev), nexttx,
   7880 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7881 			DPRINTF(WM_DEBUG_TX,
   7882 			    ("\t 0x%08x%08x\n", fields,
   7883 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7884 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7885 		}
   7886 
   7887 		lasttx = nexttx;
   7888 		nexttx = WM_NEXTTX(txq, nexttx);
   7889 		/*
    7890 		 * Fill in the next descriptors.  The legacy and advanced
    7891 		 * formats are laid out identically from here on.
   7892 		 */
   7893 		for (seg = 1; seg < dmamap->dm_nsegs;
   7894 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7895 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7896 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7897 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7898 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7899 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7900 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7901 			lasttx = nexttx;
   7902 
   7903 			DPRINTF(WM_DEBUG_TX,
   7904 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7905 			     "len %#04zx\n",
   7906 			    device_xname(sc->sc_dev), nexttx,
   7907 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7908 			    dmamap->dm_segs[seg].ds_len));
   7909 		}
   7910 
   7911 		KASSERT(lasttx != -1);
   7912 
   7913 		/*
   7914 		 * Set up the command byte on the last descriptor of
   7915 		 * the packet.  If we're in the interrupt delay window,
   7916 		 * delay the interrupt.
   7917 		 */
   7918 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7919 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
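		/*
		 * The store below works for both descriptor formats: the
		 * KASSERT above checks that the legacy EOP/RS command
		 * bits equal their NQ counterparts, and wtx_cmdlen
		 * overlays the same 32-bit word as nqtxd_cmdlen in the
		 * descriptor ring.
		 */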
   7920 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7921 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7922 
   7923 		txs->txs_lastdesc = lasttx;
   7924 
   7925 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7926 		    device_xname(sc->sc_dev),
   7927 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7928 
   7929 		/* Sync the descriptors we're using. */
   7930 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7931 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7932 
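		/*
		 * Writing the tail register below passes ownership of
		 * all descriptors up to, but not including, nexttx to
		 * the chip; this is why one ring entry is always kept
		 * in reserve (see the free-space check above), so the
		 * tail can never catch up with the head.
		 */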
   7933 		/* Give the packet to the chip. */
   7934 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7935 		sent = true;
   7936 
   7937 		DPRINTF(WM_DEBUG_TX,
   7938 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7939 
   7940 		DPRINTF(WM_DEBUG_TX,
   7941 		    ("%s: TX: finished transmitting packet, job %d\n",
   7942 		    device_xname(sc->sc_dev), txq->txq_snext));
   7943 
   7944 		/* Advance the tx pointer. */
   7945 		txq->txq_free -= txs->txs_ndesc;
   7946 		txq->txq_next = nexttx;
   7947 
   7948 		txq->txq_sfree--;
   7949 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7950 
   7951 		/* Pass the packet to any BPF listeners. */
   7952 		bpf_mtap(ifp, m0);
   7953 	}
   7954 
   7955 	if (m0 != NULL) {
   7956 		if (!is_transmit)
   7957 			ifp->if_flags |= IFF_OACTIVE;
   7958 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7959 		WM_Q_EVCNT_INCR(txq, txdrop);
   7960 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7961 			__func__));
   7962 		m_freem(m0);
   7963 	}
   7964 
   7965 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7966 		/* No more slots; notify upper layer. */
   7967 		if (!is_transmit)
   7968 			ifp->if_flags |= IFF_OACTIVE;
   7969 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7970 	}
   7971 
   7972 	if (sent) {
   7973 		/* Set a watchdog timer in case the chip flakes out. */
   7974 		ifp->if_timer = 5;
   7975 	}
   7976 }
   7977 
   7978 static void
   7979 wm_deferred_start_locked(struct wm_txqueue *txq)
   7980 {
   7981 	struct wm_softc *sc = txq->txq_sc;
   7982 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7983 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7984 	int qid = wmq->wmq_id;
   7985 
   7986 	KASSERT(mutex_owned(txq->txq_lock));
   7987 
   7988 	if (txq->txq_stopping) {
   7989 		mutex_exit(txq->txq_lock);
   7990 		return;
   7991 	}
   7992 
   7993 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7994 		/* XXX need for ALTQ or one CPU system */
   7995 		if (qid == 0)
   7996 			wm_nq_start_locked(ifp);
   7997 		wm_nq_transmit_locked(ifp, txq);
   7998 	} else {
   7999 		/* XXX need for ALTQ or one CPU system */
   8000 		if (qid == 0)
   8001 			wm_start_locked(ifp);
   8002 		wm_transmit_locked(ifp, txq);
   8003 	}
   8004 }
   8005 
   8006 /* Interrupt */
   8007 
   8008 /*
   8009  * wm_txeof:
   8010  *
   8011  *	Helper; handle transmit interrupts.
   8012  */
   8013 static int
   8014 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   8015 {
   8016 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8017 	struct wm_txsoft *txs;
   8018 	bool processed = false;
   8019 	int count = 0;
   8020 	int i;
   8021 	uint8_t status;
   8022 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8023 
   8024 	KASSERT(mutex_owned(txq->txq_lock));
   8025 
   8026 	if (txq->txq_stopping)
   8027 		return 0;
   8028 
   8029 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8030 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8031 	if (wmq->wmq_id == 0)
   8032 		ifp->if_flags &= ~IFF_OACTIVE;
   8033 
   8034 	/*
   8035 	 * Go through the Tx list and free mbufs for those
   8036 	 * frames which have been transmitted.
   8037 	 */
   8038 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8039 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8040 		txs = &txq->txq_soft[i];
   8041 
   8042 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8043 			device_xname(sc->sc_dev), i));
   8044 
   8045 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8046 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8047 
   8048 		status =
   8049 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8050 		if ((status & WTX_ST_DD) == 0) {
   8051 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8052 			    BUS_DMASYNC_PREREAD);
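			/*
			 * DD not yet set: the chip still owns this job's
			 * descriptors, so stop here and check it again
			 * on the next call.
			 */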
   8053 			break;
   8054 		}
   8055 
   8056 		processed = true;
   8057 		count++;
   8058 		DPRINTF(WM_DEBUG_TX,
   8059 		    ("%s: TX: job %d done: descs %d..%d\n",
   8060 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8061 		    txs->txs_lastdesc));
   8062 
   8063 		/*
   8064 		 * XXX We should probably be using the statistics
   8065 		 * XXX registers, but I don't know if they exist
   8066 		 * XXX on chips before the i82544.
   8067 		 */
   8068 
   8069 #ifdef WM_EVENT_COUNTERS
   8070 		if (status & WTX_ST_TU)
   8071 			WM_Q_EVCNT_INCR(txq, tu);
   8072 #endif /* WM_EVENT_COUNTERS */
   8073 
   8074 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8075 			ifp->if_oerrors++;
   8076 			if (status & WTX_ST_LC)
   8077 				log(LOG_WARNING, "%s: late collision\n",
   8078 				    device_xname(sc->sc_dev));
   8079 			else if (status & WTX_ST_EC) {
   8080 				ifp->if_collisions += 16;
   8081 				log(LOG_WARNING, "%s: excessive collisions\n",
   8082 				    device_xname(sc->sc_dev));
   8083 			}
   8084 		} else
   8085 			ifp->if_opackets++;
   8086 
   8087 		txq->txq_packets++;
   8088 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8089 
   8090 		txq->txq_free += txs->txs_ndesc;
   8091 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8092 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8093 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8094 		m_freem(txs->txs_mbuf);
   8095 		txs->txs_mbuf = NULL;
   8096 	}
   8097 
   8098 	/* Update the dirty transmit buffer pointer. */
   8099 	txq->txq_sdirty = i;
   8100 	DPRINTF(WM_DEBUG_TX,
   8101 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8102 
   8103 	if (count != 0)
   8104 		rnd_add_uint32(&sc->rnd_source, count);
   8105 
   8106 	/*
   8107 	 * If there are no more pending transmissions, cancel the watchdog
   8108 	 * timer.
   8109 	 */
   8110 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8111 		ifp->if_timer = 0;
   8112 
   8113 	return processed;
   8114 }
   8115 
   8116 static inline uint32_t
   8117 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8118 {
   8119 	struct wm_softc *sc = rxq->rxq_sc;
   8120 
   8121 	if (sc->sc_type == WM_T_82574)
   8122 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8123 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8124 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8125 	else
   8126 		return rxq->rxq_descs[idx].wrx_status;
   8127 }
   8128 
   8129 static inline uint32_t
   8130 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8131 {
   8132 	struct wm_softc *sc = rxq->rxq_sc;
   8133 
   8134 	if (sc->sc_type == WM_T_82574)
   8135 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8136 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8137 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8138 	else
   8139 		return rxq->rxq_descs[idx].wrx_errors;
   8140 }
   8141 
   8142 static inline uint16_t
   8143 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8144 {
   8145 	struct wm_softc *sc = rxq->rxq_sc;
   8146 
   8147 	if (sc->sc_type == WM_T_82574)
   8148 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8149 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8150 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8151 	else
   8152 		return rxq->rxq_descs[idx].wrx_special;
   8153 }
   8154 
   8155 static inline int
   8156 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8157 {
   8158 	struct wm_softc *sc = rxq->rxq_sc;
   8159 
   8160 	if (sc->sc_type == WM_T_82574)
   8161 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8162 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8163 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8164 	else
   8165 		return rxq->rxq_descs[idx].wrx_len;
   8166 }
   8167 
   8168 #ifdef WM_DEBUG
   8169 static inline uint32_t
   8170 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8171 {
   8172 	struct wm_softc *sc = rxq->rxq_sc;
   8173 
   8174 	if (sc->sc_type == WM_T_82574)
   8175 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8176 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8177 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8178 	else
   8179 		return 0;
   8180 }
   8181 
   8182 static inline uint8_t
   8183 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8184 {
   8185 	struct wm_softc *sc = rxq->rxq_sc;
   8186 
   8187 	if (sc->sc_type == WM_T_82574)
   8188 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8189 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8190 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8191 	else
   8192 		return 0;
   8193 }
   8194 #endif /* WM_DEBUG */
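
/*
 * The accessors above hide the three receive descriptor formats this
 * driver supports: the legacy format (wrx_*), the 82574 extended
 * format (erx_*) and the newqueue (82575 and later) format (nqrx_*),
 * selected by sc_type / WM_F_NEWQUEUE.  Callers such as wm_rxeof()
 * can therefore handle status, errors, length and the VLAN tag
 * uniformly, regardless of chip generation.
 */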
   8195 
   8196 static inline bool
   8197 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8198     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8199 {
   8200 
   8201 	if (sc->sc_type == WM_T_82574)
   8202 		return (status & ext_bit) != 0;
   8203 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8204 		return (status & nq_bit) != 0;
   8205 	else
   8206 		return (status & legacy_bit) != 0;
   8207 }
   8208 
   8209 static inline bool
   8210 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8211     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8212 {
   8213 
   8214 	if (sc->sc_type == WM_T_82574)
   8215 		return (error & ext_bit) != 0;
   8216 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8217 		return (error & nq_bit) != 0;
   8218 	else
   8219 		return (error & legacy_bit) != 0;
   8220 }
   8221 
   8222 static inline bool
   8223 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8224 {
   8225 
   8226 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8227 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8228 		return true;
   8229 	else
   8230 		return false;
   8231 }
   8232 
   8233 static inline bool
   8234 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8235 {
   8236 	struct wm_softc *sc = rxq->rxq_sc;
   8237 
    8238 	/* XXX Missing error bit for newqueue? */
   8239 	if (wm_rxdesc_is_set_error(sc, errors,
   8240 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8241 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8242 		NQRXC_ERROR_RXE)) {
   8243 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8244 			log(LOG_WARNING, "%s: symbol error\n",
   8245 			    device_xname(sc->sc_dev));
   8246 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8247 			log(LOG_WARNING, "%s: receive sequence error\n",
   8248 			    device_xname(sc->sc_dev));
   8249 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8250 			log(LOG_WARNING, "%s: CRC error\n",
   8251 			    device_xname(sc->sc_dev));
   8252 		return true;
   8253 	}
   8254 
   8255 	return false;
   8256 }
   8257 
   8258 static inline bool
   8259 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8260 {
   8261 	struct wm_softc *sc = rxq->rxq_sc;
   8262 
   8263 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8264 		NQRXC_STATUS_DD)) {
   8265 		/* We have processed all of the receive descriptors. */
   8266 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8267 		return false;
   8268 	}
   8269 
   8270 	return true;
   8271 }
   8272 
   8273 static inline bool
   8274 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8275     struct mbuf *m)
   8276 {
   8277 
   8278 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8279 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8280 		vlan_set_tag(m, le16toh(vlantag));
   8281 	}
   8282 
   8283 	return true;
   8284 }
   8285 
   8286 static inline void
   8287 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8288     uint32_t errors, struct mbuf *m)
   8289 {
   8290 	struct wm_softc *sc = rxq->rxq_sc;
   8291 
   8292 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8293 		if (wm_rxdesc_is_set_status(sc, status,
   8294 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8295 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8296 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8297 			if (wm_rxdesc_is_set_error(sc, errors,
   8298 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8299 				m->m_pkthdr.csum_flags |=
   8300 					M_CSUM_IPv4_BAD;
   8301 		}
   8302 		if (wm_rxdesc_is_set_status(sc, status,
   8303 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8304 			/*
   8305 			 * Note: we don't know if this was TCP or UDP,
   8306 			 * so we just set both bits, and expect the
   8307 			 * upper layers to deal.
   8308 			 */
   8309 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8310 			m->m_pkthdr.csum_flags |=
   8311 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8312 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8313 			if (wm_rxdesc_is_set_error(sc, errors,
   8314 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8315 				m->m_pkthdr.csum_flags |=
   8316 					M_CSUM_TCP_UDP_BAD;
   8317 		}
   8318 	}
   8319 }
   8320 
   8321 /*
   8322  * wm_rxeof:
   8323  *
   8324  *	Helper; handle receive interrupts.
   8325  */
   8326 static void
   8327 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8328 {
   8329 	struct wm_softc *sc = rxq->rxq_sc;
   8330 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8331 	struct wm_rxsoft *rxs;
   8332 	struct mbuf *m;
   8333 	int i, len;
   8334 	int count = 0;
   8335 	uint32_t status, errors;
   8336 	uint16_t vlantag;
   8337 
   8338 	KASSERT(mutex_owned(rxq->rxq_lock));
   8339 
   8340 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8341 		if (limit-- == 0) {
   8342 			rxq->rxq_ptr = i;
   8343 			break;
   8344 		}
   8345 
   8346 		rxs = &rxq->rxq_soft[i];
   8347 
   8348 		DPRINTF(WM_DEBUG_RX,
   8349 		    ("%s: RX: checking descriptor %d\n",
   8350 		    device_xname(sc->sc_dev), i));
    8351 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8352 
   8353 		status = wm_rxdesc_get_status(rxq, i);
   8354 		errors = wm_rxdesc_get_errors(rxq, i);
   8355 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8356 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8357 #ifdef WM_DEBUG
   8358 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8359 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8360 #endif
   8361 
   8362 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8363 			/*
    8364 			 * Update the receive pointer while holding
    8365 			 * rxq_lock, consistent with the counter increment.
   8366 			 */
   8367 			rxq->rxq_ptr = i;
   8368 			break;
   8369 		}
   8370 
   8371 		count++;
   8372 		if (__predict_false(rxq->rxq_discard)) {
   8373 			DPRINTF(WM_DEBUG_RX,
   8374 			    ("%s: RX: discarding contents of descriptor %d\n",
   8375 			    device_xname(sc->sc_dev), i));
   8376 			wm_init_rxdesc(rxq, i);
   8377 			if (wm_rxdesc_is_eop(rxq, status)) {
   8378 				/* Reset our state. */
   8379 				DPRINTF(WM_DEBUG_RX,
   8380 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8381 				    device_xname(sc->sc_dev)));
   8382 				rxq->rxq_discard = 0;
   8383 			}
   8384 			continue;
   8385 		}
   8386 
   8387 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8388 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8389 
   8390 		m = rxs->rxs_mbuf;
   8391 
   8392 		/*
   8393 		 * Add a new receive buffer to the ring, unless of
   8394 		 * course the length is zero. Treat the latter as a
   8395 		 * failed mapping.
   8396 		 */
   8397 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8398 			/*
   8399 			 * Failed, throw away what we've done so
   8400 			 * far, and discard the rest of the packet.
   8401 			 */
   8402 			ifp->if_ierrors++;
   8403 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8404 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8405 			wm_init_rxdesc(rxq, i);
   8406 			if (!wm_rxdesc_is_eop(rxq, status))
   8407 				rxq->rxq_discard = 1;
   8408 			if (rxq->rxq_head != NULL)
   8409 				m_freem(rxq->rxq_head);
   8410 			WM_RXCHAIN_RESET(rxq);
   8411 			DPRINTF(WM_DEBUG_RX,
   8412 			    ("%s: RX: Rx buffer allocation failed, "
   8413 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8414 			    rxq->rxq_discard ? " (discard)" : ""));
   8415 			continue;
   8416 		}
   8417 
   8418 		m->m_len = len;
   8419 		rxq->rxq_len += len;
   8420 		DPRINTF(WM_DEBUG_RX,
   8421 		    ("%s: RX: buffer at %p len %d\n",
   8422 		    device_xname(sc->sc_dev), m->m_data, len));
   8423 
   8424 		/* If this is not the end of the packet, keep looking. */
   8425 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8426 			WM_RXCHAIN_LINK(rxq, m);
   8427 			DPRINTF(WM_DEBUG_RX,
   8428 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8429 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8430 			continue;
   8431 		}
   8432 
    8433 		/*
    8434 		 * Okay, we have the entire packet now.  The chip is
    8435 		 * configured to include the FCS except on the I350,
    8436 		 * I354 and I21[01] (not all chips can be configured to
    8437 		 * strip it), so we need to trim it.  We may also need
    8438 		 * to adjust the length of the previous mbuf in the
    8439 		 * chain if the current mbuf is too short.  Due to an
    8440 		 * erratum, the RCTL_SECRC bit in the RCTL register is
    8441 		 * always set on the I350, so we don't trim there.
    8442 		 */
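		/*
		 * Worked example (hypothetical sizes): a 1518-byte frame
		 * split as 1516 + 2 bytes leaves m->m_len == 2, which is
		 * less than ETHER_CRC_LEN (4), so the previous mbuf is
		 * shortened by the 2 missing FCS bytes and this mbuf's
		 * length is set to 0.
		 */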
   8443 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8444 		    && (sc->sc_type != WM_T_I210)
   8445 		    && (sc->sc_type != WM_T_I211)) {
   8446 			if (m->m_len < ETHER_CRC_LEN) {
   8447 				rxq->rxq_tail->m_len
   8448 				    -= (ETHER_CRC_LEN - m->m_len);
   8449 				m->m_len = 0;
   8450 			} else
   8451 				m->m_len -= ETHER_CRC_LEN;
   8452 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8453 		} else
   8454 			len = rxq->rxq_len;
   8455 
   8456 		WM_RXCHAIN_LINK(rxq, m);
   8457 
   8458 		*rxq->rxq_tailp = NULL;
   8459 		m = rxq->rxq_head;
   8460 
   8461 		WM_RXCHAIN_RESET(rxq);
   8462 
   8463 		DPRINTF(WM_DEBUG_RX,
   8464 		    ("%s: RX: have entire packet, len -> %d\n",
   8465 		    device_xname(sc->sc_dev), len));
   8466 
   8467 		/* If an error occurred, update stats and drop the packet. */
   8468 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8469 			m_freem(m);
   8470 			continue;
   8471 		}
   8472 
   8473 		/* No errors.  Receive the packet. */
   8474 		m_set_rcvif(m, ifp);
   8475 		m->m_pkthdr.len = len;
   8476 		/*
   8477 		 * TODO
    8478 		 * We should save the rsshash and rsstype in this mbuf.
   8479 		 */
   8480 		DPRINTF(WM_DEBUG_RX,
   8481 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8482 			device_xname(sc->sc_dev), rsstype, rsshash));
   8483 
   8484 		/*
   8485 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8486 		 * for us.  Associate the tag with the packet.
   8487 		 */
   8488 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8489 			continue;
   8490 
   8491 		/* Set up checksum info for this packet. */
   8492 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8493 		/*
    8494 		 * Update the receive pointer while holding rxq_lock,
    8495 		 * consistent with the counter increment.
   8496 		 */
   8497 		rxq->rxq_ptr = i;
   8498 		rxq->rxq_packets++;
   8499 		rxq->rxq_bytes += len;
   8500 		mutex_exit(rxq->rxq_lock);
   8501 
   8502 		/* Pass it on. */
   8503 		if_percpuq_enqueue(sc->sc_ipq, m);
   8504 
   8505 		mutex_enter(rxq->rxq_lock);
   8506 
   8507 		if (rxq->rxq_stopping)
   8508 			break;
   8509 	}
   8510 
   8511 	if (count != 0)
   8512 		rnd_add_uint32(&sc->rnd_source, count);
   8513 
   8514 	DPRINTF(WM_DEBUG_RX,
   8515 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8516 }
   8517 
   8518 /*
   8519  * wm_linkintr_gmii:
   8520  *
   8521  *	Helper; handle link interrupts for GMII.
   8522  */
   8523 static void
   8524 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8525 {
   8526 
   8527 	KASSERT(WM_CORE_LOCKED(sc));
   8528 
   8529 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8530 		__func__));
   8531 
   8532 	if (icr & ICR_LSC) {
   8533 		uint32_t reg;
   8534 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8535 
   8536 		if ((status & STATUS_LU) != 0) {
   8537 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8538 				device_xname(sc->sc_dev),
   8539 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8540 		} else {
   8541 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8542 				device_xname(sc->sc_dev)));
   8543 		}
   8544 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8545 			wm_gig_downshift_workaround_ich8lan(sc);
   8546 
   8547 		if ((sc->sc_type == WM_T_ICH8)
   8548 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8549 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8550 		}
   8551 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8552 			device_xname(sc->sc_dev)));
   8553 		mii_pollstat(&sc->sc_mii);
   8554 		if (sc->sc_type == WM_T_82543) {
   8555 			int miistatus, active;
   8556 
   8557 			/*
   8558 			 * With 82543, we need to force speed and
   8559 			 * duplex on the MAC equal to what the PHY
   8560 			 * speed and duplex configuration is.
   8561 			 */
   8562 			miistatus = sc->sc_mii.mii_media_status;
   8563 
   8564 			if (miistatus & IFM_ACTIVE) {
   8565 				active = sc->sc_mii.mii_media_active;
   8566 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8567 				switch (IFM_SUBTYPE(active)) {
   8568 				case IFM_10_T:
   8569 					sc->sc_ctrl |= CTRL_SPEED_10;
   8570 					break;
   8571 				case IFM_100_TX:
   8572 					sc->sc_ctrl |= CTRL_SPEED_100;
   8573 					break;
   8574 				case IFM_1000_T:
   8575 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8576 					break;
   8577 				default:
   8578 					/*
   8579 					 * fiber?
    8580 					 * Should not enter here.
   8581 					 */
   8582 					printf("unknown media (%x)\n", active);
   8583 					break;
   8584 				}
   8585 				if (active & IFM_FDX)
   8586 					sc->sc_ctrl |= CTRL_FD;
   8587 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8588 			}
   8589 		} else if (sc->sc_type == WM_T_PCH) {
   8590 			wm_k1_gig_workaround_hv(sc,
   8591 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8592 		}
   8593 
   8594 		if ((sc->sc_phytype == WMPHY_82578)
   8595 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8596 			== IFM_1000_T)) {
   8597 
   8598 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8599 				delay(200*1000); /* XXX too big */
   8600 
   8601 				/* Link stall fix for link up */
   8602 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8603 				    HV_MUX_DATA_CTRL,
   8604 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8605 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8606 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8607 				    HV_MUX_DATA_CTRL,
   8608 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8609 			}
   8610 		}
   8611 		/*
   8612 		 * I217 Packet Loss issue:
   8613 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8614 		 * on power up.
   8615 		 * Set the Beacon Duration for I217 to 8 usec
   8616 		 */
   8617 		if ((sc->sc_type == WM_T_PCH_LPT)
   8618 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8619 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8620 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8621 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8622 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8623 		}
   8624 
   8625 		/* XXX Work-around I218 hang issue */
   8626 		/* e1000_k1_workaround_lpt_lp() */
   8627 
   8628 		if ((sc->sc_type == WM_T_PCH_LPT)
   8629 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8630 			/*
   8631 			 * Set platform power management values for Latency
   8632 			 * Tolerance Reporting (LTR)
   8633 			 */
   8634 			wm_platform_pm_pch_lpt(sc,
   8635 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8636 				    != 0));
   8637 		}
   8638 
   8639 		/* FEXTNVM6 K1-off workaround */
   8640 		if (sc->sc_type == WM_T_PCH_SPT) {
   8641 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8642 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8643 			    & FEXTNVM6_K1_OFF_ENABLE)
   8644 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8645 			else
   8646 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8647 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8648 		}
   8649 	} else if (icr & ICR_RXSEQ) {
   8650 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8651 			device_xname(sc->sc_dev)));
   8652 	}
   8653 }
   8654 
   8655 /*
   8656  * wm_linkintr_tbi:
   8657  *
   8658  *	Helper; handle link interrupts for TBI mode.
   8659  */
   8660 static void
   8661 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8662 {
   8663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8664 	uint32_t status;
   8665 
   8666 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8667 		__func__));
   8668 
   8669 	status = CSR_READ(sc, WMREG_STATUS);
   8670 	if (icr & ICR_LSC) {
   8671 		if (status & STATUS_LU) {
   8672 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8673 			    device_xname(sc->sc_dev),
   8674 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8675 			/*
   8676 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8677 			 * so we should update sc->sc_ctrl
   8678 			 */
   8679 
   8680 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8681 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8682 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8683 			if (status & STATUS_FD)
   8684 				sc->sc_tctl |=
   8685 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8686 			else
   8687 				sc->sc_tctl |=
   8688 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8689 			if (sc->sc_ctrl & CTRL_TFCE)
   8690 				sc->sc_fcrtl |= FCRTL_XONE;
   8691 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8692 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8693 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8694 				      sc->sc_fcrtl);
   8695 			sc->sc_tbi_linkup = 1;
   8696 			if_link_state_change(ifp, LINK_STATE_UP);
   8697 		} else {
   8698 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8699 			    device_xname(sc->sc_dev)));
   8700 			sc->sc_tbi_linkup = 0;
   8701 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8702 		}
   8703 		/* Update LED */
   8704 		wm_tbi_serdes_set_linkled(sc);
   8705 	} else if (icr & ICR_RXSEQ) {
   8706 		DPRINTF(WM_DEBUG_LINK,
   8707 		    ("%s: LINK: Receive sequence error\n",
   8708 		    device_xname(sc->sc_dev)));
   8709 	}
   8710 }
   8711 
   8712 /*
   8713  * wm_linkintr_serdes:
   8714  *
    8715  *	Helper; handle link interrupts for SERDES mode.
   8716  */
   8717 static void
   8718 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8719 {
   8720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8721 	struct mii_data *mii = &sc->sc_mii;
   8722 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8723 	uint32_t pcs_adv, pcs_lpab, reg;
   8724 
   8725 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8726 		__func__));
   8727 
   8728 	if (icr & ICR_LSC) {
   8729 		/* Check PCS */
   8730 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8731 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8732 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8733 				device_xname(sc->sc_dev)));
   8734 			mii->mii_media_status |= IFM_ACTIVE;
   8735 			sc->sc_tbi_linkup = 1;
   8736 			if_link_state_change(ifp, LINK_STATE_UP);
   8737 		} else {
   8738 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8739 				device_xname(sc->sc_dev)));
   8740 			mii->mii_media_status |= IFM_NONE;
   8741 			sc->sc_tbi_linkup = 0;
   8742 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8743 			wm_tbi_serdes_set_linkled(sc);
   8744 			return;
   8745 		}
   8746 		mii->mii_media_active |= IFM_1000_SX;
   8747 		if ((reg & PCS_LSTS_FDX) != 0)
   8748 			mii->mii_media_active |= IFM_FDX;
   8749 		else
   8750 			mii->mii_media_active |= IFM_HDX;
   8751 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8752 			/* Check flow */
   8753 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8754 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8755 				DPRINTF(WM_DEBUG_LINK,
   8756 				    ("XXX LINKOK but not ACOMP\n"));
   8757 				return;
   8758 			}
   8759 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8760 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8761 			DPRINTF(WM_DEBUG_LINK,
   8762 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
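			/*
			 * Resolve flow control as in the standard 802.3
			 * Annex 28B priority table: symmetric pause on
			 * both sides enables TX and RX pause, while an
			 * asymmetric-only local advertisement against a
			 * partner advertising both enables pause in a
			 * single direction, as the chain below works out.
			 */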
   8763 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8764 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8765 				mii->mii_media_active |= IFM_FLOW
   8766 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8767 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8768 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8769 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8770 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8771 				mii->mii_media_active |= IFM_FLOW
   8772 				    | IFM_ETH_TXPAUSE;
   8773 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8774 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8775 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8776 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8777 				mii->mii_media_active |= IFM_FLOW
   8778 				    | IFM_ETH_RXPAUSE;
   8779 		}
   8780 		/* Update LED */
   8781 		wm_tbi_serdes_set_linkled(sc);
   8782 	} else {
   8783 		DPRINTF(WM_DEBUG_LINK,
   8784 		    ("%s: LINK: Receive sequence error\n",
   8785 		    device_xname(sc->sc_dev)));
   8786 	}
   8787 }
   8788 
   8789 /*
   8790  * wm_linkintr:
   8791  *
   8792  *	Helper; handle link interrupts.
   8793  */
   8794 static void
   8795 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8796 {
   8797 
   8798 	KASSERT(WM_CORE_LOCKED(sc));
   8799 
   8800 	if (sc->sc_flags & WM_F_HAS_MII)
   8801 		wm_linkintr_gmii(sc, icr);
   8802 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8803 	    && (sc->sc_type >= WM_T_82575))
   8804 		wm_linkintr_serdes(sc, icr);
   8805 	else
   8806 		wm_linkintr_tbi(sc, icr);
   8807 }
   8808 
   8809 /*
   8810  * wm_intr_legacy:
   8811  *
   8812  *	Interrupt service routine for INTx and MSI.
   8813  */
   8814 static int
   8815 wm_intr_legacy(void *arg)
   8816 {
   8817 	struct wm_softc *sc = arg;
   8818 	struct wm_queue *wmq = &sc->sc_queue[0];
   8819 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8820 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8821 	uint32_t icr, rndval = 0;
   8822 	int handled = 0;
   8823 
   8824 	while (1 /* CONSTCOND */) {
   8825 		icr = CSR_READ(sc, WMREG_ICR);
   8826 		if ((icr & sc->sc_icr) == 0)
   8827 			break;
   8828 		if (handled == 0) {
   8829 			DPRINTF(WM_DEBUG_TX,
   8830 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8831 		}
   8832 		if (rndval == 0)
   8833 			rndval = icr;
   8834 
   8835 		mutex_enter(rxq->rxq_lock);
   8836 
   8837 		if (rxq->rxq_stopping) {
   8838 			mutex_exit(rxq->rxq_lock);
   8839 			break;
   8840 		}
   8841 
   8842 		handled = 1;
   8843 
   8844 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8845 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8846 			DPRINTF(WM_DEBUG_RX,
   8847 			    ("%s: RX: got Rx intr 0x%08x\n",
   8848 			    device_xname(sc->sc_dev),
   8849 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8850 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8851 		}
   8852 #endif
   8853 		/*
   8854 		 * wm_rxeof() does *not* call upper layer functions directly,
    8855 		 * as if_percpuq_enqueue() just calls softint_schedule().
   8856 		 * So, we can call wm_rxeof() in interrupt context.
   8857 		 */
   8858 		wm_rxeof(rxq, UINT_MAX);
   8859 
   8860 		mutex_exit(rxq->rxq_lock);
   8861 		mutex_enter(txq->txq_lock);
   8862 
   8863 		if (txq->txq_stopping) {
   8864 			mutex_exit(txq->txq_lock);
   8865 			break;
   8866 		}
   8867 
   8868 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8869 		if (icr & ICR_TXDW) {
   8870 			DPRINTF(WM_DEBUG_TX,
   8871 			    ("%s: TX: got TXDW interrupt\n",
   8872 			    device_xname(sc->sc_dev)));
   8873 			WM_Q_EVCNT_INCR(txq, txdw);
   8874 		}
   8875 #endif
   8876 		wm_txeof(sc, txq);
   8877 
   8878 		mutex_exit(txq->txq_lock);
   8879 		WM_CORE_LOCK(sc);
   8880 
   8881 		if (sc->sc_core_stopping) {
   8882 			WM_CORE_UNLOCK(sc);
   8883 			break;
   8884 		}
   8885 
   8886 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8887 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8888 			wm_linkintr(sc, icr);
   8889 		}
   8890 
   8891 		WM_CORE_UNLOCK(sc);
   8892 
   8893 		if (icr & ICR_RXO) {
   8894 #if defined(WM_DEBUG)
   8895 			log(LOG_WARNING, "%s: Receive overrun\n",
   8896 			    device_xname(sc->sc_dev));
   8897 #endif /* defined(WM_DEBUG) */
   8898 		}
   8899 	}
   8900 
   8901 	rnd_add_uint32(&sc->rnd_source, rndval);
   8902 
   8903 	if (handled) {
   8904 		/* Try to get more packets going. */
   8905 		softint_schedule(wmq->wmq_si);
   8906 	}
   8907 
   8908 	return handled;
   8909 }
   8910 
   8911 static inline void
   8912 wm_txrxintr_disable(struct wm_queue *wmq)
   8913 {
   8914 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8915 
   8916 	if (sc->sc_type == WM_T_82574)
   8917 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8918 	else if (sc->sc_type == WM_T_82575)
   8919 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8920 	else
   8921 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8922 }
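
/*
 * Note: the 82574 masks per-queue interrupts via IMC/IMS using the
 * dedicated ICR_TXQ/ICR_RXQ bits, the 82575 via EIMC/EIMS using the
 * EITR_TX_QUEUE/EITR_RX_QUEUE bits, and later multiqueue chips use
 * one EIMC/EIMS bit per MSI-X vector (wmq_intr_idx).
 */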
   8923 
   8924 static inline void
   8925 wm_txrxintr_enable(struct wm_queue *wmq)
   8926 {
   8927 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8928 
   8929 	wm_itrs_calculate(sc, wmq);
   8930 
   8931 	if (sc->sc_type == WM_T_82574)
   8932 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8933 	else if (sc->sc_type == WM_T_82575)
   8934 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8935 	else
   8936 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8937 }
   8938 
   8939 static int
   8940 wm_txrxintr_msix(void *arg)
   8941 {
   8942 	struct wm_queue *wmq = arg;
   8943 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8944 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8945 	struct wm_softc *sc = txq->txq_sc;
   8946 	u_int limit = sc->sc_rx_intr_process_limit;
   8947 
   8948 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8949 
   8950 	DPRINTF(WM_DEBUG_TX,
   8951 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8952 
   8953 	wm_txrxintr_disable(wmq);
   8954 
   8955 	mutex_enter(txq->txq_lock);
   8956 
   8957 	if (txq->txq_stopping) {
   8958 		mutex_exit(txq->txq_lock);
   8959 		return 0;
   8960 	}
   8961 
   8962 	WM_Q_EVCNT_INCR(txq, txdw);
   8963 	wm_txeof(sc, txq);
    8964 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8965 	mutex_exit(txq->txq_lock);
   8966 
   8967 	DPRINTF(WM_DEBUG_RX,
   8968 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8969 	mutex_enter(rxq->rxq_lock);
   8970 
   8971 	if (rxq->rxq_stopping) {
   8972 		mutex_exit(rxq->rxq_lock);
   8973 		return 0;
   8974 	}
   8975 
   8976 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8977 	wm_rxeof(rxq, limit);
   8978 	mutex_exit(rxq->rxq_lock);
   8979 
   8980 	wm_itrs_writereg(sc, wmq);
   8981 
   8982 	softint_schedule(wmq->wmq_si);
   8983 
   8984 	return 1;
   8985 }
   8986 
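/*
 * wm_handle_queue:
 *
 *	Softint handler for deferred per-queue TX/RX work.  In the
 *	MSI-X path, wm_txrxintr_msix() schedules it with the queue's
 *	interrupt masked; once the deferred start and the remaining
 *	TX/RX work are done, wm_txrxintr_enable() unmasks it again.
 */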
   8987 static void
   8988 wm_handle_queue(void *arg)
   8989 {
   8990 	struct wm_queue *wmq = arg;
   8991 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8992 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8993 	struct wm_softc *sc = txq->txq_sc;
   8994 	u_int limit = sc->sc_rx_process_limit;
   8995 
   8996 	mutex_enter(txq->txq_lock);
   8997 	if (txq->txq_stopping) {
   8998 		mutex_exit(txq->txq_lock);
   8999 		return;
   9000 	}
   9001 	wm_txeof(sc, txq);
   9002 	wm_deferred_start_locked(txq);
   9003 	mutex_exit(txq->txq_lock);
   9004 
   9005 	mutex_enter(rxq->rxq_lock);
   9006 	if (rxq->rxq_stopping) {
   9007 		mutex_exit(rxq->rxq_lock);
   9008 		return;
   9009 	}
   9010 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9011 	wm_rxeof(rxq, limit);
   9012 	mutex_exit(rxq->rxq_lock);
   9013 
   9014 	wm_txrxintr_enable(wmq);
   9015 }
   9016 
   9017 /*
   9018  * wm_linkintr_msix:
   9019  *
   9020  *	Interrupt service routine for link status change for MSI-X.
   9021  */
   9022 static int
   9023 wm_linkintr_msix(void *arg)
   9024 {
   9025 	struct wm_softc *sc = arg;
   9026 	uint32_t reg;
   9027 
   9028 	DPRINTF(WM_DEBUG_LINK,
   9029 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9030 
   9031 	reg = CSR_READ(sc, WMREG_ICR);
   9032 	WM_CORE_LOCK(sc);
   9033 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   9034 		goto out;
   9035 
   9036 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9037 	wm_linkintr(sc, ICR_LSC);
   9038 
   9039 out:
   9040 	WM_CORE_UNLOCK(sc);
   9041 
   9042 	if (sc->sc_type == WM_T_82574)
   9043 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9044 	else if (sc->sc_type == WM_T_82575)
   9045 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9046 	else
   9047 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9048 
   9049 	return 1;
   9050 }
   9051 
   9052 /*
   9053  * Media related.
   9054  * GMII, SGMII, TBI (and SERDES)
   9055  */
   9056 
   9057 /* Common */
   9058 
   9059 /*
   9060  * wm_tbi_serdes_set_linkled:
   9061  *
   9062  *	Update the link LED on TBI and SERDES devices.
   9063  */
   9064 static void
   9065 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9066 {
   9067 
   9068 	if (sc->sc_tbi_linkup)
   9069 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9070 	else
   9071 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9072 
   9073 	/* 82540 or newer devices are active low */
   9074 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
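	/*
	 * E.g. with link up on an 82540 or newer chip, the bit is set
	 * above and then inverted by the XOR, so the active-low LED
	 * pin is driven low (asserted).
	 */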
   9075 
   9076 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9077 }
   9078 
   9079 /* GMII related */
   9080 
   9081 /*
   9082  * wm_gmii_reset:
   9083  *
   9084  *	Reset the PHY.
   9085  */
   9086 static void
   9087 wm_gmii_reset(struct wm_softc *sc)
   9088 {
   9089 	uint32_t reg;
   9090 	int rv;
   9091 
   9092 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9093 		device_xname(sc->sc_dev), __func__));
   9094 
   9095 	rv = sc->phy.acquire(sc);
   9096 	if (rv != 0) {
   9097 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9098 		    __func__);
   9099 		return;
   9100 	}
   9101 
   9102 	switch (sc->sc_type) {
   9103 	case WM_T_82542_2_0:
   9104 	case WM_T_82542_2_1:
   9105 		/* null */
   9106 		break;
   9107 	case WM_T_82543:
   9108 		/*
   9109 		 * With 82543, we need to force speed and duplex on the MAC
   9110 		 * equal to what the PHY speed and duplex configuration is.
   9111 		 * In addition, we need to perform a hardware reset on the PHY
   9112 		 * to take it out of reset.
   9113 		 */
   9114 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9115 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9116 
   9117 		/* The PHY reset pin is active-low. */
   9118 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9119 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9120 		    CTRL_EXT_SWDPIN(4));
   9121 		reg |= CTRL_EXT_SWDPIO(4);
   9122 
   9123 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9124 		CSR_WRITE_FLUSH(sc);
   9125 		delay(10*1000);
   9126 
   9127 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9128 		CSR_WRITE_FLUSH(sc);
   9129 		delay(150);
   9130 #if 0
   9131 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9132 #endif
   9133 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9134 		break;
   9135 	case WM_T_82544:	/* reset 10000us */
   9136 	case WM_T_82540:
   9137 	case WM_T_82545:
   9138 	case WM_T_82545_3:
   9139 	case WM_T_82546:
   9140 	case WM_T_82546_3:
   9141 	case WM_T_82541:
   9142 	case WM_T_82541_2:
   9143 	case WM_T_82547:
   9144 	case WM_T_82547_2:
   9145 	case WM_T_82571:	/* reset 100us */
   9146 	case WM_T_82572:
   9147 	case WM_T_82573:
   9148 	case WM_T_82574:
   9149 	case WM_T_82575:
   9150 	case WM_T_82576:
   9151 	case WM_T_82580:
   9152 	case WM_T_I350:
   9153 	case WM_T_I354:
   9154 	case WM_T_I210:
   9155 	case WM_T_I211:
   9156 	case WM_T_82583:
   9157 	case WM_T_80003:
   9158 		/* generic reset */
   9159 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9160 		CSR_WRITE_FLUSH(sc);
   9161 		delay(20000);
   9162 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9163 		CSR_WRITE_FLUSH(sc);
   9164 		delay(20000);
   9165 
   9166 		if ((sc->sc_type == WM_T_82541)
   9167 		    || (sc->sc_type == WM_T_82541_2)
   9168 		    || (sc->sc_type == WM_T_82547)
   9169 		    || (sc->sc_type == WM_T_82547_2)) {
    9170 			/* Workarounds for IGP are done in igp_reset() */
   9171 			/* XXX add code to set LED after phy reset */
   9172 		}
   9173 		break;
   9174 	case WM_T_ICH8:
   9175 	case WM_T_ICH9:
   9176 	case WM_T_ICH10:
   9177 	case WM_T_PCH:
   9178 	case WM_T_PCH2:
   9179 	case WM_T_PCH_LPT:
   9180 	case WM_T_PCH_SPT:
   9181 		/* generic reset */
   9182 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9183 		CSR_WRITE_FLUSH(sc);
   9184 		delay(100);
   9185 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9186 		CSR_WRITE_FLUSH(sc);
   9187 		delay(150);
   9188 		break;
   9189 	default:
   9190 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9191 		    __func__);
   9192 		break;
   9193 	}
   9194 
   9195 	sc->phy.release(sc);
   9196 
   9197 	/* get_cfg_done */
   9198 	wm_get_cfg_done(sc);
   9199 
   9200 	/* extra setup */
   9201 	switch (sc->sc_type) {
   9202 	case WM_T_82542_2_0:
   9203 	case WM_T_82542_2_1:
   9204 	case WM_T_82543:
   9205 	case WM_T_82544:
   9206 	case WM_T_82540:
   9207 	case WM_T_82545:
   9208 	case WM_T_82545_3:
   9209 	case WM_T_82546:
   9210 	case WM_T_82546_3:
   9211 	case WM_T_82541_2:
   9212 	case WM_T_82547_2:
   9213 	case WM_T_82571:
   9214 	case WM_T_82572:
   9215 	case WM_T_82573:
   9216 	case WM_T_82574:
   9217 	case WM_T_82583:
   9218 	case WM_T_82575:
   9219 	case WM_T_82576:
   9220 	case WM_T_82580:
   9221 	case WM_T_I350:
   9222 	case WM_T_I354:
   9223 	case WM_T_I210:
   9224 	case WM_T_I211:
   9225 	case WM_T_80003:
   9226 		/* null */
   9227 		break;
   9228 	case WM_T_82541:
   9229 	case WM_T_82547:
    9230 		/* XXX Configure the LED after PHY reset */
   9231 		break;
   9232 	case WM_T_ICH8:
   9233 	case WM_T_ICH9:
   9234 	case WM_T_ICH10:
   9235 	case WM_T_PCH:
   9236 	case WM_T_PCH2:
   9237 	case WM_T_PCH_LPT:
   9238 	case WM_T_PCH_SPT:
   9239 		wm_phy_post_reset(sc);
   9240 		break;
   9241 	default:
   9242 		panic("%s: unknown type\n", __func__);
   9243 		break;
   9244 	}
   9245 }
   9246 
   9247 /*
   9248  * Setup sc_phytype and mii_{read|write}reg.
   9249  *
    9250  *  To identify the PHY type, the correct read/write functions must be
    9251  * selected. To select them, the PCI ID or MAC type must be used, since
    9252  * PHY registers cannot be accessed yet.
    9253  *
    9254  *  On the first call of this function, the PHY ID is not known yet.
    9255  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9256  * so the result might be incorrect.
    9257  *
    9258  *  On the second call, the PHY OUI and model are used to identify the
    9259  * PHY type. It might still be imperfect because of missing entries in
    9260  * the comparison table, but it should be better than the first call.
    9261  *
    9262  *  If the newly detected result differs from the previous assumption,
    9263  * a diagnostic message is printed.
   9264  */
   9265 static void
   9266 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9267     uint16_t phy_model)
   9268 {
   9269 	device_t dev = sc->sc_dev;
   9270 	struct mii_data *mii = &sc->sc_mii;
   9271 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9272 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9273 	mii_readreg_t new_readreg;
   9274 	mii_writereg_t new_writereg;
   9275 
   9276 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9277 		device_xname(sc->sc_dev), __func__));
   9278 
   9279 	if (mii->mii_readreg == NULL) {
   9280 		/*
   9281 		 *  This is the first call of this function. For ICH and PCH
   9282 		 * variants, it's difficult to determine the PHY access method
   9283 		 * by sc_type, so use the PCI product ID for some devices.
   9284 		 */
   9285 
   9286 		switch (sc->sc_pcidevid) {
   9287 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9288 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9289 			/* 82577 */
   9290 			new_phytype = WMPHY_82577;
   9291 			break;
   9292 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9293 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9294 			/* 82578 */
   9295 			new_phytype = WMPHY_82578;
   9296 			break;
   9297 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9298 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9299 			/* 82579 */
   9300 			new_phytype = WMPHY_82579;
   9301 			break;
   9302 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9303 		case PCI_PRODUCT_INTEL_82801I_BM:
   9304 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9305 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9306 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9307 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9308 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9309 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9310 			/* ICH8, 9, 10 with 82567 */
   9311 			new_phytype = WMPHY_BM;
   9312 			break;
   9313 		default:
   9314 			break;
   9315 		}
   9316 	} else {
   9317 		/* It's not the first call. Use PHY OUI and model */
   9318 		switch (phy_oui) {
   9319 		case MII_OUI_ATHEROS: /* XXX ??? */
   9320 			switch (phy_model) {
   9321 			case 0x0004: /* XXX */
   9322 				new_phytype = WMPHY_82578;
   9323 				break;
   9324 			default:
   9325 				break;
   9326 			}
   9327 			break;
   9328 		case MII_OUI_xxMARVELL:
   9329 			switch (phy_model) {
   9330 			case MII_MODEL_xxMARVELL_I210:
   9331 				new_phytype = WMPHY_I210;
   9332 				break;
   9333 			case MII_MODEL_xxMARVELL_E1011:
   9334 			case MII_MODEL_xxMARVELL_E1000_3:
   9335 			case MII_MODEL_xxMARVELL_E1000_5:
   9336 			case MII_MODEL_xxMARVELL_E1112:
   9337 				new_phytype = WMPHY_M88;
   9338 				break;
   9339 			case MII_MODEL_xxMARVELL_E1149:
   9340 				new_phytype = WMPHY_BM;
   9341 				break;
   9342 			case MII_MODEL_xxMARVELL_E1111:
   9343 			case MII_MODEL_xxMARVELL_I347:
   9344 			case MII_MODEL_xxMARVELL_E1512:
   9345 			case MII_MODEL_xxMARVELL_E1340M:
   9346 			case MII_MODEL_xxMARVELL_E1543:
   9347 				new_phytype = WMPHY_M88;
   9348 				break;
   9349 			case MII_MODEL_xxMARVELL_I82563:
   9350 				new_phytype = WMPHY_GG82563;
   9351 				break;
   9352 			default:
   9353 				break;
   9354 			}
   9355 			break;
   9356 		case MII_OUI_INTEL:
   9357 			switch (phy_model) {
   9358 			case MII_MODEL_INTEL_I82577:
   9359 				new_phytype = WMPHY_82577;
   9360 				break;
   9361 			case MII_MODEL_INTEL_I82579:
   9362 				new_phytype = WMPHY_82579;
   9363 				break;
   9364 			case MII_MODEL_INTEL_I217:
   9365 				new_phytype = WMPHY_I217;
   9366 				break;
   9367 			case MII_MODEL_INTEL_I82580:
   9368 			case MII_MODEL_INTEL_I350:
   9369 				new_phytype = WMPHY_82580;
   9370 				break;
   9371 			default:
   9372 				break;
   9373 			}
   9374 			break;
   9375 		case MII_OUI_yyINTEL:
   9376 			switch (phy_model) {
   9377 			case MII_MODEL_yyINTEL_I82562G:
   9378 			case MII_MODEL_yyINTEL_I82562EM:
   9379 			case MII_MODEL_yyINTEL_I82562ET:
   9380 				new_phytype = WMPHY_IFE;
   9381 				break;
   9382 			case MII_MODEL_yyINTEL_IGP01E1000:
   9383 				new_phytype = WMPHY_IGP;
   9384 				break;
   9385 			case MII_MODEL_yyINTEL_I82566:
   9386 				new_phytype = WMPHY_IGP_3;
   9387 				break;
   9388 			default:
   9389 				break;
   9390 			}
   9391 			break;
   9392 		default:
   9393 			break;
   9394 		}
   9395 		if (new_phytype == WMPHY_UNKNOWN)
   9396 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9397 			    __func__);
   9398 
   9399 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9400 		    && (sc->sc_phytype != new_phytype)) {
    9401 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9402 			    "was incorrect. PHY type from PHY ID = %u\n",
   9403 			    sc->sc_phytype, new_phytype);
   9404 		}
   9405 	}
   9406 
   9407 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9408 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9409 		/* SGMII */
   9410 		new_readreg = wm_sgmii_readreg;
   9411 		new_writereg = wm_sgmii_writereg;
    9412 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9413 		/* BM2 (phyaddr == 1) */
   9414 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9415 		    && (new_phytype != WMPHY_BM)
   9416 		    && (new_phytype != WMPHY_UNKNOWN))
   9417 			doubt_phytype = new_phytype;
   9418 		new_phytype = WMPHY_BM;
   9419 		new_readreg = wm_gmii_bm_readreg;
   9420 		new_writereg = wm_gmii_bm_writereg;
   9421 	} else if (sc->sc_type >= WM_T_PCH) {
   9422 		/* All PCH* use _hv_ */
   9423 		new_readreg = wm_gmii_hv_readreg;
   9424 		new_writereg = wm_gmii_hv_writereg;
   9425 	} else if (sc->sc_type >= WM_T_ICH8) {
   9426 		/* non-82567 ICH8, 9 and 10 */
   9427 		new_readreg = wm_gmii_i82544_readreg;
   9428 		new_writereg = wm_gmii_i82544_writereg;
   9429 	} else if (sc->sc_type >= WM_T_80003) {
   9430 		/* 80003 */
   9431 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9432 		    && (new_phytype != WMPHY_GG82563)
   9433 		    && (new_phytype != WMPHY_UNKNOWN))
   9434 			doubt_phytype = new_phytype;
   9435 		new_phytype = WMPHY_GG82563;
   9436 		new_readreg = wm_gmii_i80003_readreg;
   9437 		new_writereg = wm_gmii_i80003_writereg;
   9438 	} else if (sc->sc_type >= WM_T_I210) {
   9439 		/* I210 and I211 */
   9440 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9441 		    && (new_phytype != WMPHY_I210)
   9442 		    && (new_phytype != WMPHY_UNKNOWN))
   9443 			doubt_phytype = new_phytype;
   9444 		new_phytype = WMPHY_I210;
   9445 		new_readreg = wm_gmii_gs40g_readreg;
   9446 		new_writereg = wm_gmii_gs40g_writereg;
   9447 	} else if (sc->sc_type >= WM_T_82580) {
   9448 		/* 82580, I350 and I354 */
   9449 		new_readreg = wm_gmii_82580_readreg;
   9450 		new_writereg = wm_gmii_82580_writereg;
   9451 	} else if (sc->sc_type >= WM_T_82544) {
    9452 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9453 		new_readreg = wm_gmii_i82544_readreg;
   9454 		new_writereg = wm_gmii_i82544_writereg;
   9455 	} else {
   9456 		new_readreg = wm_gmii_i82543_readreg;
   9457 		new_writereg = wm_gmii_i82543_writereg;
   9458 	}
   9459 
   9460 	if (new_phytype == WMPHY_BM) {
   9461 		/* All BM use _bm_ */
   9462 		new_readreg = wm_gmii_bm_readreg;
   9463 		new_writereg = wm_gmii_bm_writereg;
   9464 	}
   9465 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9466 		/* All PCH* use _hv_ */
   9467 		new_readreg = wm_gmii_hv_readreg;
   9468 		new_writereg = wm_gmii_hv_writereg;
   9469 	}
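
	/*
	 * Note that the BM and PCH overrides above take precedence over
	 * the sc_type-based selection: a BM PHY always uses the _bm_
	 * accessors and any PCH* MAC always uses the _hv_ accessors,
	 * whatever the earlier heuristics chose.
	 */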
   9470 
   9471 	/* Diag output */
   9472 	if (doubt_phytype != WMPHY_UNKNOWN)
   9473 		aprint_error_dev(dev, "Assumed new PHY type was "
   9474 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9475 		    new_phytype);
   9476 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9477 	    && (sc->sc_phytype != new_phytype))
    9478 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9479 		    "was incorrect. New PHY type = %u\n",
   9480 		    sc->sc_phytype, new_phytype);
   9481 
   9482 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9483 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9484 
   9485 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9486 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9487 		    "function was incorrect.\n");
   9488 
   9489 	/* Update now */
   9490 	sc->sc_phytype = new_phytype;
   9491 	mii->mii_readreg = new_readreg;
   9492 	mii->mii_writereg = new_writereg;
   9493 }
   9494 
   9495 /*
   9496  * wm_get_phy_id_82575:
   9497  *
   9498  * Return PHY ID. Return -1 if it failed.
   9499  */
   9500 static int
   9501 wm_get_phy_id_82575(struct wm_softc *sc)
   9502 {
   9503 	uint32_t reg;
   9504 	int phyid = -1;
   9505 
   9506 	/* XXX */
   9507 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9508 		return -1;
   9509 
   9510 	if (wm_sgmii_uses_mdio(sc)) {
   9511 		switch (sc->sc_type) {
   9512 		case WM_T_82575:
   9513 		case WM_T_82576:
   9514 			reg = CSR_READ(sc, WMREG_MDIC);
   9515 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9516 			break;
   9517 		case WM_T_82580:
   9518 		case WM_T_I350:
   9519 		case WM_T_I354:
   9520 		case WM_T_I210:
   9521 		case WM_T_I211:
   9522 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9523 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9524 			break;
   9525 		default:
   9526 			return -1;
   9527 		}
   9528 	}
   9529 
   9530 	return phyid;
   9531 }
   9532 
   9533 
   9534 /*
   9535  * wm_gmii_mediainit:
   9536  *
   9537  *	Initialize media for use on 1000BASE-T devices.
   9538  */
   9539 static void
   9540 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9541 {
   9542 	device_t dev = sc->sc_dev;
   9543 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9544 	struct mii_data *mii = &sc->sc_mii;
   9545 	uint32_t reg;
   9546 
   9547 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9548 		device_xname(sc->sc_dev), __func__));
   9549 
   9550 	/* We have GMII. */
   9551 	sc->sc_flags |= WM_F_HAS_MII;
   9552 
   9553 	if (sc->sc_type == WM_T_80003)
    9554 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9555 	else
   9556 		sc->sc_tipg = TIPG_1000T_DFLT;
   9557 
   9558 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9559 	if ((sc->sc_type == WM_T_82580)
   9560 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9561 	    || (sc->sc_type == WM_T_I211)) {
   9562 		reg = CSR_READ(sc, WMREG_PHPM);
   9563 		reg &= ~PHPM_GO_LINK_D;
   9564 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9565 	}
   9566 
   9567 	/*
   9568 	 * Let the chip set speed/duplex on its own based on
   9569 	 * signals from the PHY.
   9570 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9571 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9572 	 */
   9573 	sc->sc_ctrl |= CTRL_SLU;
   9574 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9575 
   9576 	/* Initialize our media structures and probe the GMII. */
   9577 	mii->mii_ifp = ifp;
   9578 
   9579 	mii->mii_statchg = wm_gmii_statchg;
   9580 
   9581 	/* get PHY control from SMBus to PCIe */
   9582 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9583 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9584 		wm_smbustopci(sc);
   9585 
   9586 	wm_gmii_reset(sc);
   9587 
   9588 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9589 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9590 	    wm_gmii_mediastatus);
   9591 
   9592 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9593 	    || (sc->sc_type == WM_T_82580)
   9594 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9595 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9596 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9597 			/* Attach only one port */
   9598 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9599 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9600 		} else {
   9601 			int i, id;
   9602 			uint32_t ctrl_ext;
   9603 
   9604 			id = wm_get_phy_id_82575(sc);
   9605 			if (id != -1) {
   9606 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9607 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9608 			}
   9609 			if ((id == -1)
   9610 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9611 				/* Power on the SGMII PHY if it is disabled */
   9612 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9613 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9614 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9615 				CSR_WRITE_FLUSH(sc);
   9616 				delay(300*1000); /* XXX too long */
   9617 
    9618 				/* PHY addresses 1 through 7 */
   9619 				for (i = 1; i < 8; i++)
   9620 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9621 					    0xffffffff, i, MII_OFFSET_ANY,
   9622 					    MIIF_DOPAUSE);
   9623 
    9624 				/* Restore the previous SFP cage power state */
   9625 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9626 			}
   9627 		}
   9628 	} else {
   9629 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9630 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9631 	}
   9632 
   9633 	/*
   9634 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9635 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9636 	 */
   9637 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9638 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9639 		wm_set_mdio_slow_mode_hv(sc);
   9640 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9641 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9642 	}
   9643 
   9644 	/*
   9645 	 * (For ICH8 variants)
   9646 	 * If PHY detection failed, use BM's r/w function and retry.
   9647 	 */
   9648 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9649 		/* if failed, retry with *_bm_* */
   9650 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9651 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9652 		    sc->sc_phytype);
   9653 		sc->sc_phytype = WMPHY_BM;
   9654 		mii->mii_readreg = wm_gmii_bm_readreg;
   9655 		mii->mii_writereg = wm_gmii_bm_writereg;
   9656 
   9657 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9658 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9659 	}
   9660 
   9661 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9662 		/* No PHY was found */
   9663 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9664 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9665 		sc->sc_phytype = WMPHY_NONE;
   9666 	} else {
   9667 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9668 
    9669 		/*
    9670 		 * A PHY was found. Check the PHY type again with a second
    9671 		 * call to wm_gmii_setup_phytype().
    9672 		 */
   9673 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9674 		    child->mii_mpd_model);
   9675 
   9676 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9677 	}
   9678 }
   9679 
   9680 /*
   9681  * wm_gmii_mediachange:	[ifmedia interface function]
   9682  *
   9683  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9684  */
   9685 static int
   9686 wm_gmii_mediachange(struct ifnet *ifp)
   9687 {
   9688 	struct wm_softc *sc = ifp->if_softc;
   9689 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9690 	int rc;
   9691 
   9692 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9693 		device_xname(sc->sc_dev), __func__));
   9694 	if ((ifp->if_flags & IFF_UP) == 0)
   9695 		return 0;
   9696 
   9697 	/* Disable D0 LPLU. */
   9698 	wm_lplu_d0_disable(sc);
   9699 
   9700 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9701 	sc->sc_ctrl |= CTRL_SLU;
   9702 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9703 	    || (sc->sc_type > WM_T_82543)) {
   9704 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9705 	} else {
   9706 		sc->sc_ctrl &= ~CTRL_ASDE;
   9707 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9708 		if (ife->ifm_media & IFM_FDX)
   9709 			sc->sc_ctrl |= CTRL_FD;
   9710 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9711 		case IFM_10_T:
   9712 			sc->sc_ctrl |= CTRL_SPEED_10;
   9713 			break;
   9714 		case IFM_100_TX:
   9715 			sc->sc_ctrl |= CTRL_SPEED_100;
   9716 			break;
   9717 		case IFM_1000_T:
   9718 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9719 			break;
   9720 		default:
   9721 			panic("wm_gmii_mediachange: bad media 0x%x",
   9722 			    ife->ifm_media);
   9723 		}
   9724 	}
   9725 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9726 	CSR_WRITE_FLUSH(sc);
   9727 	if (sc->sc_type <= WM_T_82543)
   9728 		wm_gmii_reset(sc);
   9729 
   9730 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9731 		return 0;
   9732 	return rc;
   9733 }
   9734 
   9735 /*
   9736  * wm_gmii_mediastatus:	[ifmedia interface function]
   9737  *
   9738  *	Get the current interface media status on a 1000BASE-T device.
   9739  */
   9740 static void
   9741 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9742 {
   9743 	struct wm_softc *sc = ifp->if_softc;
   9744 
   9745 	ether_mediastatus(ifp, ifmr);
   9746 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9747 	    | sc->sc_flowflags;
   9748 }
   9749 
   9750 #define	MDI_IO		CTRL_SWDPIN(2)
   9751 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9752 #define	MDI_CLK		CTRL_SWDPIN(3)
   9753 
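/*
 * The i82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins defined above.  Each
 * bit is presented on MDI_IO and clocked with a low-high-low pulse on
 * MDI_CLK, with roughly 10us between edges (the delay(10) calls below),
 * which keeps the management clock far below the 2.5MHz ceiling that
 * IEEE 802.3 clause 22 allows.
 */
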
   9754 static void
   9755 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9756 {
   9757 	uint32_t i, v;
   9758 
   9759 	v = CSR_READ(sc, WMREG_CTRL);
   9760 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9761 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9762 
   9763 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9764 		if (data & i)
   9765 			v |= MDI_IO;
   9766 		else
   9767 			v &= ~MDI_IO;
   9768 		CSR_WRITE(sc, WMREG_CTRL, v);
   9769 		CSR_WRITE_FLUSH(sc);
   9770 		delay(10);
   9771 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9772 		CSR_WRITE_FLUSH(sc);
   9773 		delay(10);
   9774 		CSR_WRITE(sc, WMREG_CTRL, v);
   9775 		CSR_WRITE_FLUSH(sc);
   9776 		delay(10);
   9777 	}
   9778 }
   9779 
   9780 static uint32_t
   9781 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9782 {
   9783 	uint32_t v, i, data = 0;
   9784 
   9785 	v = CSR_READ(sc, WMREG_CTRL);
   9786 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9787 	v |= CTRL_SWDPIO(3);
   9788 
   9789 	CSR_WRITE(sc, WMREG_CTRL, v);
   9790 	CSR_WRITE_FLUSH(sc);
   9791 	delay(10);
   9792 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9793 	CSR_WRITE_FLUSH(sc);
   9794 	delay(10);
   9795 	CSR_WRITE(sc, WMREG_CTRL, v);
   9796 	CSR_WRITE_FLUSH(sc);
   9797 	delay(10);
   9798 
   9799 	for (i = 0; i < 16; i++) {
   9800 		data <<= 1;
   9801 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9802 		CSR_WRITE_FLUSH(sc);
   9803 		delay(10);
   9804 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9805 			data |= 1;
   9806 		CSR_WRITE(sc, WMREG_CTRL, v);
   9807 		CSR_WRITE_FLUSH(sc);
   9808 		delay(10);
   9809 	}
   9810 
   9811 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9812 	CSR_WRITE_FLUSH(sc);
   9813 	delay(10);
   9814 	CSR_WRITE(sc, WMREG_CTRL, v);
   9815 	CSR_WRITE_FLUSH(sc);
   9816 	delay(10);
   9817 
   9818 	return data;
   9819 }
   9820 
   9821 #undef MDI_IO
   9822 #undef MDI_DIR
   9823 #undef MDI_CLK
   9824 
   9825 /*
   9826  * wm_gmii_i82543_readreg:	[mii interface function]
   9827  *
   9828  *	Read a PHY register on the GMII (i82543 version).
   9829  */
   9830 static int
   9831 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9832 {
   9833 	struct wm_softc *sc = device_private(dev);
   9834 	int rv;
   9835 
   9836 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9837 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9838 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9839 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9840 
   9841 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9842 	    device_xname(dev), phy, reg, rv));
   9843 
   9844 	return rv;
   9845 }
   9846 
   9847 /*
   9848  * wm_gmii_i82543_writereg:	[mii interface function]
   9849  *
   9850  *	Write a PHY register on the GMII (i82543 version).
   9851  */
   9852 static void
   9853 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9854 {
   9855 	struct wm_softc *sc = device_private(dev);
   9856 
   9857 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9858 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9859 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9860 	    (MII_COMMAND_START << 30), 32);
   9861 }
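
/*
 * For reference, the frames built by the two functions above follow the
 * IEEE 802.3 clause 22 management-frame layout.  A read clocks out the
 * 32-bit preamble plus a 14-bit header,
 *
 *	ST(01) OP(10 = read) PHYAD(5 bits) REGAD(5 bits)
 *
 * and then samples 16 data bits in wm_i82543_mii_recvbits().  A write is
 * a single 32-bit frame,
 *
 *	ST(01) OP(01 = write) PHYAD(5) REGAD(5) TA(10) DATA(16)
 *
 * which is what the shift counts (30, 28, 23, 18, 16) above encode.
 */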
   9862 
   9863 /*
   9864  * wm_gmii_mdic_readreg:	[mii interface function]
   9865  *
   9866  *	Read a PHY register on the GMII.
   9867  */
   9868 static int
   9869 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9870 {
   9871 	struct wm_softc *sc = device_private(dev);
   9872 	uint32_t mdic = 0;
   9873 	int i, rv;
   9874 
   9875 	if (reg > MII_ADDRMASK) {
   9876 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9877 		    __func__, sc->sc_phytype, reg);
   9878 		reg &= MII_ADDRMASK;
   9879 	}
   9880 
   9881 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9882 	    MDIC_REGADD(reg));
   9883 
   9884 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9885 		mdic = CSR_READ(sc, WMREG_MDIC);
   9886 		if (mdic & MDIC_READY)
   9887 			break;
   9888 		delay(50);
   9889 	}
   9890 
   9891 	if ((mdic & MDIC_READY) == 0) {
   9892 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9893 		    device_xname(dev), phy, reg);
   9894 		rv = 0;
   9895 	} else if (mdic & MDIC_E) {
   9896 #if 0 /* This is normal if no PHY is present. */
   9897 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9898 		    device_xname(dev), phy, reg);
   9899 #endif
   9900 		rv = 0;
   9901 	} else {
   9902 		rv = MDIC_DATA(mdic);
   9903 		if (rv == 0xffff)
   9904 			rv = 0;
   9905 	}
   9906 
   9907 	return rv;
   9908 }
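
/*
 * A read that completes with MDIC_E set, or that returns all ones, is
 * how an absent PHY shows up on this bus; both cases are mapped to 0
 * above so the MII layer can treat the address as empty.
 */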
   9909 
   9910 /*
   9911  * wm_gmii_mdic_writereg:	[mii interface function]
   9912  *
   9913  *	Write a PHY register on the GMII.
   9914  */
   9915 static void
   9916 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9917 {
   9918 	struct wm_softc *sc = device_private(dev);
   9919 	uint32_t mdic = 0;
   9920 	int i;
   9921 
   9922 	if (reg > MII_ADDRMASK) {
   9923 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9924 		    __func__, sc->sc_phytype, reg);
   9925 		reg &= MII_ADDRMASK;
   9926 	}
   9927 
   9928 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9929 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9930 
   9931 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9932 		mdic = CSR_READ(sc, WMREG_MDIC);
   9933 		if (mdic & MDIC_READY)
   9934 			break;
   9935 		delay(50);
   9936 	}
   9937 
   9938 	if ((mdic & MDIC_READY) == 0)
   9939 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9940 		    device_xname(dev), phy, reg);
   9941 	else if (mdic & MDIC_E)
   9942 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9943 		    device_xname(dev), phy, reg);
   9944 }
   9945 
   9946 /*
   9947  * wm_gmii_i82544_readreg:	[mii interface function]
   9948  *
   9949  *	Read a PHY register on the GMII.
   9950  */
   9951 static int
   9952 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9953 {
   9954 	struct wm_softc *sc = device_private(dev);
   9955 	int rv;
   9956 
   9957 	if (sc->phy.acquire(sc)) {
   9958 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9959 		return 0;
   9960 	}
   9961 
   9962 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9963 		switch (sc->sc_phytype) {
   9964 		case WMPHY_IGP:
   9965 		case WMPHY_IGP_2:
   9966 		case WMPHY_IGP_3:
   9967 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9968 			break;
   9969 		default:
   9970 #ifdef WM_DEBUG
   9971 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9972 			    __func__, sc->sc_phytype, reg);
   9973 #endif
   9974 			break;
   9975 		}
   9976 	}
   9977 
   9978 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9979 	sc->phy.release(sc);
   9980 
   9981 	return rv;
   9982 }
   9983 
   9984 /*
   9985  * wm_gmii_i82544_writereg:	[mii interface function]
   9986  *
   9987  *	Write a PHY register on the GMII.
   9988  */
   9989 static void
   9990 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9991 {
   9992 	struct wm_softc *sc = device_private(dev);
   9993 
   9994 	if (sc->phy.acquire(sc)) {
   9995 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9996 		return;
   9997 	}
   9998 
   9999 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10000 		switch (sc->sc_phytype) {
   10001 		case WMPHY_IGP:
   10002 		case WMPHY_IGP_2:
   10003 		case WMPHY_IGP_3:
   10004 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   10005 			break;
   10006 		default:
   10007 #ifdef WM_DEBUG
    10008 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10009 			    __func__, sc->sc_phytype, reg);
   10010 #endif
   10011 			break;
   10012 		}
   10013 	}
   10014 
   10015 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10016 	sc->phy.release(sc);
   10017 }
   10018 
   10019 /*
   10020  * wm_gmii_i80003_readreg:	[mii interface function]
   10021  *
    10022  *	Read a PHY register on the kumeran interface.
    10023  * This could be handled by the PHY layer if we didn't have to lock the
    10024  * resource ...
   10025  */
   10026 static int
   10027 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10028 {
   10029 	struct wm_softc *sc = device_private(dev);
   10030 	int page_select, temp;
   10031 	int rv;
   10032 
   10033 	if (phy != 1) /* only one PHY on kumeran bus */
   10034 		return 0;
   10035 
   10036 	if (sc->phy.acquire(sc)) {
   10037 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10038 		return 0;
   10039 	}
   10040 
   10041 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10042 		page_select = GG82563_PHY_PAGE_SELECT;
   10043 	else {
   10044 		/*
   10045 		 * Use Alternative Page Select register to access registers
   10046 		 * 30 and 31.
   10047 		 */
   10048 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10049 	}
   10050 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10051 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10052 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10053 		/*
    10054 		 * Wait 200us more to work around a bug in the ready bit
    10055 		 * of the MDIC register.
   10056 		 */
   10057 		delay(200);
   10058 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10059 			device_printf(dev, "%s failed\n", __func__);
   10060 			rv = 0; /* XXX */
   10061 			goto out;
   10062 		}
   10063 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10064 		delay(200);
   10065 	} else
   10066 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10067 
   10068 out:
   10069 	sc->phy.release(sc);
   10070 	return rv;
   10071 }
   10072 
   10073 /*
   10074  * wm_gmii_i80003_writereg:	[mii interface function]
   10075  *
    10076  *	Write a PHY register on the kumeran interface.
    10077  * This could be handled by the PHY layer if we didn't have to lock the
    10078  * resource ...
   10079  */
   10080 static void
   10081 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10082 {
   10083 	struct wm_softc *sc = device_private(dev);
   10084 	int page_select, temp;
   10085 
   10086 	if (phy != 1) /* only one PHY on kumeran bus */
   10087 		return;
   10088 
   10089 	if (sc->phy.acquire(sc)) {
   10090 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10091 		return;
   10092 	}
   10093 
   10094 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10095 		page_select = GG82563_PHY_PAGE_SELECT;
   10096 	else {
   10097 		/*
   10098 		 * Use Alternative Page Select register to access registers
   10099 		 * 30 and 31.
   10100 		 */
   10101 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10102 	}
   10103 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10104 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10105 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10106 		/*
    10107 		 * Wait 200us more to work around a bug in the ready bit
    10108 		 * of the MDIC register.
   10109 		 */
   10110 		delay(200);
   10111 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10112 			device_printf(dev, "%s failed\n", __func__);
   10113 			goto out;
   10114 		}
   10115 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10116 		delay(200);
   10117 	} else
   10118 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10119 
   10120 out:
   10121 	sc->phy.release(sc);
   10122 }
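
/*
 * In both GG82563 accessors above, the register argument carries the
 * page in its upper bits and the offset in its low five bits, in the
 * style of the e1000 GG82563_REG(page, reg) macro:
 *
 *	reg = (page << GG82563_PAGE_SHIFT) | (offset & MII_ADDRMASK);
 *
 * Offsets 30 and 31 clash with the page-select register itself, which
 * is why those are reached through GG82563_PHY_PAGE_SELECT_ALT instead.
 */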
   10123 
   10124 /*
   10125  * wm_gmii_bm_readreg:	[mii interface function]
   10126  *
    10127  *	Read a PHY register on the BM PHY.
    10128  * This could be handled by the PHY layer if we didn't have to lock the
    10129  * resource ...
   10130  */
   10131 static int
   10132 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10133 {
   10134 	struct wm_softc *sc = device_private(dev);
   10135 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10136 	uint16_t val;
   10137 	int rv;
   10138 
   10139 	if (sc->phy.acquire(sc)) {
   10140 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10141 		return 0;
   10142 	}
   10143 
   10144 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10145 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10146 		    || (reg == 31)) ? 1 : phy;
   10147 	/* Page 800 works differently than the rest so it has its own func */
   10148 	if (page == BM_WUC_PAGE) {
   10149 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10150 		rv = val;
   10151 		goto release;
   10152 	}
   10153 
   10154 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10155 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10156 		    && (sc->sc_type != WM_T_82583))
   10157 			wm_gmii_mdic_writereg(dev, phy,
   10158 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10159 		else
   10160 			wm_gmii_mdic_writereg(dev, phy,
   10161 			    BME1000_PHY_PAGE_SELECT, page);
   10162 	}
   10163 
   10164 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10165 
   10166 release:
   10167 	sc->phy.release(sc);
   10168 	return rv;
   10169 }
   10170 
   10171 /*
   10172  * wm_gmii_bm_writereg:	[mii interface function]
   10173  *
    10174  *	Write a PHY register on the BM PHY.
    10175  * This could be handled by the PHY layer if we didn't have to lock the
    10176  * resource ...
   10177  */
   10178 static void
   10179 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10180 {
   10181 	struct wm_softc *sc = device_private(dev);
   10182 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10183 
   10184 	if (sc->phy.acquire(sc)) {
   10185 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10186 		return;
   10187 	}
   10188 
   10189 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10190 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10191 		    || (reg == 31)) ? 1 : phy;
   10192 	/* Page 800 works differently than the rest so it has its own func */
   10193 	if (page == BM_WUC_PAGE) {
   10194 		uint16_t tmp;
   10195 
   10196 		tmp = val;
   10197 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10198 		goto release;
   10199 	}
   10200 
   10201 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10202 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10203 		    && (sc->sc_type != WM_T_82583))
   10204 			wm_gmii_mdic_writereg(dev, phy,
   10205 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10206 		else
   10207 			wm_gmii_mdic_writereg(dev, phy,
   10208 			    BME1000_PHY_PAGE_SELECT, page);
   10209 	}
   10210 
   10211 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10212 
   10213 release:
   10214 	sc->phy.release(sc);
   10215 }
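
/*
 * Accessing the wakeup (WUC) registers on page 800 is a three-step
 * sequence, mirroring the e1000 functions named in the step comments
 * below: enable wakeup-register access from page 769, perform the
 * actual access through the address/data opcode registers on page 800,
 * then restore the saved enable bits.  wm_access_phy_wakeup_reg_bm()
 * performs all three steps and expects the caller to hold the PHY
 * semaphore.
 */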
   10216 
   10217 static void
   10218 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10219 {
   10220 	struct wm_softc *sc = device_private(dev);
   10221 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10222 	uint16_t wuce, reg;
   10223 
   10224 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10225 		device_xname(dev), __func__));
   10226 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10227 	if (sc->sc_type == WM_T_PCH) {
    10228 		/* XXX The e1000 driver does nothing here... why? */
   10229 	}
   10230 
   10231 	/*
   10232 	 * 1) Enable PHY wakeup register first.
   10233 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10234 	 */
   10235 
   10236 	/* Set page 769 */
   10237 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10238 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10239 
   10240 	/* Read WUCE and save it */
   10241 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10242 
   10243 	reg = wuce | BM_WUC_ENABLE_BIT;
   10244 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10245 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10246 
   10247 	/* Select page 800 */
   10248 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10249 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10250 
   10251 	/*
   10252 	 * 2) Access PHY wakeup register.
   10253 	 * See e1000_access_phy_wakeup_reg_bm.
   10254 	 */
   10255 
   10256 	/* Write page 800 */
   10257 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10258 
   10259 	if (rd)
   10260 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10261 	else
   10262 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10263 
   10264 	/*
   10265 	 * 3) Disable PHY wakeup register.
   10266 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10267 	 */
   10268 	/* Set page 769 */
   10269 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10270 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10271 
   10272 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10273 }
   10274 
   10275 /*
   10276  * wm_gmii_hv_readreg:	[mii interface function]
   10277  *
    10278  *	Read a PHY register on the HV PHY (PCH and newer).
    10279  * This could be handled by the PHY layer if we didn't have to lock the
    10280  * resource ...
   10281  */
   10282 static int
   10283 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10284 {
   10285 	struct wm_softc *sc = device_private(dev);
   10286 	int rv;
   10287 
   10288 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10289 		device_xname(dev), __func__));
   10290 	if (sc->phy.acquire(sc)) {
   10291 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10292 		return 0;
   10293 	}
   10294 
   10295 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10296 	sc->phy.release(sc);
   10297 	return rv;
   10298 }
   10299 
   10300 static int
   10301 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10302 {
   10303 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10304 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10305 	uint16_t val;
   10306 	int rv;
   10307 
   10308 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10309 
   10310 	/* Page 800 works differently than the rest so it has its own func */
   10311 	if (page == BM_WUC_PAGE) {
   10312 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10313 		return val;
   10314 	}
   10315 
    10316 	/*
    10317 	 * Pages lower than 768 work differently from the rest, so they
    10318 	 * have their own function.
    10319 	 */
   10320 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10321 		printf("gmii_hv_readreg!!!\n");
   10322 		return 0;
   10323 	}
   10324 
   10325 	/*
   10326 	 * XXX I21[789] documents say that the SMBus Address register is at
   10327 	 * PHY address 01, Page 0 (not 768), Register 26.
   10328 	 */
   10329 	if (page == HV_INTC_FC_PAGE_START)
   10330 		page = 0;
   10331 
   10332 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10333 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10334 		    page << BME1000_PAGE_SHIFT);
   10335 	}
   10336 
   10337 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10338 	return rv;
   10339 }
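
/*
 * HV register arguments are encoded the same way as BM ones: the page
 * in the upper bits and the offset in the lower bits, recovered with
 * BM_PHY_REG_PAGE() and BM_PHY_REG_NUM().  Pages at or above
 * HV_INTC_FC_PAGE_START (768) belong to the MAC-side wrapper and are
 * always addressed at PHY 1, which is what the phy fixup above does.
 */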
   10340 
   10341 /*
   10342  * wm_gmii_hv_writereg:	[mii interface function]
   10343  *
    10344  *	Write a PHY register on the HV PHY (PCH and newer).
    10345  * This could be handled by the PHY layer if we didn't have to lock the
    10346  * resource ...
   10347  */
   10348 static void
   10349 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10350 {
   10351 	struct wm_softc *sc = device_private(dev);
   10352 
   10353 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10354 		device_xname(dev), __func__));
   10355 
   10356 	if (sc->phy.acquire(sc)) {
   10357 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10358 		return;
   10359 	}
   10360 
   10361 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10362 	sc->phy.release(sc);
   10363 }
   10364 
   10365 static void
   10366 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10367 {
   10368 	struct wm_softc *sc = device_private(dev);
   10369 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10370 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10371 
   10372 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10373 
   10374 	/* Page 800 works differently than the rest so it has its own func */
   10375 	if (page == BM_WUC_PAGE) {
   10376 		uint16_t tmp;
   10377 
   10378 		tmp = val;
   10379 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10380 		return;
   10381 	}
   10382 
    10383 	/*
    10384 	 * Pages lower than 768 work differently from the rest, so they
    10385 	 * have their own function.
    10386 	 */
   10387 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10388 		printf("gmii_hv_writereg!!!\n");
   10389 		return;
   10390 	}
   10391 
   10392 	{
   10393 		/*
   10394 		 * XXX I21[789] documents say that the SMBus Address register
   10395 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10396 		 */
   10397 		if (page == HV_INTC_FC_PAGE_START)
   10398 			page = 0;
   10399 
   10400 		/*
   10401 		 * XXX Workaround MDIO accesses being disabled after entering
   10402 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10403 		 * register is set)
   10404 		 */
   10405 		if (sc->sc_phytype == WMPHY_82578) {
   10406 			struct mii_softc *child;
   10407 
   10408 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10409 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10410 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10411 			    && ((val & (1 << 11)) != 0)) {
   10412 				printf("XXX need workaround\n");
   10413 			}
   10414 		}
   10415 
   10416 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10417 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10418 			    page << BME1000_PAGE_SHIFT);
   10419 		}
   10420 	}
   10421 
   10422 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10423 }
   10424 
   10425 /*
   10426  * wm_gmii_82580_readreg:	[mii interface function]
   10427  *
   10428  *	Read a PHY register on the 82580 and I350.
   10429  * This could be handled by the PHY layer if we didn't have to lock the
    10430  * resource ...
   10431  */
   10432 static int
   10433 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10434 {
   10435 	struct wm_softc *sc = device_private(dev);
   10436 	int rv;
   10437 
   10438 	if (sc->phy.acquire(sc) != 0) {
   10439 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10440 		return 0;
   10441 	}
   10442 
   10443 #ifdef DIAGNOSTIC
   10444 	if (reg > MII_ADDRMASK) {
   10445 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10446 		    __func__, sc->sc_phytype, reg);
   10447 		reg &= MII_ADDRMASK;
   10448 	}
   10449 #endif
   10450 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10451 
   10452 	sc->phy.release(sc);
   10453 	return rv;
   10454 }
   10455 
   10456 /*
   10457  * wm_gmii_82580_writereg:	[mii interface function]
   10458  *
   10459  *	Write a PHY register on the 82580 and I350.
   10460  * This could be handled by the PHY layer if we didn't have to lock the
    10461  * resource ...
   10462  */
   10463 static void
   10464 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10465 {
   10466 	struct wm_softc *sc = device_private(dev);
   10467 
   10468 	if (sc->phy.acquire(sc) != 0) {
   10469 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10470 		return;
   10471 	}
   10472 
   10473 #ifdef DIAGNOSTIC
   10474 	if (reg > MII_ADDRMASK) {
   10475 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10476 		    __func__, sc->sc_phytype, reg);
   10477 		reg &= MII_ADDRMASK;
   10478 	}
   10479 #endif
   10480 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10481 
   10482 	sc->phy.release(sc);
   10483 }
   10484 
   10485 /*
   10486  * wm_gmii_gs40g_readreg:	[mii interface function]
   10487  *
    10488  *	Read a PHY register on the I210 and I211.
    10489  * This could be handled by the PHY layer if we didn't have to lock the
    10490  * resource ...
   10491  */
   10492 static int
   10493 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10494 {
   10495 	struct wm_softc *sc = device_private(dev);
   10496 	int page, offset;
   10497 	int rv;
   10498 
   10499 	/* Acquire semaphore */
   10500 	if (sc->phy.acquire(sc)) {
   10501 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10502 		return 0;
   10503 	}
   10504 
   10505 	/* Page select */
   10506 	page = reg >> GS40G_PAGE_SHIFT;
   10507 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10508 
   10509 	/* Read reg */
   10510 	offset = reg & GS40G_OFFSET_MASK;
   10511 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10512 
   10513 	sc->phy.release(sc);
   10514 	return rv;
   10515 }
   10516 
   10517 /*
   10518  * wm_gmii_gs40g_writereg:	[mii interface function]
   10519  *
   10520  *	Write a PHY register on the I210 and I211.
    10521  * resource ...
   10522  * ressource ...
   10523  */
   10524 static void
   10525 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10526 {
   10527 	struct wm_softc *sc = device_private(dev);
   10528 	int page, offset;
   10529 
   10530 	/* Acquire semaphore */
   10531 	if (sc->phy.acquire(sc)) {
   10532 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10533 		return;
   10534 	}
   10535 
   10536 	/* Page select */
   10537 	page = reg >> GS40G_PAGE_SHIFT;
   10538 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10539 
   10540 	/* Write reg */
   10541 	offset = reg & GS40G_OFFSET_MASK;
   10542 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10543 
   10544 	/* Release semaphore */
   10545 	sc->phy.release(sc);
   10546 }
   10547 
   10548 /*
   10549  * wm_gmii_statchg:	[mii interface function]
   10550  *
   10551  *	Callback from MII layer when media changes.
   10552  */
   10553 static void
   10554 wm_gmii_statchg(struct ifnet *ifp)
   10555 {
   10556 	struct wm_softc *sc = ifp->if_softc;
   10557 	struct mii_data *mii = &sc->sc_mii;
   10558 
   10559 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10560 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10561 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10562 
   10563 	/*
   10564 	 * Get flow control negotiation result.
   10565 	 */
   10566 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10567 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10568 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10569 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10570 	}
   10571 
   10572 	if (sc->sc_flowflags & IFM_FLOW) {
   10573 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10574 			sc->sc_ctrl |= CTRL_TFCE;
   10575 			sc->sc_fcrtl |= FCRTL_XONE;
   10576 		}
   10577 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10578 			sc->sc_ctrl |= CTRL_RFCE;
   10579 	}
   10580 
   10581 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10582 		DPRINTF(WM_DEBUG_LINK,
   10583 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10584 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10585 	} else {
   10586 		DPRINTF(WM_DEBUG_LINK,
   10587 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10588 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10589 	}
   10590 
   10591 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10592 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10593 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10594 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10595 	if (sc->sc_type == WM_T_80003) {
   10596 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10597 		case IFM_1000_T:
   10598 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10599 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10600 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10601 			break;
   10602 		default:
   10603 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10604 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10605 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10606 			break;
   10607 		}
   10608 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10609 	}
   10610 }
   10611 
   10612 /* kumeran related (80003, ICH* and PCH*) */
   10613 
   10614 /*
   10615  * wm_kmrn_readreg:
   10616  *
   10617  *	Read a kumeran register
   10618  */
   10619 static int
   10620 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10621 {
   10622 	int rv;
   10623 
   10624 	if (sc->sc_type == WM_T_80003)
   10625 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10626 	else
   10627 		rv = sc->phy.acquire(sc);
   10628 	if (rv != 0) {
   10629 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10630 		    __func__);
   10631 		return rv;
   10632 	}
   10633 
   10634 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10635 
   10636 	if (sc->sc_type == WM_T_80003)
   10637 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10638 	else
   10639 		sc->phy.release(sc);
   10640 
   10641 	return rv;
   10642 }
   10643 
   10644 static int
   10645 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10646 {
   10647 
   10648 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10649 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10650 	    KUMCTRLSTA_REN);
   10651 	CSR_WRITE_FLUSH(sc);
   10652 	delay(2);
   10653 
   10654 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10655 
   10656 	return 0;
   10657 }
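
/*
 * Note that kumeran access is a doorbell scheme rather than a polled
 * one: the read above simply waits 2us after setting KUMCTRLSTA_REN
 * and then takes the low 16 bits of the same register; there is no
 * ready bit to poll as there is with MDIC.
 */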
   10658 
   10659 /*
   10660  * wm_kmrn_writereg:
   10661  *
   10662  *	Write a kumeran register
   10663  */
   10664 static int
   10665 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10666 {
   10667 	int rv;
   10668 
   10669 	if (sc->sc_type == WM_T_80003)
   10670 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10671 	else
   10672 		rv = sc->phy.acquire(sc);
   10673 	if (rv != 0) {
   10674 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10675 		    __func__);
   10676 		return rv;
   10677 	}
   10678 
   10679 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10680 
   10681 	if (sc->sc_type == WM_T_80003)
   10682 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10683 	else
   10684 		sc->phy.release(sc);
   10685 
   10686 	return rv;
   10687 }
   10688 
   10689 static int
   10690 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10691 {
   10692 
   10693 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10694 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10695 
   10696 	return 0;
   10697 }
   10698 
   10699 /* SGMII related */
   10700 
   10701 /*
   10702  * wm_sgmii_uses_mdio
   10703  *
   10704  * Check whether the transaction is to the internal PHY or the external
   10705  * MDIO interface. Return true if it's MDIO.
   10706  */
   10707 static bool
   10708 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10709 {
   10710 	uint32_t reg;
   10711 	bool ismdio = false;
   10712 
   10713 	switch (sc->sc_type) {
   10714 	case WM_T_82575:
   10715 	case WM_T_82576:
   10716 		reg = CSR_READ(sc, WMREG_MDIC);
   10717 		ismdio = ((reg & MDIC_DEST) != 0);
   10718 		break;
   10719 	case WM_T_82580:
   10720 	case WM_T_I350:
   10721 	case WM_T_I354:
   10722 	case WM_T_I210:
   10723 	case WM_T_I211:
   10724 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10725 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10726 		break;
   10727 	default:
   10728 		break;
   10729 	}
   10730 
   10731 	return ismdio;
   10732 }
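
/*
 * When SGMII management traffic is carried over I2C (the non-MDIO case
 * above), the PHY is reached through the I2CCMD register and the 16-bit
 * register value travels byte-swapped relative to host order, so both
 * accessors below swap the two data bytes:
 *
 *	swapped = ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
 */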
   10733 
   10734 /*
   10735  * wm_sgmii_readreg:	[mii interface function]
   10736  *
    10737  *	Read a PHY register on the SGMII interface.
    10738  * This could be handled by the PHY layer if we didn't have to lock the
    10739  * resource ...
   10740  */
   10741 static int
   10742 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10743 {
   10744 	struct wm_softc *sc = device_private(dev);
   10745 	uint32_t i2ccmd;
   10746 	int i, rv;
   10747 
   10748 	if (sc->phy.acquire(sc)) {
   10749 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10750 		return 0;
   10751 	}
   10752 
   10753 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10754 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10755 	    | I2CCMD_OPCODE_READ;
   10756 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10757 
   10758 	/* Poll the ready bit */
   10759 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10760 		delay(50);
   10761 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10762 		if (i2ccmd & I2CCMD_READY)
   10763 			break;
   10764 	}
   10765 	if ((i2ccmd & I2CCMD_READY) == 0)
   10766 		device_printf(dev, "I2CCMD Read did not complete\n");
   10767 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10768 		device_printf(dev, "I2CCMD Error bit set\n");
   10769 
   10770 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10771 
   10772 	sc->phy.release(sc);
   10773 	return rv;
   10774 }
   10775 
   10776 /*
   10777  * wm_sgmii_writereg:	[mii interface function]
   10778  *
    10779  *	Write a PHY register on the SGMII interface.
    10780  * This could be handled by the PHY layer if we didn't have to lock the
    10781  * resource ...
   10782  */
   10783 static void
   10784 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10785 {
   10786 	struct wm_softc *sc = device_private(dev);
   10787 	uint32_t i2ccmd;
   10788 	int i;
   10789 	int val_swapped;
   10790 
   10791 	if (sc->phy.acquire(sc) != 0) {
   10792 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10793 		return;
   10794 	}
   10795 	/* Swap the data bytes for the I2C interface */
   10796 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10797 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10798 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10799 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10800 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10801 
   10802 	/* Poll the ready bit */
   10803 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10804 		delay(50);
   10805 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10806 		if (i2ccmd & I2CCMD_READY)
   10807 			break;
   10808 	}
   10809 	if ((i2ccmd & I2CCMD_READY) == 0)
   10810 		device_printf(dev, "I2CCMD Write did not complete\n");
   10811 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10812 		device_printf(dev, "I2CCMD Error bit set\n");
   10813 
   10814 	sc->phy.release(sc);
   10815 }
   10816 
   10817 /* TBI related */
   10818 
   10819 /*
   10820  * wm_tbi_mediainit:
   10821  *
   10822  *	Initialize media for use on 1000BASE-X devices.
   10823  */
   10824 static void
   10825 wm_tbi_mediainit(struct wm_softc *sc)
   10826 {
   10827 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10828 	const char *sep = "";
   10829 
   10830 	if (sc->sc_type < WM_T_82543)
   10831 		sc->sc_tipg = TIPG_WM_DFLT;
   10832 	else
   10833 		sc->sc_tipg = TIPG_LG_DFLT;
   10834 
   10835 	sc->sc_tbi_serdes_anegticks = 5;
   10836 
   10837 	/* Initialize our media structures */
   10838 	sc->sc_mii.mii_ifp = ifp;
   10839 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10840 
   10841 	if ((sc->sc_type >= WM_T_82575)
   10842 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10843 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10844 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10845 	else
   10846 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10847 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10848 
   10849 	/*
   10850 	 * SWD Pins:
   10851 	 *
   10852 	 *	0 = Link LED (output)
   10853 	 *	1 = Loss Of Signal (input)
   10854 	 */
   10855 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10856 
   10857 	/* XXX Perhaps this is only for TBI */
   10858 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10859 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10860 
   10861 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10862 		sc->sc_ctrl &= ~CTRL_LRST;
   10863 
   10864 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10865 
   10866 #define	ADD(ss, mm, dd)							\
   10867 do {									\
   10868 	aprint_normal("%s%s", sep, ss);					\
   10869 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10870 	sep = ", ";							\
   10871 } while (/*CONSTCOND*/0)
   10872 
   10873 	aprint_normal_dev(sc->sc_dev, "");
   10874 
   10875 	if (sc->sc_type == WM_T_I354) {
   10876 		uint32_t status;
   10877 
   10878 		status = CSR_READ(sc, WMREG_STATUS);
   10879 		if (((status & STATUS_2P5_SKU) != 0)
   10880 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10881 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   10882 		} else
    10883 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10884 	} else if (sc->sc_type == WM_T_82545) {
   10885 		/* Only 82545 is LX (XXX except SFP) */
   10886 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10887 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10888 	} else {
   10889 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10890 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10891 	}
   10892 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10893 	aprint_normal("\n");
   10894 
   10895 #undef ADD
   10896 
   10897 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10898 }
   10899 
   10900 /*
   10901  * wm_tbi_mediachange:	[ifmedia interface function]
   10902  *
   10903  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10904  */
   10905 static int
   10906 wm_tbi_mediachange(struct ifnet *ifp)
   10907 {
   10908 	struct wm_softc *sc = ifp->if_softc;
   10909 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10910 	uint32_t status;
   10911 	int i;
   10912 
   10913 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10914 		/* XXX need some work for >= 82571 and < 82575 */
   10915 		if (sc->sc_type < WM_T_82575)
   10916 			return 0;
   10917 	}
   10918 
   10919 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10920 	    || (sc->sc_type >= WM_T_82575))
   10921 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10922 
   10923 	sc->sc_ctrl &= ~CTRL_LRST;
   10924 	sc->sc_txcw = TXCW_ANE;
   10925 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10926 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10927 	else if (ife->ifm_media & IFM_FDX)
   10928 		sc->sc_txcw |= TXCW_FD;
   10929 	else
   10930 		sc->sc_txcw |= TXCW_HD;
   10931 
   10932 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10933 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10934 
    10935 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   10936 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10937 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10938 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10939 	CSR_WRITE_FLUSH(sc);
   10940 	delay(1000);
   10941 
   10942 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
    10943 	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
   10944 
    10945 	/*
    10946 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    10947 	 * if the optics detect a signal, 0 if they don't.
    10948 	 */
   10949 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10950 		/* Have signal; wait for the link to come up. */
   10951 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10952 			delay(10000);
   10953 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10954 				break;
   10955 		}
   10956 
    10957 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    10958 			    device_xname(sc->sc_dev), i));
   10959 
   10960 		status = CSR_READ(sc, WMREG_STATUS);
   10961 		DPRINTF(WM_DEBUG_LINK,
   10962 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    10963 			device_xname(sc->sc_dev), status, STATUS_LU));
   10964 		if (status & STATUS_LU) {
   10965 			/* Link is up. */
   10966 			DPRINTF(WM_DEBUG_LINK,
   10967 			    ("%s: LINK: set media -> link up %s\n",
   10968 			    device_xname(sc->sc_dev),
   10969 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10970 
   10971 			/*
   10972 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10973 			 * so we should update sc->sc_ctrl
   10974 			 */
   10975 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10976 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10977 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10978 			if (status & STATUS_FD)
   10979 				sc->sc_tctl |=
   10980 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10981 			else
   10982 				sc->sc_tctl |=
   10983 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10984 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10985 				sc->sc_fcrtl |= FCRTL_XONE;
   10986 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10987 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10988 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10989 				      sc->sc_fcrtl);
   10990 			sc->sc_tbi_linkup = 1;
   10991 		} else {
   10992 			if (i == WM_LINKUP_TIMEOUT)
   10993 				wm_check_for_link(sc);
   10994 			/* Link is down. */
   10995 			DPRINTF(WM_DEBUG_LINK,
   10996 			    ("%s: LINK: set media -> link down\n",
   10997 			    device_xname(sc->sc_dev)));
   10998 			sc->sc_tbi_linkup = 0;
   10999 		}
   11000 	} else {
   11001 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11002 		    device_xname(sc->sc_dev)));
   11003 		sc->sc_tbi_linkup = 0;
   11004 	}
   11005 
   11006 	wm_tbi_serdes_set_linkled(sc);
   11007 
   11008 	return 0;
   11009 }
   11010 
   11011 /*
   11012  * wm_tbi_mediastatus:	[ifmedia interface function]
   11013  *
   11014  *	Get the current interface media status on a 1000BASE-X device.
   11015  */
   11016 static void
   11017 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11018 {
   11019 	struct wm_softc *sc = ifp->if_softc;
   11020 	uint32_t ctrl, status;
   11021 
   11022 	ifmr->ifm_status = IFM_AVALID;
   11023 	ifmr->ifm_active = IFM_ETHER;
   11024 
   11025 	status = CSR_READ(sc, WMREG_STATUS);
   11026 	if ((status & STATUS_LU) == 0) {
   11027 		ifmr->ifm_active |= IFM_NONE;
   11028 		return;
   11029 	}
   11030 
   11031 	ifmr->ifm_status |= IFM_ACTIVE;
   11032 	/* Only 82545 is LX */
   11033 	if (sc->sc_type == WM_T_82545)
   11034 		ifmr->ifm_active |= IFM_1000_LX;
   11035 	else
   11036 		ifmr->ifm_active |= IFM_1000_SX;
   11037 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11038 		ifmr->ifm_active |= IFM_FDX;
   11039 	else
   11040 		ifmr->ifm_active |= IFM_HDX;
   11041 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11042 	if (ctrl & CTRL_RFCE)
   11043 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11044 	if (ctrl & CTRL_TFCE)
   11045 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11046 }
   11047 
   11048 /* XXX TBI only */
   11049 static int
   11050 wm_check_for_link(struct wm_softc *sc)
   11051 {
   11052 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11053 	uint32_t rxcw;
   11054 	uint32_t ctrl;
   11055 	uint32_t status;
   11056 	uint32_t sig;
   11057 
   11058 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11059 		/* XXX need some work for >= 82571 */
   11060 		if (sc->sc_type >= WM_T_82571) {
   11061 			sc->sc_tbi_linkup = 1;
   11062 			return 0;
   11063 		}
   11064 	}
   11065 
   11066 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11067 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11068 	status = CSR_READ(sc, WMREG_STATUS);
   11069 
   11070 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11071 
   11072 	DPRINTF(WM_DEBUG_LINK,
   11073 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11074 		device_xname(sc->sc_dev), __func__,
   11075 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11076 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11077 
   11078 	/*
   11079 	 * SWDPIN   LU RXCW
   11080 	 *      0    0    0
   11081 	 *      0    0    1	(should not happen)
   11082 	 *      0    1    0	(should not happen)
   11083 	 *      0    1    1	(should not happen)
   11084 	 *      1    0    0	Disable autonego and force linkup
   11085 	 *      1    0    1	got /C/ but not linkup yet
   11086 	 *      1    1    0	(linkup)
   11087 	 *      1    1    1	If IFM_AUTO, back to autonego
   11088 	 *
   11089 	 */
   11090 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11091 	    && ((status & STATUS_LU) == 0)
   11092 	    && ((rxcw & RXCW_C) == 0)) {
   11093 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11094 			__func__));
   11095 		sc->sc_tbi_linkup = 0;
   11096 		/* Disable auto-negotiation in the TXCW register */
   11097 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11098 
   11099 		/*
   11100 		 * Force link-up and also force full-duplex.
   11101 		 *
    11102 		 * NOTE: CTRL will update TFCE and RFCE automatically,
    11103 		 * so we should update sc->sc_ctrl
   11104 		 */
   11105 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11106 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11107 	} else if (((status & STATUS_LU) != 0)
   11108 	    && ((rxcw & RXCW_C) != 0)
   11109 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11110 		sc->sc_tbi_linkup = 1;
   11111 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11112 			__func__));
   11113 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11114 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11115 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11116 	    && ((rxcw & RXCW_C) != 0)) {
   11117 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11118 	} else {
   11119 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11120 			status));
   11121 	}
   11122 
   11123 	return 0;
   11124 }
   11125 
   11126 /*
   11127  * wm_tbi_tick:
   11128  *
   11129  *	Check the link on TBI devices.
   11130  *	This function acts as mii_tick().
   11131  */
   11132 static void
   11133 wm_tbi_tick(struct wm_softc *sc)
   11134 {
   11135 	struct mii_data *mii = &sc->sc_mii;
   11136 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11137 	uint32_t status;
   11138 
   11139 	KASSERT(WM_CORE_LOCKED(sc));
   11140 
   11141 	status = CSR_READ(sc, WMREG_STATUS);
   11142 
   11143 	/* XXX is this needed? */
   11144 	(void)CSR_READ(sc, WMREG_RXCW);
   11145 	(void)CSR_READ(sc, WMREG_CTRL);
   11146 
   11147 	/* set link status */
   11148 	if ((status & STATUS_LU) == 0) {
   11149 		DPRINTF(WM_DEBUG_LINK,
   11150 		    ("%s: LINK: checklink -> down\n",
   11151 			device_xname(sc->sc_dev)));
   11152 		sc->sc_tbi_linkup = 0;
   11153 	} else if (sc->sc_tbi_linkup == 0) {
   11154 		DPRINTF(WM_DEBUG_LINK,
   11155 		    ("%s: LINK: checklink -> up %s\n",
   11156 			device_xname(sc->sc_dev),
   11157 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11158 		sc->sc_tbi_linkup = 1;
   11159 		sc->sc_tbi_serdes_ticks = 0;
   11160 	}
   11161 
   11162 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11163 		goto setled;
   11164 
   11165 	if ((status & STATUS_LU) == 0) {
   11166 		sc->sc_tbi_linkup = 0;
   11167 		/* If the timer expired, retry autonegotiation */
   11168 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11169 		    && (++sc->sc_tbi_serdes_ticks
   11170 			>= sc->sc_tbi_serdes_anegticks)) {
   11171 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11172 			sc->sc_tbi_serdes_ticks = 0;
   11173 			/*
   11174 			 * Reset the link, and let autonegotiation do
   11175 			 * its thing
   11176 			 */
   11177 			sc->sc_ctrl |= CTRL_LRST;
   11178 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11179 			CSR_WRITE_FLUSH(sc);
   11180 			delay(1000);
   11181 			sc->sc_ctrl &= ~CTRL_LRST;
   11182 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11183 			CSR_WRITE_FLUSH(sc);
   11184 			delay(1000);
   11185 			CSR_WRITE(sc, WMREG_TXCW,
   11186 			    sc->sc_txcw & ~TXCW_ANE);
   11187 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11188 		}
   11189 	}
   11190 
   11191 setled:
   11192 	wm_tbi_serdes_set_linkled(sc);
   11193 }
   11194 
   11195 /* SERDES related */
   11196 static void
   11197 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11198 {
   11199 	uint32_t reg;
   11200 
   11201 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11202 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11203 		return;
   11204 
   11205 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11206 	reg |= PCS_CFG_PCS_EN;
   11207 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11208 
   11209 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11210 	reg &= ~CTRL_EXT_SWDPIN(3);
   11211 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11212 	CSR_WRITE_FLUSH(sc);
   11213 }
   11214 
   11215 static int
   11216 wm_serdes_mediachange(struct ifnet *ifp)
   11217 {
   11218 	struct wm_softc *sc = ifp->if_softc;
   11219 	bool pcs_autoneg = true; /* XXX */
   11220 	uint32_t ctrl_ext, pcs_lctl, reg;
   11221 
   11222 	/* XXX Currently, this function is not called on 8257[12] */
   11223 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11224 	    || (sc->sc_type >= WM_T_82575))
   11225 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11226 
   11227 	wm_serdes_power_up_link_82575(sc);
   11228 
   11229 	sc->sc_ctrl |= CTRL_SLU;
   11230 
   11231 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11232 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11233 
   11234 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11235 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11236 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11237 	case CTRL_EXT_LINK_MODE_SGMII:
   11238 		pcs_autoneg = true;
   11239 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11240 		break;
   11241 	case CTRL_EXT_LINK_MODE_1000KX:
   11242 		pcs_autoneg = false;
   11243 		/* FALLTHROUGH */
   11244 	default:
   11245 		if ((sc->sc_type == WM_T_82575)
   11246 		    || (sc->sc_type == WM_T_82576)) {
   11247 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11248 				pcs_autoneg = false;
   11249 		}
   11250 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11251 		    | CTRL_FRCFDX;
   11252 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11253 	}
   11254 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11255 
   11256 	if (pcs_autoneg) {
   11257 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11258 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11259 
   11260 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11261 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11262 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11263 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11264 	} else
   11265 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11266 
   11267 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11268 
   11270 	return 0;
   11271 }
   11272 
   11273 static void
   11274 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11275 {
   11276 	struct wm_softc *sc = ifp->if_softc;
   11277 	struct mii_data *mii = &sc->sc_mii;
   11278 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11279 	uint32_t pcs_adv, pcs_lpab, reg;
   11280 
   11281 	ifmr->ifm_status = IFM_AVALID;
   11282 	ifmr->ifm_active = IFM_ETHER;
   11283 
   11284 	/* Check PCS */
   11285 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11286 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11287 		ifmr->ifm_active |= IFM_NONE;
   11288 		sc->sc_tbi_linkup = 0;
   11289 		goto setled;
   11290 	}
   11291 
   11292 	sc->sc_tbi_linkup = 1;
   11293 	ifmr->ifm_status |= IFM_ACTIVE;
   11294 	if (sc->sc_type == WM_T_I354) {
   11295 		uint32_t status;
   11296 
   11297 		status = CSR_READ(sc, WMREG_STATUS);
   11298 		if (((status & STATUS_2P5_SKU) != 0)
   11299 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11300 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11301 		} else
   11302 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11303 	} else {
   11304 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11305 		case PCS_LSTS_SPEED_10:
   11306 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11307 			break;
   11308 		case PCS_LSTS_SPEED_100:
   11309 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11310 			break;
   11311 		case PCS_LSTS_SPEED_1000:
   11312 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11313 			break;
   11314 		default:
   11315 			device_printf(sc->sc_dev, "Unknown speed\n");
   11316 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11317 			break;
   11318 		}
   11319 	}
   11320 	if ((reg & PCS_LSTS_FDX) != 0)
   11321 		ifmr->ifm_active |= IFM_FDX;
   11322 	else
   11323 		ifmr->ifm_active |= IFM_HDX;
   11324 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11325 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11326 		/* Check flow */
   11327 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11328 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11329 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11330 			goto setled;
   11331 		}
   11332 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11333 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11334 		DPRINTF(WM_DEBUG_LINK,
   11335 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11336 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11337 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11338 			mii->mii_media_active |= IFM_FLOW
   11339 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11340 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11341 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11342 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11343 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11344 			mii->mii_media_active |= IFM_FLOW
   11345 			    | IFM_ETH_TXPAUSE;
   11346 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11347 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11348 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11349 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11350 			mii->mii_media_active |= IFM_FLOW
   11351 			    | IFM_ETH_RXPAUSE;
   11352 		}
   11353 	}
   11354 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11355 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11356 setled:
   11357 	wm_tbi_serdes_set_linkled(sc);
   11358 }
   11359 
   11360 /*
   11361  * wm_serdes_tick:
   11362  *
   11363  *	Check the link on serdes devices.
   11364  */
   11365 static void
   11366 wm_serdes_tick(struct wm_softc *sc)
   11367 {
   11368 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11369 	struct mii_data *mii = &sc->sc_mii;
   11370 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11371 	uint32_t reg;
   11372 
   11373 	KASSERT(WM_CORE_LOCKED(sc));
   11374 
   11375 	mii->mii_media_status = IFM_AVALID;
   11376 	mii->mii_media_active = IFM_ETHER;
   11377 
   11378 	/* Check PCS */
   11379 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11380 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11381 		mii->mii_media_status |= IFM_ACTIVE;
   11382 		sc->sc_tbi_linkup = 1;
   11383 		sc->sc_tbi_serdes_ticks = 0;
   11384 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11385 		if ((reg & PCS_LSTS_FDX) != 0)
   11386 			mii->mii_media_active |= IFM_FDX;
   11387 		else
   11388 			mii->mii_media_active |= IFM_HDX;
   11389 	} else {
   11390 		mii->mii_media_status |= IFM_NONE;
   11391 		sc->sc_tbi_linkup = 0;
   11392 		/* If the timer expired, retry autonegotiation */
   11393 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11394 		    && (++sc->sc_tbi_serdes_ticks
   11395 			>= sc->sc_tbi_serdes_anegticks)) {
   11396 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11397 			sc->sc_tbi_serdes_ticks = 0;
   11398 			/* XXX */
   11399 			wm_serdes_mediachange(ifp);
   11400 		}
   11401 	}
   11402 
   11403 	wm_tbi_serdes_set_linkled(sc);
   11404 }
   11405 
   11406 /* SFP related */
   11407 
   11408 static int
   11409 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11410 {
   11411 	uint32_t i2ccmd;
   11412 	int i;
   11413 
   11414 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11415 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11416 
   11417 	/* Poll the ready bit */
   11418 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11419 		delay(50);
   11420 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11421 		if (i2ccmd & I2CCMD_READY)
   11422 			break;
   11423 	}
   11424 	if ((i2ccmd & I2CCMD_READY) == 0)
   11425 		return -1;
   11426 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11427 		return -1;
   11428 
   11429 	*data = i2ccmd & 0x00ff;
   11430 
   11431 	return 0;
   11432 }
   11433 
   11434 static uint32_t
   11435 wm_sfp_get_media_type(struct wm_softc *sc)
   11436 {
   11437 	uint32_t ctrl_ext;
   11438 	uint8_t val = 0;
   11439 	int timeout = 3;
   11440 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11441 	int rv = -1;
   11442 
   11443 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11444 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11445 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11446 	CSR_WRITE_FLUSH(sc);
   11447 
   11448 	/* Read SFP module data */
   11449 	while (timeout) {
   11450 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11451 		if (rv == 0)
   11452 			break;
   11453 		delay(100*1000); /* XXX too big */
   11454 		timeout--;
   11455 	}
   11456 	if (rv != 0)
   11457 		goto out;
   11458 	switch (val) {
   11459 	case SFF_SFP_ID_SFF:
   11460 		aprint_normal_dev(sc->sc_dev,
   11461 		    "Module/Connector soldered to board\n");
   11462 		break;
   11463 	case SFF_SFP_ID_SFP:
   11464 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11465 		break;
   11466 	case SFF_SFP_ID_UNKNOWN:
   11467 		goto out;
   11468 	default:
   11469 		break;
   11470 	}
   11471 
   11472 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11473 	if (rv != 0) {
   11474 		goto out;
   11475 	}
   11476 
   11477 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11478 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11480 		sc->sc_flags |= WM_F_SGMII;
   11481 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11483 		sc->sc_flags |= WM_F_SGMII;
   11484 		mediatype = WM_MEDIATYPE_SERDES;
   11485 	}
   11486 
   11487 out:
   11488 	/* Restore I2C interface setting */
   11489 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11490 
   11491 	return mediatype;
   11492 }
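
/*
 * Illustrative note (not driver code): the two SFF bytes read above are
 * enough to classify the module.  The ID byte says what is plugged in
 * (soldered module, SFP, or unknown) and the Ethernet compliance flags
 * byte selects the MAC-side media type, e.g.:
 *
 *	flags & (1000SX|1000LX)	-> WM_MEDIATYPE_SERDES (fiber)
 *	flags & 1000T		-> WM_MEDIATYPE_COPPER via SGMII
 *	flags & 100FX		-> WM_MEDIATYPE_SERDES via SGMII
 *
 * The SFF_SFP_ETH_FLAGS_* bit positions follow the SFF-8472 compliance
 * code table; the exact values live in the driver's header.
 */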
   11493 
   11494 /*
   11495  * NVM related.
   11496  * Microwire, SPI (w/wo EERD) and Flash.
   11497  */
   11498 
/* Both SPI and Microwire */
   11500 
   11501 /*
   11502  * wm_eeprom_sendbits:
   11503  *
   11504  *	Send a series of bits to the EEPROM.
   11505  */
   11506 static void
   11507 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11508 {
   11509 	uint32_t reg;
   11510 	int x;
   11511 
   11512 	reg = CSR_READ(sc, WMREG_EECD);
   11513 
   11514 	for (x = nbits; x > 0; x--) {
   11515 		if (bits & (1U << (x - 1)))
   11516 			reg |= EECD_DI;
   11517 		else
   11518 			reg &= ~EECD_DI;
   11519 		CSR_WRITE(sc, WMREG_EECD, reg);
   11520 		CSR_WRITE_FLUSH(sc);
   11521 		delay(2);
   11522 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11523 		CSR_WRITE_FLUSH(sc);
   11524 		delay(2);
   11525 		CSR_WRITE(sc, WMREG_EECD, reg);
   11526 		CSR_WRITE_FLUSH(sc);
   11527 		delay(2);
   11528 	}
   11529 }
   11530 
   11531 /*
   11532  * wm_eeprom_recvbits:
   11533  *
   11534  *	Receive a series of bits from the EEPROM.
   11535  */
   11536 static void
   11537 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11538 {
   11539 	uint32_t reg, val;
   11540 	int x;
   11541 
   11542 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11543 
   11544 	val = 0;
   11545 	for (x = nbits; x > 0; x--) {
   11546 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11547 		CSR_WRITE_FLUSH(sc);
   11548 		delay(2);
   11549 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11550 			val |= (1U << (x - 1));
   11551 		CSR_WRITE(sc, WMREG_EECD, reg);
   11552 		CSR_WRITE_FLUSH(sc);
   11553 		delay(2);
   11554 	}
   11555 	*valp = val;
   11556 }
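
/*
 * Illustrative sketch (not driver code): the two helpers above bit-bang
 * a complete EEPROM transaction over the EECD register.  A Microwire
 * READ of word 3 on a part with 6 address bits would be clocked out as
 * the 3-bit READ opcode, then the address, after which 16 data bits are
 * clocked back in:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	// opcode
 *	wm_eeprom_sendbits(sc, 3, 6);			// word address
 *	wm_eeprom_recvbits(sc, &val, 16);		// data word
 *
 * Each transmitted bit costs three register writes (data setup, SK
 * high, SK low) and each received bit two, all with 2us settle delays,
 * so a whole 16-bit read spends on the order of 100us in delays alone.
 */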
   11557 
   11558 /* Microwire */
   11559 
   11560 /*
   11561  * wm_nvm_read_uwire:
   11562  *
   11563  *	Read a word from the EEPROM using the MicroWire protocol.
   11564  */
   11565 static int
   11566 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11567 {
   11568 	uint32_t reg, val;
   11569 	int i;
   11570 
   11571 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11572 		device_xname(sc->sc_dev), __func__));
   11573 
   11574 	if (sc->nvm.acquire(sc) != 0)
   11575 		return -1;
   11576 
   11577 	for (i = 0; i < wordcnt; i++) {
   11578 		/* Clear SK and DI. */
   11579 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11580 		CSR_WRITE(sc, WMREG_EECD, reg);
   11581 
   11582 		/*
   11583 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11584 		 * and Xen.
   11585 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
   11588 		 */
   11589 		if (sc->sc_type == WM_T_82540) {
   11590 			reg |= EECD_SK;
   11591 			CSR_WRITE(sc, WMREG_EECD, reg);
   11592 			reg &= ~EECD_SK;
   11593 			CSR_WRITE(sc, WMREG_EECD, reg);
   11594 			CSR_WRITE_FLUSH(sc);
   11595 			delay(2);
   11596 		}
   11597 		/* XXX: end of workaround */
   11598 
   11599 		/* Set CHIP SELECT. */
   11600 		reg |= EECD_CS;
   11601 		CSR_WRITE(sc, WMREG_EECD, reg);
   11602 		CSR_WRITE_FLUSH(sc);
   11603 		delay(2);
   11604 
   11605 		/* Shift in the READ command. */
   11606 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11607 
   11608 		/* Shift in address. */
   11609 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11610 
   11611 		/* Shift out the data. */
   11612 		wm_eeprom_recvbits(sc, &val, 16);
   11613 		data[i] = val & 0xffff;
   11614 
   11615 		/* Clear CHIP SELECT. */
   11616 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11617 		CSR_WRITE(sc, WMREG_EECD, reg);
   11618 		CSR_WRITE_FLUSH(sc);
   11619 		delay(2);
   11620 	}
   11621 
   11622 	sc->nvm.release(sc);
   11623 	return 0;
   11624 }
   11625 
   11626 /* SPI */
   11627 
   11628 /*
   11629  * Set SPI and FLASH related information from the EECD register.
   11630  * For 82541 and 82547, the word size is taken from EEPROM.
   11631  */
   11632 static int
   11633 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11634 {
   11635 	int size;
   11636 	uint32_t reg;
   11637 	uint16_t data;
   11638 
   11639 	reg = CSR_READ(sc, WMREG_EECD);
   11640 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11641 
   11642 	/* Read the size of NVM from EECD by default */
   11643 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11644 	switch (sc->sc_type) {
   11645 	case WM_T_82541:
   11646 	case WM_T_82541_2:
   11647 	case WM_T_82547:
   11648 	case WM_T_82547_2:
   11649 		/* Set dummy value to access EEPROM */
   11650 		sc->sc_nvm_wordsize = 64;
   11651 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11652 			aprint_error_dev(sc->sc_dev,
   11653 			    "%s: failed to read EEPROM size\n", __func__);
   11654 		}
   11655 		reg = data;
   11656 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11657 		if (size == 0)
   11658 			size = 6; /* 64 word size */
   11659 		else
   11660 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11661 		break;
   11662 	case WM_T_80003:
   11663 	case WM_T_82571:
   11664 	case WM_T_82572:
   11665 	case WM_T_82573: /* SPI case */
   11666 	case WM_T_82574: /* SPI case */
   11667 	case WM_T_82583: /* SPI case */
   11668 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11669 		if (size > 14)
   11670 			size = 14;
   11671 		break;
   11672 	case WM_T_82575:
   11673 	case WM_T_82576:
   11674 	case WM_T_82580:
   11675 	case WM_T_I350:
   11676 	case WM_T_I354:
   11677 	case WM_T_I210:
   11678 	case WM_T_I211:
   11679 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11680 		if (size > 15)
   11681 			size = 15;
   11682 		break;
   11683 	default:
   11684 		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
		return -1;
   11688 	}
   11689 
   11690 	sc->sc_nvm_wordsize = 1 << size;
   11691 
   11692 	return 0;
   11693 }
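
/*
 * Worked example of the size math above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 (the minimum 64-word part): an EECD
 * size field of 2 on an 82575 gives size = 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 words (512 bytes).  On the
 * 82541/82547 path the size field is re-read from the EEPROM itself;
 * a field of 0 there falls back to size = 6, i.e. 64 words.
 */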
   11694 
   11695 /*
   11696  * wm_nvm_ready_spi:
   11697  *
   11698  *	Wait for a SPI EEPROM to be ready for commands.
   11699  */
   11700 static int
   11701 wm_nvm_ready_spi(struct wm_softc *sc)
   11702 {
   11703 	uint32_t val;
   11704 	int usec;
   11705 
   11706 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11707 		device_xname(sc->sc_dev), __func__));
   11708 
   11709 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11710 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11711 		wm_eeprom_recvbits(sc, &val, 8);
   11712 		if ((val & SPI_SR_RDY) == 0)
   11713 			break;
   11714 	}
   11715 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   11717 		return -1;
   11718 	}
   11719 	return 0;
   11720 }
   11721 
   11722 /*
   11723  * wm_nvm_read_spi:
   11724  *
 *	Read a word from the EEPROM using the SPI protocol.
   11726  */
   11727 static int
   11728 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11729 {
   11730 	uint32_t reg, val;
   11731 	int i;
   11732 	uint8_t opc;
   11733 	int rv = 0;
   11734 
   11735 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11736 		device_xname(sc->sc_dev), __func__));
   11737 
   11738 	if (sc->nvm.acquire(sc) != 0)
   11739 		return -1;
   11740 
   11741 	/* Clear SK and CS. */
   11742 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11743 	CSR_WRITE(sc, WMREG_EECD, reg);
   11744 	CSR_WRITE_FLUSH(sc);
   11745 	delay(2);
   11746 
   11747 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11748 		goto out;
   11749 
   11750 	/* Toggle CS to flush commands. */
   11751 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11752 	CSR_WRITE_FLUSH(sc);
   11753 	delay(2);
   11754 	CSR_WRITE(sc, WMREG_EECD, reg);
   11755 	CSR_WRITE_FLUSH(sc);
   11756 	delay(2);
   11757 
   11758 	opc = SPI_OPC_READ;
   11759 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11760 		opc |= SPI_OPC_A8;
   11761 
   11762 	wm_eeprom_sendbits(sc, opc, 8);
   11763 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11764 
   11765 	for (i = 0; i < wordcnt; i++) {
   11766 		wm_eeprom_recvbits(sc, &val, 16);
   11767 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11768 	}
   11769 
   11770 	/* Raise CS and clear SK. */
   11771 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11772 	CSR_WRITE(sc, WMREG_EECD, reg);
   11773 	CSR_WRITE_FLUSH(sc);
   11774 	delay(2);
   11775 
   11776 out:
   11777 	sc->nvm.release(sc);
   11778 	return rv;
   11779 }
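
/*
 * Note on the byte swap above: the SPI EEPROM stores each 16-bit word
 * low byte first, while wm_eeprom_recvbits() shifts bits in MSB-first,
 * so the first byte off the wire lands in the high half of 'val'.
 * If the wire carries 0x34 then 0x12, val reads back as 0x3412 and
 *
 *	data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
 *
 * recovers the stored word 0x1234.
 */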
   11780 
/* Reading via EERD */
   11782 
   11783 static int
   11784 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11785 {
   11786 	uint32_t attempts = 100000;
   11787 	uint32_t i, reg = 0;
   11788 	int32_t done = -1;
   11789 
   11790 	for (i = 0; i < attempts; i++) {
   11791 		reg = CSR_READ(sc, rw);
   11792 
   11793 		if (reg & EERD_DONE) {
   11794 			done = 0;
   11795 			break;
   11796 		}
   11797 		delay(5);
   11798 	}
   11799 
   11800 	return done;
   11801 }
   11802 
   11803 static int
   11804 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11805     uint16_t *data)
   11806 {
   11807 	int i, eerd = 0;
   11808 	int rv = 0;
   11809 
   11810 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11811 		device_xname(sc->sc_dev), __func__));
   11812 
   11813 	if (sc->nvm.acquire(sc) != 0)
   11814 		return -1;
   11815 
   11816 	for (i = 0; i < wordcnt; i++) {
   11817 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11818 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11819 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11820 		if (rv != 0) {
   11821 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   11823 			break;
   11824 		}
   11825 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11826 	}
   11827 
   11828 	sc->nvm.release(sc);
   11829 	return rv;
   11830 }
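
/*
 * Illustrative sketch (not driver code) of a single EERD transaction as
 * issued above: the word address and START bit are written to EERD,
 * DONE is polled, and the data comes back in the top bits of the same
 * register:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 *
 * wm_poll_eerd_eewr_done() bounds that polling at 100000 iterations of
 * 5us, i.e. half a second, before reporting failure.
 */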
   11831 
   11832 /* Flash */
   11833 
   11834 static int
   11835 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11836 {
   11837 	uint32_t eecd;
   11838 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11839 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11840 	uint8_t sig_byte = 0;
   11841 
   11842 	switch (sc->sc_type) {
   11843 	case WM_T_PCH_SPT:
   11844 		/*
   11845 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11846 		 * sector valid bits from the NVM.
   11847 		 */
   11848 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11849 		if ((*bank == 0) || (*bank == 1)) {
   11850 			aprint_error_dev(sc->sc_dev,
   11851 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11852 				*bank);
   11853 			return -1;
   11854 		} else {
   11855 			*bank = *bank - 2;
   11856 			return 0;
   11857 		}
   11858 	case WM_T_ICH8:
   11859 	case WM_T_ICH9:
   11860 		eecd = CSR_READ(sc, WMREG_EECD);
   11861 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11862 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11863 			return 0;
   11864 		}
   11865 		/* FALLTHROUGH */
   11866 	default:
   11867 		/* Default to 0 */
   11868 		*bank = 0;
   11869 
   11870 		/* Check bank 0 */
   11871 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11872 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11873 			*bank = 0;
   11874 			return 0;
   11875 		}
   11876 
   11877 		/* Check bank 1 */
   11878 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11879 		    &sig_byte);
   11880 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11881 			*bank = 1;
   11882 			return 0;
   11883 		}
   11884 	}
   11885 
   11886 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11887 		device_xname(sc->sc_dev)));
   11888 	return -1;
   11889 }
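
/*
 * Worked example of the bank-detect offsets above: the signature byte
 * for bank 0 is the high byte of word ICH_NVM_SIG_WORD, hence the
 * "* 2 + 1" byte offset.  With sc_ich8_flash_bank_size = 0x1000 words,
 * the bank 1 copy of the same byte sits bank1_offset =
 * 0x1000 * sizeof(uint16_t) = 0x2000 bytes later.  Whichever copy
 * matches ICH_NVM_SIG_VALUE under ICH_NVM_VALID_SIG_MASK names the
 * valid bank.
 */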
   11890 
   11891 /******************************************************************************
   11892  * This function does initial flash setup so that a new read/write/erase cycle
   11893  * can be started.
   11894  *
   11895  * sc - The pointer to the hw structure
   11896  ****************************************************************************/
   11897 static int32_t
   11898 wm_ich8_cycle_init(struct wm_softc *sc)
   11899 {
   11900 	uint16_t hsfsts;
   11901 	int32_t error = 1;
   11902 	int32_t i     = 0;
   11903 
   11904 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11905 
	/* Check the Flash Descriptor Valid bit in HW status */
   11907 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11908 		return error;
   11909 	}
   11910 
   11911 	/* Clear FCERR in Hw status by writing 1 */
   11912 	/* Clear DAEL in Hw status by writing a 1 */
   11913 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11914 
   11915 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11916 
	/*
	 * Ideally the hardware would provide a cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * would be set to 1 after a hardware reset so that it could be
	 * used to tell whether a cycle is in progress or has completed.
	 * We should also have a software semaphore mechanism guarding
	 * FDONE or the cycle-in-progress bit so that accesses to those
	 * bits by two threads are serialized and two threads cannot
	 * start a cycle at the same time.
	 */
   11927 
   11928 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11929 		/*
   11930 		 * There is no cycle running at present, so we can start a
   11931 		 * cycle
   11932 		 */
   11933 
   11934 		/* Begin by setting Flash Cycle Done. */
   11935 		hsfsts |= HSFSTS_DONE;
   11936 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11937 		error = 0;
   11938 	} else {
   11939 		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
   11942 		 */
   11943 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11944 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11945 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11946 				error = 0;
   11947 				break;
   11948 			}
   11949 			delay(1);
   11950 		}
   11951 		if (error == 0) {
   11952 			/*
			 * We successfully waited out the previous cycle;
			 * now set the Flash Cycle Done bit.
   11955 			 */
   11956 			hsfsts |= HSFSTS_DONE;
   11957 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11958 		}
   11959 	}
   11960 	return error;
   11961 }
   11962 
   11963 /******************************************************************************
   11964  * This function starts a flash cycle and waits for its completion
   11965  *
   11966  * sc - The pointer to the hw structure
   11967  ****************************************************************************/
   11968 static int32_t
   11969 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11970 {
   11971 	uint16_t hsflctl;
   11972 	uint16_t hsfsts;
   11973 	int32_t error = 1;
   11974 	uint32_t i = 0;
   11975 
   11976 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11977 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11978 	hsflctl |= HSFCTL_GO;
   11979 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11980 
   11981 	/* Wait till FDONE bit is set to 1 */
   11982 	do {
   11983 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11984 		if (hsfsts & HSFSTS_DONE)
   11985 			break;
   11986 		delay(1);
   11987 		i++;
   11988 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11990 		error = 0;
   11991 
   11992 	return error;
   11993 }
   11994 
   11995 /******************************************************************************
   11996  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11997  *
   11998  * sc - The pointer to the hw structure
   11999  * index - The index of the byte or word to read.
   12000  * size - Size of data to read, 1=byte 2=word, 4=dword
   12001  * data - Pointer to the word to store the value read.
   12002  *****************************************************************************/
   12003 static int32_t
   12004 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12005     uint32_t size, uint32_t *data)
   12006 {
   12007 	uint16_t hsfsts;
   12008 	uint16_t hsflctl;
   12009 	uint32_t flash_linear_address;
   12010 	uint32_t flash_data = 0;
   12011 	int32_t error = 1;
   12012 	int32_t count = 0;
   12013 
	if (size < 1 || size > 4 || data == NULL ||
   12015 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12016 		return error;
   12017 
   12018 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12019 	    sc->sc_ich8_flash_base;
   12020 
   12021 	do {
   12022 		delay(1);
   12023 		/* Steps */
   12024 		error = wm_ich8_cycle_init(sc);
   12025 		if (error)
   12026 			break;
   12027 
   12028 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1: 0 = 1 byte, 3 = 4 bytes */
   12030 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12031 		    & HSFCTL_BCOUNT_MASK;
   12032 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12033 		if (sc->sc_type == WM_T_PCH_SPT) {
   12034 			/*
			 * In SPT, this register is in LAN memory space, not
   12036 			 * flash. Therefore, only 32 bit access is supported.
   12037 			 */
   12038 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12039 			    (uint32_t)hsflctl);
   12040 		} else
   12041 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12042 
   12043 		/*
   12044 		 * Write the last 24 bits of index into Flash Linear address
   12045 		 * field in Flash Address
   12046 		 */
   12047 		/* TODO: TBD maybe check the index against the size of flash */
   12048 
   12049 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12050 
   12051 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12052 
   12053 		/*
		 * If FCERR is set to 1, clear it and retry the whole
		 * sequence a few more times; otherwise read in (shift in)
		 * the Flash Data0 register, whose contents come back
		 * least significant byte first.
   12058 		 */
   12059 		if (error == 0) {
   12060 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12061 			if (size == 1)
   12062 				*data = (uint8_t)(flash_data & 0x000000FF);
   12063 			else if (size == 2)
   12064 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12065 			else if (size == 4)
   12066 				*data = (uint32_t)flash_data;
   12067 			break;
   12068 		} else {
   12069 			/*
   12070 			 * If we've gotten here, then things are probably
   12071 			 * completely hosed, but if the error condition is
   12072 			 * detected, it won't hurt to give it another try...
   12073 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12074 			 */
   12075 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12076 			if (hsfsts & HSFSTS_ERR) {
   12077 				/* Repeat for some time before giving up. */
   12078 				continue;
   12079 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12080 				break;
   12081 		}
   12082 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12083 
   12084 	return error;
   12085 }
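
/*
 * Sketch of one flash read as driven above (not driver code; the order
 * of operations is the point):
 *
 *	wm_ich8_cycle_init(sc);		// clear FCERR/DAEL, claim cycle
 *	// HSFCTL: byte count = size - 1, cycle type = ICH_CYCLE_READ
 *	ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
 *	wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT); // GO, poll FDONE
 *	flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
 *
 * flash_linear_address is the low 24 bits of the caller's index added
 * to sc_ich8_flash_base, so index 0x10 with a flash base of 0x1000
 * reads linear address 0x1010.
 */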
   12086 
   12087 /******************************************************************************
   12088  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12089  *
   12090  * sc - pointer to wm_hw structure
   12091  * index - The index of the byte to read.
   12092  * data - Pointer to a byte to store the value read.
   12093  *****************************************************************************/
   12094 static int32_t
   12095 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12096 {
   12097 	int32_t status;
   12098 	uint32_t word = 0;
   12099 
   12100 	status = wm_read_ich8_data(sc, index, 1, &word);
   12101 	if (status == 0)
   12102 		*data = (uint8_t)word;
   12103 	else
   12104 		*data = 0;
   12105 
   12106 	return status;
   12107 }
   12108 
   12109 /******************************************************************************
   12110  * Reads a word from the NVM using the ICH8 flash access registers.
   12111  *
   12112  * sc - pointer to wm_hw structure
   12113  * index - The starting byte index of the word to read.
   12114  * data - Pointer to a word to store the value read.
   12115  *****************************************************************************/
   12116 static int32_t
   12117 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12118 {
   12119 	int32_t status;
   12120 	uint32_t word = 0;
   12121 
   12122 	status = wm_read_ich8_data(sc, index, 2, &word);
   12123 	if (status == 0)
   12124 		*data = (uint16_t)word;
   12125 	else
   12126 		*data = 0;
   12127 
   12128 	return status;
   12129 }
   12130 
   12131 /******************************************************************************
   12132  * Reads a dword from the NVM using the ICH8 flash access registers.
   12133  *
   12134  * sc - pointer to wm_hw structure
   12135  * index - The starting byte index of the word to read.
   12136  * data - Pointer to a word to store the value read.
   12137  *****************************************************************************/
   12138 static int32_t
   12139 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12140 {
   12141 	int32_t status;
   12142 
   12143 	status = wm_read_ich8_data(sc, index, 4, data);
   12144 	return status;
   12145 }
   12146 
   12147 /******************************************************************************
   12148  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12149  * register.
   12150  *
   12151  * sc - Struct containing variables accessed by shared code
   12152  * offset - offset of word in the EEPROM to read
   12153  * data - word read from the EEPROM
   12154  * words - number of words to read
   12155  *****************************************************************************/
   12156 static int
   12157 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12158 {
   12159 	int32_t  rv = 0;
   12160 	uint32_t flash_bank = 0;
   12161 	uint32_t act_offset = 0;
   12162 	uint32_t bank_offset = 0;
   12163 	uint16_t word = 0;
   12164 	uint16_t i = 0;
   12165 
   12166 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12167 		device_xname(sc->sc_dev), __func__));
   12168 
   12169 	if (sc->nvm.acquire(sc) != 0)
   12170 		return -1;
   12171 
   12172 	/*
   12173 	 * We need to know which is the valid flash bank.  In the event
   12174 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12175 	 * managing flash_bank.  So it cannot be trusted and needs
   12176 	 * to be updated with each read.
   12177 	 */
   12178 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12179 	if (rv) {
   12180 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12181 			device_xname(sc->sc_dev)));
   12182 		flash_bank = 0;
   12183 	}
   12184 
   12185 	/*
   12186 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12187 	 * size
   12188 	 */
   12189 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12190 
   12191 	for (i = 0; i < words; i++) {
   12192 		/* The NVM part needs a byte offset, hence * 2 */
   12193 		act_offset = bank_offset + ((offset + i) * 2);
   12194 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12195 		if (rv) {
   12196 			aprint_error_dev(sc->sc_dev,
   12197 			    "%s: failed to read NVM\n", __func__);
   12198 			break;
   12199 		}
   12200 		data[i] = word;
   12201 	}
   12202 
   12203 	sc->nvm.release(sc);
   12204 	return rv;
   12205 }
   12206 
   12207 /******************************************************************************
   12208  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12209  * register.
   12210  *
   12211  * sc - Struct containing variables accessed by shared code
   12212  * offset - offset of word in the EEPROM to read
   12213  * data - word read from the EEPROM
   12214  * words - number of words to read
   12215  *****************************************************************************/
   12216 static int
   12217 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12218 {
   12219 	int32_t  rv = 0;
   12220 	uint32_t flash_bank = 0;
   12221 	uint32_t act_offset = 0;
   12222 	uint32_t bank_offset = 0;
   12223 	uint32_t dword = 0;
   12224 	uint16_t i = 0;
   12225 
   12226 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12227 		device_xname(sc->sc_dev), __func__));
   12228 
   12229 	if (sc->nvm.acquire(sc) != 0)
   12230 		return -1;
   12231 
   12232 	/*
   12233 	 * We need to know which is the valid flash bank.  In the event
   12234 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12235 	 * managing flash_bank.  So it cannot be trusted and needs
   12236 	 * to be updated with each read.
   12237 	 */
   12238 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12239 	if (rv) {
   12240 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12241 			device_xname(sc->sc_dev)));
   12242 		flash_bank = 0;
   12243 	}
   12244 
   12245 	/*
   12246 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12247 	 * size
   12248 	 */
   12249 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12250 
   12251 	for (i = 0; i < words; i++) {
   12252 		/* The NVM part needs a byte offset, hence * 2 */
   12253 		act_offset = bank_offset + ((offset + i) * 2);
   12254 		/* but we must read dword aligned, so mask ... */
   12255 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12256 		if (rv) {
   12257 			aprint_error_dev(sc->sc_dev,
   12258 			    "%s: failed to read NVM\n", __func__);
   12259 			break;
   12260 		}
   12261 		/* ... and pick out low or high word */
   12262 		if ((act_offset & 0x2) == 0)
   12263 			data[i] = (uint16_t)(dword & 0xFFFF);
   12264 		else
   12265 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12266 	}
   12267 
   12268 	sc->nvm.release(sc);
   12269 	return rv;
   12270 }
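
/*
 * Worked example of the dword-aligned access above: NVM word 3 in
 * bank 0 has act_offset = 6.  SPT only allows dword flash reads, so
 * the read is issued at the aligned address 6 & ~0x3 = 4 and, because
 * bit 1 of act_offset is set, the wanted word is the high half of the
 * dword.  Word 2 (act_offset = 4) would come from the low half of the
 * very same dword read.
 */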
   12271 
   12272 /* iNVM */
   12273 
   12274 static int
   12275 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12276 {
	int32_t rv = -1;	/* -1 = word not found */
   12278 	uint32_t invm_dword;
   12279 	uint16_t i;
   12280 	uint8_t record_type, word_address;
   12281 
   12282 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12283 		device_xname(sc->sc_dev), __func__));
   12284 
   12285 	for (i = 0; i < INVM_SIZE; i++) {
   12286 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12287 		/* Get record type */
   12288 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12289 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12290 			break;
   12291 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12292 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12293 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12294 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12295 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12296 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12297 			if (word_address == address) {
   12298 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12299 				rv = 0;
   12300 				break;
   12301 			}
   12302 		}
   12303 	}
   12304 
   12305 	return rv;
   12306 }
   12307 
   12308 static int
   12309 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12310 {
   12311 	int rv = 0;
   12312 	int i;
   12313 
   12314 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12315 		device_xname(sc->sc_dev), __func__));
   12316 
   12317 	if (sc->nvm.acquire(sc) != 0)
   12318 		return -1;
   12319 
   12320 	for (i = 0; i < words; i++) {
   12321 		switch (offset + i) {
   12322 		case NVM_OFF_MACADDR:
   12323 		case NVM_OFF_MACADDR1:
   12324 		case NVM_OFF_MACADDR2:
   12325 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12326 			if (rv != 0) {
   12327 				data[i] = 0xffff;
   12328 				rv = -1;
   12329 			}
   12330 			break;
   12331 		case NVM_OFF_CFG2:
   12332 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12333 			if (rv != 0) {
   12334 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12335 				rv = 0;
   12336 			}
   12337 			break;
   12338 		case NVM_OFF_CFG4:
   12339 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12340 			if (rv != 0) {
   12341 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12342 				rv = 0;
   12343 			}
   12344 			break;
   12345 		case NVM_OFF_LED_1_CFG:
   12346 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12347 			if (rv != 0) {
   12348 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12349 				rv = 0;
   12350 			}
   12351 			break;
   12352 		case NVM_OFF_LED_0_2_CFG:
   12353 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12354 			if (rv != 0) {
   12355 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12356 				rv = 0;
   12357 			}
   12358 			break;
   12359 		case NVM_OFF_ID_LED_SETTINGS:
   12360 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12361 			if (rv != 0) {
   12362 				*data = ID_LED_RESERVED_FFFF;
   12363 				rv = 0;
   12364 			}
   12365 			break;
   12366 		default:
   12367 			DPRINTF(WM_DEBUG_NVM,
   12368 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12369 			*data = NVM_RESERVED_WORD;
   12370 			break;
   12371 		}
   12372 	}
   12373 
   12374 	sc->nvm.release(sc);
   12375 	return rv;
   12376 }
   12377 
   12378 /* Lock, detecting NVM type, validate checksum, version and read */
   12379 
   12380 static int
   12381 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12382 {
   12383 	uint32_t eecd = 0;
   12384 
   12385 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12386 	    || sc->sc_type == WM_T_82583) {
   12387 		eecd = CSR_READ(sc, WMREG_EECD);
   12388 
   12389 		/* Isolate bits 15 & 16 */
   12390 		eecd = ((eecd >> 15) & 0x03);
   12391 
   12392 		/* If both bits are set, device is Flash type */
   12393 		if (eecd == 0x03)
   12394 			return 0;
   12395 	}
   12396 	return 1;
   12397 }
   12398 
   12399 static int
   12400 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12401 {
   12402 	uint32_t eec;
   12403 
   12404 	eec = CSR_READ(sc, WMREG_EEC);
   12405 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12406 		return 1;
   12407 
   12408 	return 0;
   12409 }
   12410 
   12411 /*
   12412  * wm_nvm_validate_checksum
   12413  *
   12414  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12415  */
   12416 static int
   12417 wm_nvm_validate_checksum(struct wm_softc *sc)
   12418 {
   12419 	uint16_t checksum;
   12420 	uint16_t eeprom_data;
   12421 #ifdef WM_DEBUG
   12422 	uint16_t csum_wordaddr, valid_checksum;
   12423 #endif
   12424 	int i;
   12425 
   12426 	checksum = 0;
   12427 
   12428 	/* Don't check for I211 */
   12429 	if (sc->sc_type == WM_T_I211)
   12430 		return 0;
   12431 
   12432 #ifdef WM_DEBUG
   12433 	if (sc->sc_type == WM_T_PCH_LPT) {
   12434 		csum_wordaddr = NVM_OFF_COMPAT;
   12435 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12436 	} else {
   12437 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12438 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12439 	}
   12440 
   12441 	/* Dump EEPROM image for debug */
   12442 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12443 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12444 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12445 		/* XXX PCH_SPT? */
   12446 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12447 		if ((eeprom_data & valid_checksum) == 0) {
   12448 			DPRINTF(WM_DEBUG_NVM,
   12449 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12450 				device_xname(sc->sc_dev), eeprom_data,
   12451 				    valid_checksum));
   12452 		}
   12453 	}
   12454 
   12455 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12456 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12457 		for (i = 0; i < NVM_SIZE; i++) {
   12458 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12459 				printf("XXXX ");
   12460 			else
   12461 				printf("%04hx ", eeprom_data);
   12462 			if (i % 8 == 7)
   12463 				printf("\n");
   12464 		}
   12465 	}
   12466 
   12467 #endif /* WM_DEBUG */
   12468 
   12469 	for (i = 0; i < NVM_SIZE; i++) {
   12470 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12471 			return 1;
   12472 		checksum += eeprom_data;
   12473 	}
   12474 
   12475 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12476 #ifdef WM_DEBUG
   12477 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12478 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12479 #endif
   12480 	}
   12481 
   12482 	return 0;
   12483 }
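
/*
 * Checksum arithmetic, for reference: the 16-bit truncated sum of NVM
 * words 0x00-0x3f must equal NVM_CHECKSUM (conventionally 0xBABA on
 * this family).  So if words 0x00-0x3e sum to 0x1234, the checksum
 * word at 0x3f must hold 0xBABA - 0x1234 = 0xA886.  Note that, as
 * coded above, a mismatch is only reported under WM_DEBUG and is not
 * treated as fatal.
 */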
   12484 
   12485 static void
   12486 wm_nvm_version_invm(struct wm_softc *sc)
   12487 {
   12488 	uint32_t dword;
   12489 
   12490 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm; we just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect, though...
   12494 	 *
   12495 	 * Example:
   12496 	 *
   12497 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12498 	 */
   12499 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12500 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12501 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12502 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12503 }
   12504 
   12505 static void
   12506 wm_nvm_version(struct wm_softc *sc)
   12507 {
   12508 	uint16_t major, minor, build, patch;
   12509 	uint16_t uid0, uid1;
   12510 	uint16_t nvm_data;
   12511 	uint16_t off;
   12512 	bool check_version = false;
   12513 	bool check_optionrom = false;
   12514 	bool have_build = false;
   12515 	bool have_uid = true;
   12516 
   12517 	/*
   12518 	 * Version format:
   12519 	 *
   12520 	 * XYYZ
   12521 	 * X0YZ
   12522 	 * X0YY
   12523 	 *
   12524 	 * Example:
   12525 	 *
   12526 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12527 	 *	82571	0x50a6	5.10.6?
   12528 	 *	82572	0x506a	5.6.10?
   12529 	 *	82572EI	0x5069	5.6.9?
   12530 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12531 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12533 	 */
   12534 
   12535 	/*
   12536 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12539 	 */
   12540 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12541 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12542 		have_uid = false;
   12543 
   12544 	switch (sc->sc_type) {
   12545 	case WM_T_82571:
   12546 	case WM_T_82572:
   12547 	case WM_T_82574:
   12548 	case WM_T_82583:
   12549 		check_version = true;
   12550 		check_optionrom = true;
   12551 		have_build = true;
   12552 		break;
   12553 	case WM_T_82575:
   12554 	case WM_T_82576:
   12555 	case WM_T_82580:
		if (have_uid && ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID))
			check_version = true;
   12558 		break;
   12559 	case WM_T_I211:
   12560 		wm_nvm_version_invm(sc);
   12561 		have_uid = false;
   12562 		goto printver;
   12563 	case WM_T_I210:
   12564 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12565 			wm_nvm_version_invm(sc);
   12566 			have_uid = false;
   12567 			goto printver;
   12568 		}
   12569 		/* FALLTHROUGH */
   12570 	case WM_T_I350:
   12571 	case WM_T_I354:
   12572 		check_version = true;
   12573 		check_optionrom = true;
   12574 		break;
   12575 	default:
   12576 		return;
   12577 	}
   12578 	if (check_version
   12579 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12580 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12581 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12582 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12583 			build = nvm_data & NVM_BUILD_MASK;
   12584 			have_build = true;
   12585 		} else
   12586 			minor = nvm_data & 0x00ff;
   12587 
   12588 		/* Decimal */
   12589 		minor = (minor / 16) * 10 + (minor % 16);
   12590 		sc->sc_nvm_ver_major = major;
   12591 		sc->sc_nvm_ver_minor = minor;
   12592 
   12593 printver:
   12594 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12595 		    sc->sc_nvm_ver_minor);
   12596 		if (have_build) {
   12597 			sc->sc_nvm_ver_build = build;
   12598 			aprint_verbose(".%d", build);
   12599 		}
   12600 	}
   12601 
	/* Assume the Option ROM area is above NVM_SIZE */
   12603 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12604 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12605 		/* Option ROM Version */
   12606 		if ((off != 0x0000) && (off != 0xffff)) {
   12607 			int rv;
   12608 
   12609 			off += NVM_COMBO_VER_OFF;
   12610 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12611 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12612 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12613 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12614 				/* 16bits */
   12615 				major = uid0 >> 8;
   12616 				build = (uid0 << 8) | (uid1 >> 8);
   12617 				patch = uid1 & 0x00ff;
   12618 				aprint_verbose(", option ROM Version %d.%d.%d",
   12619 				    major, build, patch);
   12620 			}
   12621 		}
   12622 	}
   12623 
   12624 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12625 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12626 }
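
/*
 * Worked example of the decode above, assuming the usual field layout
 * (major in the top nibble, minor in the middle byte, build in the low
 * nibble): nvm_data = 0x50a2 splits into major = 5, raw minor = 0x0a
 * and build = 2.  The conversion
 *
 *	minor = (minor / 16) * 10 + (minor % 16);
 *
 * maps the raw field to decimal (0x0a -> 10), so the version prints as
 * "5.10.2", matching the 82571 example in the comment block above.
 */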
   12627 
   12628 /*
   12629  * wm_nvm_read:
   12630  *
   12631  *	Read data from the serial EEPROM.
   12632  */
   12633 static int
   12634 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12635 {
   12636 	int rv;
   12637 
   12638 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12639 		device_xname(sc->sc_dev), __func__));
   12640 
   12641 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12642 		return -1;
   12643 
   12644 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12645 
   12646 	return rv;
   12647 }
   12648 
   12649 /*
   12650  * Hardware semaphores.
 * Very complex...
   12652  */
   12653 
   12654 static int
   12655 wm_get_null(struct wm_softc *sc)
   12656 {
   12657 
   12658 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12659 		device_xname(sc->sc_dev), __func__));
   12660 	return 0;
   12661 }
   12662 
   12663 static void
   12664 wm_put_null(struct wm_softc *sc)
   12665 {
   12666 
   12667 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12668 		device_xname(sc->sc_dev), __func__));
   12669 	return;
   12670 }
   12671 
   12672 static int
   12673 wm_get_eecd(struct wm_softc *sc)
   12674 {
   12675 	uint32_t reg;
   12676 	int x;
   12677 
   12678 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12679 		device_xname(sc->sc_dev), __func__));
   12680 
   12681 	reg = CSR_READ(sc, WMREG_EECD);
   12682 
   12683 	/* Request EEPROM access. */
   12684 	reg |= EECD_EE_REQ;
   12685 	CSR_WRITE(sc, WMREG_EECD, reg);
   12686 
   12687 	/* ..and wait for it to be granted. */
   12688 	for (x = 0; x < 1000; x++) {
   12689 		reg = CSR_READ(sc, WMREG_EECD);
   12690 		if (reg & EECD_EE_GNT)
   12691 			break;
   12692 		delay(5);
   12693 	}
   12694 	if ((reg & EECD_EE_GNT) == 0) {
   12695 		aprint_error_dev(sc->sc_dev,
   12696 		    "could not acquire EEPROM GNT\n");
   12697 		reg &= ~EECD_EE_REQ;
   12698 		CSR_WRITE(sc, WMREG_EECD, reg);
   12699 		return -1;
   12700 	}
   12701 
   12702 	return 0;
   12703 }
   12704 
   12705 static void
   12706 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12707 {
   12708 
   12709 	*eecd |= EECD_SK;
   12710 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12711 	CSR_WRITE_FLUSH(sc);
   12712 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12713 		delay(1);
   12714 	else
   12715 		delay(50);
   12716 }
   12717 
   12718 static void
   12719 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12720 {
   12721 
   12722 	*eecd &= ~EECD_SK;
   12723 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12724 	CSR_WRITE_FLUSH(sc);
   12725 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12726 		delay(1);
   12727 	else
   12728 		delay(50);
   12729 }
   12730 
   12731 static void
   12732 wm_put_eecd(struct wm_softc *sc)
   12733 {
   12734 	uint32_t reg;
   12735 
   12736 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12737 		device_xname(sc->sc_dev), __func__));
   12738 
   12739 	/* Stop nvm */
   12740 	reg = CSR_READ(sc, WMREG_EECD);
   12741 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12742 		/* Pull CS high */
   12743 		reg |= EECD_CS;
   12744 		wm_nvm_eec_clock_lower(sc, &reg);
   12745 	} else {
   12746 		/* CS on Microwire is active-high */
   12747 		reg &= ~(EECD_CS | EECD_DI);
   12748 		CSR_WRITE(sc, WMREG_EECD, reg);
   12749 		wm_nvm_eec_clock_raise(sc, &reg);
   12750 		wm_nvm_eec_clock_lower(sc, &reg);
   12751 	}
   12752 
   12753 	reg = CSR_READ(sc, WMREG_EECD);
   12754 	reg &= ~EECD_EE_REQ;
   12755 	CSR_WRITE(sc, WMREG_EECD, reg);
   12756 
   12757 	return;
   12758 }
   12759 
   12760 /*
   12761  * Get hardware semaphore.
   12762  * Same as e1000_get_hw_semaphore_generic()
   12763  */
   12764 static int
   12765 wm_get_swsm_semaphore(struct wm_softc *sc)
   12766 {
   12767 	int32_t timeout;
   12768 	uint32_t swsm;
   12769 
   12770 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12771 		device_xname(sc->sc_dev), __func__));
   12772 	KASSERT(sc->sc_nvm_wordsize > 0);
   12773 
   12774 retry:
   12775 	/* Get the SW semaphore. */
   12776 	timeout = sc->sc_nvm_wordsize + 1;
   12777 	while (timeout) {
   12778 		swsm = CSR_READ(sc, WMREG_SWSM);
   12779 
   12780 		if ((swsm & SWSM_SMBI) == 0)
   12781 			break;
   12782 
   12783 		delay(50);
   12784 		timeout--;
   12785 	}
   12786 
   12787 	if (timeout == 0) {
   12788 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12789 			/*
   12790 			 * In rare circumstances, the SW semaphore may already
   12791 			 * be held unintentionally. Clear the semaphore once
   12792 			 * before giving up.
   12793 			 */
   12794 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12795 			wm_put_swsm_semaphore(sc);
   12796 			goto retry;
   12797 		}
   12798 		aprint_error_dev(sc->sc_dev,
   12799 		    "could not acquire SWSM SMBI\n");
   12800 		return 1;
   12801 	}
   12802 
   12803 	/* Get the FW semaphore. */
   12804 	timeout = sc->sc_nvm_wordsize + 1;
   12805 	while (timeout) {
   12806 		swsm = CSR_READ(sc, WMREG_SWSM);
   12807 		swsm |= SWSM_SWESMBI;
   12808 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12809 		/* If we managed to set the bit we got the semaphore. */
   12810 		swsm = CSR_READ(sc, WMREG_SWSM);
   12811 		if (swsm & SWSM_SWESMBI)
   12812 			break;
   12813 
   12814 		delay(50);
   12815 		timeout--;
   12816 	}
   12817 
   12818 	if (timeout == 0) {
   12819 		aprint_error_dev(sc->sc_dev,
   12820 		    "could not acquire SWSM SWESMBI\n");
   12821 		/* Release semaphores */
   12822 		wm_put_swsm_semaphore(sc);
   12823 		return 1;
   12824 	}
   12825 	return 0;
   12826 }
   12827 
   12828 /*
   12829  * Put hardware semaphore.
   12830  * Same as e1000_put_hw_semaphore_generic()
   12831  */
   12832 static void
   12833 wm_put_swsm_semaphore(struct wm_softc *sc)
   12834 {
   12835 	uint32_t swsm;
   12836 
   12837 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12838 		device_xname(sc->sc_dev), __func__));
   12839 
   12840 	swsm = CSR_READ(sc, WMREG_SWSM);
   12841 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12842 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12843 }
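
/*
 * Illustrative usage (not driver code): the two-stage handshake above
 * always brackets a critical section.  SMBI serializes software agents
 * and SWESMBI then arbitrates between software and firmware:
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return -1;		// semaphore busy, bail out
 *	// ... access NVM or other shared resources ...
 *	wm_put_swsm_semaphore(sc);
 *
 * The put side clears both bits at once, releasing the hardware for
 * the next agent.
 */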
   12844 
   12845 /*
   12846  * Get SW/FW semaphore.
   12847  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12848  */
   12849 static int
   12850 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12851 {
   12852 	uint32_t swfw_sync;
   12853 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12854 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12855 	int timeout;
   12856 
   12857 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12858 		device_xname(sc->sc_dev), __func__));
   12859 
   12860 	if (sc->sc_type == WM_T_80003)
   12861 		timeout = 50;
   12862 	else
   12863 		timeout = 200;
   12864 
	while (timeout-- > 0) {
   12866 		if (wm_get_swsm_semaphore(sc)) {
   12867 			aprint_error_dev(sc->sc_dev,
   12868 			    "%s: failed to get semaphore\n",
   12869 			    __func__);
   12870 			return 1;
   12871 		}
   12872 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12873 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12874 			swfw_sync |= swmask;
   12875 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12876 			wm_put_swsm_semaphore(sc);
   12877 			return 0;
   12878 		}
   12879 		wm_put_swsm_semaphore(sc);
   12880 		delay(5000);
   12881 	}
   12882 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12883 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12884 	return 1;
   12885 }
   12886 
   12887 static void
   12888 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12889 {
   12890 	uint32_t swfw_sync;
   12891 
   12892 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12893 		device_xname(sc->sc_dev), __func__));
   12894 
   12895 	while (wm_get_swsm_semaphore(sc) != 0)
   12896 		continue;
   12897 
   12898 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12899 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12900 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12901 
   12902 	wm_put_swsm_semaphore(sc);
   12903 }
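
/*
 * Sketch of the SW_FW_SYNC layout assumed above: each resource owns a
 * software bit and a firmware bit in the same register, SWFW_SOFT_SHIFT
 * and SWFW_FIRM_SHIFT apart.  For a hypothetical resource mask 0x02:
 *
 *	swmask = 0x02 << SWFW_SOFT_SHIFT;	// our claim bit
 *	fwmask = 0x02 << SWFW_FIRM_SHIFT;	// firmware's claim bit
 *
 * The resource is free only when both bits are clear, and the getter
 * sets just the software bit while holding the SWSM semaphore, making
 * the read-modify-write of SW_FW_SYNC atomic with respect to firmware.
 */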
   12904 
   12905 static int
   12906 wm_get_nvm_80003(struct wm_softc *sc)
   12907 {
   12908 	int rv;
   12909 
   12910 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12911 		device_xname(sc->sc_dev), __func__));
   12912 
   12913 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12914 		aprint_error_dev(sc->sc_dev,
   12915 		    "%s: failed to get semaphore(SWFW)\n",
   12916 		    __func__);
   12917 		return rv;
   12918 	}
   12919 
   12920 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12921 	    && (rv = wm_get_eecd(sc)) != 0) {
   12922 		aprint_error_dev(sc->sc_dev,
   12923 		    "%s: failed to get semaphore(EECD)\n",
   12924 		    __func__);
   12925 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12926 		return rv;
   12927 	}
   12928 
   12929 	return 0;
   12930 }
   12931 
   12932 static void
   12933 wm_put_nvm_80003(struct wm_softc *sc)
   12934 {
   12935 
   12936 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12937 		device_xname(sc->sc_dev), __func__));
   12938 
   12939 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12940 		wm_put_eecd(sc);
   12941 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12942 }
   12943 
   12944 static int
   12945 wm_get_nvm_82571(struct wm_softc *sc)
   12946 {
   12947 	int rv;
   12948 
   12949 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12950 		device_xname(sc->sc_dev), __func__));
   12951 
   12952 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12953 		return rv;
   12954 
   12955 	switch (sc->sc_type) {
   12956 	case WM_T_82573:
   12957 		break;
   12958 	default:
   12959 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12960 			rv = wm_get_eecd(sc);
   12961 		break;
   12962 	}
   12963 
   12964 	if (rv != 0) {
   12965 		aprint_error_dev(sc->sc_dev,
   12966 		    "%s: failed to get semaphore\n",
   12967 		    __func__);
   12968 		wm_put_swsm_semaphore(sc);
   12969 	}
   12970 
   12971 	return rv;
   12972 }
   12973 
   12974 static void
   12975 wm_put_nvm_82571(struct wm_softc *sc)
   12976 {
   12977 
   12978 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12979 		device_xname(sc->sc_dev), __func__));
   12980 
   12981 	switch (sc->sc_type) {
   12982 	case WM_T_82573:
   12983 		break;
   12984 	default:
   12985 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12986 			wm_put_eecd(sc);
   12987 		break;
   12988 	}
   12989 
   12990 	wm_put_swsm_semaphore(sc);
   12991 }
   12992 
   12993 static int
   12994 wm_get_phy_82575(struct wm_softc *sc)
   12995 {
   12996 
   12997 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12998 		device_xname(sc->sc_dev), __func__));
   12999 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13000 }
   13001 
   13002 static void
   13003 wm_put_phy_82575(struct wm_softc *sc)
   13004 {
   13005 
   13006 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13007 		device_xname(sc->sc_dev), __func__));
    13008 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13009 }
   13010 
   13011 static int
   13012 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13013 {
   13014 	uint32_t ext_ctrl;
    13015 	int timeout;
   13016 
   13017 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13018 		device_xname(sc->sc_dev), __func__));
   13019 
   13020 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13021 	for (timeout = 0; timeout < 200; timeout++) {
   13022 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13023 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13024 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13025 
   13026 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13027 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13028 			return 0;
   13029 		delay(5000);
   13030 	}
   13031 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13032 	    device_xname(sc->sc_dev), ext_ctrl);
   13033 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13034 	return 1;
   13035 }
   13036 
   13037 static void
   13038 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13039 {
   13040 	uint32_t ext_ctrl;
   13041 
   13042 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13043 		device_xname(sc->sc_dev), __func__));
   13044 
   13045 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13046 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13047 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13048 
   13049 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13050 }
   13051 
   13052 static int
   13053 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13054 {
   13055 	uint32_t ext_ctrl;
   13056 	int timeout;
   13057 
   13058 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13059 		device_xname(sc->sc_dev), __func__));
   13060 	mutex_enter(sc->sc_ich_phymtx);
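          	/*
          	 * Two phases: first wait for the current owner to drop MDIO
          	 * ownership, then set the ownership bit ourselves and read it
          	 * back to confirm that the grab took effect.
          	 */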
   13061 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13062 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13063 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13064 			break;
   13065 		delay(1000);
   13066 	}
   13067 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13068 		printf("%s: SW has already locked the resource\n",
   13069 		    device_xname(sc->sc_dev));
   13070 		goto out;
   13071 	}
   13072 
   13073 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13074 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13075 	for (timeout = 0; timeout < 1000; timeout++) {
   13076 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13077 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13078 			break;
   13079 		delay(1000);
   13080 	}
   13081 	if (timeout >= 1000) {
   13082 		printf("%s: failed to acquire semaphore\n",
   13083 		    device_xname(sc->sc_dev));
   13084 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13085 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13086 		goto out;
   13087 	}
   13088 	return 0;
   13089 
   13090 out:
   13091 	mutex_exit(sc->sc_ich_phymtx);
   13092 	return 1;
   13093 }
   13094 
   13095 static void
   13096 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13097 {
   13098 	uint32_t ext_ctrl;
   13099 
   13100 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13101 		device_xname(sc->sc_dev), __func__));
   13102 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13103 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13104 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13105 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13106 	} else {
   13107 		printf("%s: Semaphore unexpectedly released\n",
   13108 		    device_xname(sc->sc_dev));
   13109 	}
   13110 
   13111 	mutex_exit(sc->sc_ich_phymtx);
   13112 }
   13113 
   13114 static int
   13115 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13116 {
   13117 
   13118 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13119 		device_xname(sc->sc_dev), __func__));
   13120 	mutex_enter(sc->sc_ich_nvmmtx);
   13121 
   13122 	return 0;
   13123 }
   13124 
   13125 static void
   13126 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13127 {
   13128 
   13129 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13130 		device_xname(sc->sc_dev), __func__));
   13131 	mutex_exit(sc->sc_ich_nvmmtx);
   13132 }
   13133 
   13134 static int
   13135 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13136 {
   13137 	int i = 0;
   13138 	uint32_t reg;
   13139 
   13140 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13141 		device_xname(sc->sc_dev), __func__));
   13142 
   13143 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13144 	do {
   13145 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13146 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13147 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13148 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13149 			break;
   13150 		delay(2*1000);
   13151 		i++;
   13152 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13153 
   13154 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13155 		wm_put_hw_semaphore_82573(sc);
   13156 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13157 		    device_xname(sc->sc_dev));
   13158 		return -1;
   13159 	}
   13160 
   13161 	return 0;
   13162 }
   13163 
   13164 static void
   13165 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13166 {
   13167 	uint32_t reg;
   13168 
   13169 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13170 		device_xname(sc->sc_dev), __func__));
   13171 
   13172 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13173 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13174 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13175 }
   13176 
   13177 /*
   13178  * Management mode and power management related subroutines.
   13179  * BMC, AMT, suspend/resume and EEE.
   13180  */
   13181 
   13182 #ifdef WM_WOL
   13183 static int
   13184 wm_check_mng_mode(struct wm_softc *sc)
   13185 {
   13186 	int rv;
   13187 
   13188 	switch (sc->sc_type) {
   13189 	case WM_T_ICH8:
   13190 	case WM_T_ICH9:
   13191 	case WM_T_ICH10:
   13192 	case WM_T_PCH:
   13193 	case WM_T_PCH2:
   13194 	case WM_T_PCH_LPT:
   13195 	case WM_T_PCH_SPT:
   13196 		rv = wm_check_mng_mode_ich8lan(sc);
   13197 		break;
   13198 	case WM_T_82574:
   13199 	case WM_T_82583:
   13200 		rv = wm_check_mng_mode_82574(sc);
   13201 		break;
   13202 	case WM_T_82571:
   13203 	case WM_T_82572:
   13204 	case WM_T_82573:
   13205 	case WM_T_80003:
   13206 		rv = wm_check_mng_mode_generic(sc);
   13207 		break;
   13208 	default:
    13209 		/* nothing to do */
   13210 		rv = 0;
   13211 		break;
   13212 	}
   13213 
   13214 	return rv;
   13215 }
   13216 
   13217 static int
   13218 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13219 {
   13220 	uint32_t fwsm;
   13221 
   13222 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13223 
   13224 	if (((fwsm & FWSM_FW_VALID) != 0)
   13225 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13226 		return 1;
   13227 
   13228 	return 0;
   13229 }
   13230 
   13231 static int
   13232 wm_check_mng_mode_82574(struct wm_softc *sc)
   13233 {
   13234 	uint16_t data;
   13235 
   13236 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13237 
   13238 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13239 		return 1;
   13240 
   13241 	return 0;
   13242 }
   13243 
   13244 static int
   13245 wm_check_mng_mode_generic(struct wm_softc *sc)
   13246 {
   13247 	uint32_t fwsm;
   13248 
   13249 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13250 
   13251 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13252 		return 1;
   13253 
   13254 	return 0;
   13255 }
   13256 #endif /* WM_WOL */
   13257 
   13258 static int
   13259 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13260 {
   13261 	uint32_t manc, fwsm, factps;
   13262 
   13263 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13264 		return 0;
   13265 
   13266 	manc = CSR_READ(sc, WMREG_MANC);
   13267 
   13268 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13269 		device_xname(sc->sc_dev), manc));
   13270 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13271 		return 0;
   13272 
   13273 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13274 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13275 		factps = CSR_READ(sc, WMREG_FACTPS);
   13276 		if (((factps & FACTPS_MNGCG) == 0)
   13277 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13278 			return 1;
   13279 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13280 		uint16_t data;
   13281 
   13282 		factps = CSR_READ(sc, WMREG_FACTPS);
   13283 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13284 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13285 			device_xname(sc->sc_dev), factps, data));
   13286 		if (((factps & FACTPS_MNGCG) == 0)
   13287 		    && ((data & NVM_CFG2_MNGM_MASK)
   13288 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13289 			return 1;
   13290 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13291 	    && ((manc & MANC_ASF_EN) == 0))
   13292 		return 1;
   13293 
   13294 	return 0;
   13295 }
   13296 
   13297 static bool
   13298 wm_phy_resetisblocked(struct wm_softc *sc)
   13299 {
   13300 	bool blocked = false;
   13301 	uint32_t reg;
   13302 	int i = 0;
   13303 
   13304 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13305 		device_xname(sc->sc_dev), __func__));
   13306 
   13307 	switch (sc->sc_type) {
   13308 	case WM_T_ICH8:
   13309 	case WM_T_ICH9:
   13310 	case WM_T_ICH10:
   13311 	case WM_T_PCH:
   13312 	case WM_T_PCH2:
   13313 	case WM_T_PCH_LPT:
   13314 	case WM_T_PCH_SPT:
   13315 		do {
   13316 			reg = CSR_READ(sc, WMREG_FWSM);
   13317 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13318 				blocked = true;
   13319 				delay(10*1000);
   13320 				continue;
   13321 			}
   13322 			blocked = false;
   13323 		} while (blocked && (i++ < 30));
   13324 		return blocked;
   13326 	case WM_T_82571:
   13327 	case WM_T_82572:
   13328 	case WM_T_82573:
   13329 	case WM_T_82574:
   13330 	case WM_T_82583:
   13331 	case WM_T_80003:
   13332 		reg = CSR_READ(sc, WMREG_MANC);
    13333 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   13338 	default:
   13339 		/* no problem */
   13340 		break;
   13341 	}
   13342 
   13343 	return false;
   13344 }
   13345 
   13346 static void
   13347 wm_get_hw_control(struct wm_softc *sc)
   13348 {
   13349 	uint32_t reg;
   13350 
   13351 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13352 		device_xname(sc->sc_dev), __func__));
   13353 
   13354 	if (sc->sc_type == WM_T_82573) {
   13355 		reg = CSR_READ(sc, WMREG_SWSM);
   13356 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13357 	} else if (sc->sc_type >= WM_T_82571) {
   13358 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13359 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13360 	}
   13361 }
   13362 
   13363 static void
   13364 wm_release_hw_control(struct wm_softc *sc)
   13365 {
   13366 	uint32_t reg;
   13367 
   13368 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13369 		device_xname(sc->sc_dev), __func__));
   13370 
   13371 	if (sc->sc_type == WM_T_82573) {
   13372 		reg = CSR_READ(sc, WMREG_SWSM);
   13373 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13374 	} else if (sc->sc_type >= WM_T_82571) {
   13375 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13376 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13377 	}
   13378 }
   13379 
   13380 static void
   13381 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13382 {
   13383 	uint32_t reg;
   13384 
   13385 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13386 		device_xname(sc->sc_dev), __func__));
   13387 
   13388 	if (sc->sc_type < WM_T_PCH2)
   13389 		return;
   13390 
   13391 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13392 
   13393 	if (gate)
   13394 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13395 	else
   13396 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13397 
   13398 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13399 }
   13400 
   13401 static void
   13402 wm_smbustopci(struct wm_softc *sc)
   13403 {
   13404 	uint32_t fwsm, reg;
   13405 	int rv = 0;
   13406 
   13407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13408 		device_xname(sc->sc_dev), __func__));
   13409 
   13410 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13411 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13412 
   13413 	/* Disable ULP */
   13414 	wm_ulp_disable(sc);
   13415 
   13416 	/* Acquire PHY semaphore */
   13417 	sc->phy.acquire(sc);
   13418 
   13419 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13420 	switch (sc->sc_type) {
   13421 	case WM_T_PCH_LPT:
   13422 	case WM_T_PCH_SPT:
   13423 		if (wm_phy_is_accessible_pchlan(sc))
   13424 			break;
   13425 
   13426 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13427 		reg |= CTRL_EXT_FORCE_SMBUS;
   13428 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13429 #if 0
   13430 		/* XXX Isn't this required??? */
   13431 		CSR_WRITE_FLUSH(sc);
   13432 #endif
   13433 		delay(50 * 1000);
   13434 		/* FALLTHROUGH */
   13435 	case WM_T_PCH2:
   13436 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13437 			break;
   13438 		/* FALLTHROUGH */
   13439 	case WM_T_PCH:
   13440 		if (sc->sc_type == WM_T_PCH)
   13441 			if ((fwsm & FWSM_FW_VALID) != 0)
   13442 				break;
   13443 
   13444 		if (wm_phy_resetisblocked(sc) == true) {
   13445 			printf("XXX reset is blocked(3)\n");
   13446 			break;
   13447 		}
   13448 
   13449 		wm_toggle_lanphypc_pch_lpt(sc);
   13450 
   13451 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13452 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13453 				break;
   13454 
   13455 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13456 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13457 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13458 
   13459 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13460 				break;
   13461 			rv = -1;
   13462 		}
   13463 		break;
   13464 	default:
   13465 		break;
   13466 	}
   13467 
   13468 	/* Release semaphore */
   13469 	sc->phy.release(sc);
   13470 
   13471 	if (rv == 0) {
   13472 		if (wm_phy_resetisblocked(sc)) {
   13473 			printf("XXX reset is blocked(4)\n");
   13474 			goto out;
   13475 		}
   13476 		wm_reset_phy(sc);
   13477 		if (wm_phy_resetisblocked(sc))
   13478 			printf("XXX reset is blocked(4)\n");
   13479 	}
   13480 
   13481 out:
   13482 	/*
   13483 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13484 	 */
   13485 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13486 		delay(10*1000);
   13487 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13488 	}
   13489 }
   13490 
   13491 static void
   13492 wm_init_manageability(struct wm_softc *sc)
   13493 {
   13494 
   13495 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13496 		device_xname(sc->sc_dev), __func__));
   13497 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13498 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13499 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13500 
   13501 		/* Disable hardware interception of ARP */
   13502 		manc &= ~MANC_ARP_EN;
   13503 
   13504 		/* Enable receiving management packets to the host */
   13505 		if (sc->sc_type >= WM_T_82571) {
   13506 			manc |= MANC_EN_MNG2HOST;
    13507 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13508 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13509 		}
   13510 
   13511 		CSR_WRITE(sc, WMREG_MANC, manc);
   13512 	}
   13513 }
   13514 
   13515 static void
   13516 wm_release_manageability(struct wm_softc *sc)
   13517 {
   13518 
   13519 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13520 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13521 
   13522 		manc |= MANC_ARP_EN;
   13523 		if (sc->sc_type >= WM_T_82571)
   13524 			manc &= ~MANC_EN_MNG2HOST;
   13525 
   13526 		CSR_WRITE(sc, WMREG_MANC, manc);
   13527 	}
   13528 }
   13529 
   13530 static void
   13531 wm_get_wakeup(struct wm_softc *sc)
   13532 {
   13533 
   13534 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13535 	switch (sc->sc_type) {
   13536 	case WM_T_82573:
   13537 	case WM_T_82583:
   13538 		sc->sc_flags |= WM_F_HAS_AMT;
   13539 		/* FALLTHROUGH */
   13540 	case WM_T_80003:
   13541 	case WM_T_82575:
   13542 	case WM_T_82576:
   13543 	case WM_T_82580:
   13544 	case WM_T_I350:
   13545 	case WM_T_I354:
   13546 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13547 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13548 		/* FALLTHROUGH */
   13549 	case WM_T_82541:
   13550 	case WM_T_82541_2:
   13551 	case WM_T_82547:
   13552 	case WM_T_82547_2:
   13553 	case WM_T_82571:
   13554 	case WM_T_82572:
   13555 	case WM_T_82574:
   13556 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13557 		break;
   13558 	case WM_T_ICH8:
   13559 	case WM_T_ICH9:
   13560 	case WM_T_ICH10:
   13561 	case WM_T_PCH:
   13562 	case WM_T_PCH2:
   13563 	case WM_T_PCH_LPT:
   13564 	case WM_T_PCH_SPT:
   13565 		sc->sc_flags |= WM_F_HAS_AMT;
   13566 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13567 		break;
   13568 	default:
   13569 		break;
   13570 	}
   13571 
   13572 	/* 1: HAS_MANAGE */
   13573 	if (wm_enable_mng_pass_thru(sc) != 0)
   13574 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13575 
   13576 	/*
    13577 	 * Note that the WOL flags are set after the resetting of the
    13578 	 * EEPROM stuff.
   13579 	 */
   13580 }
   13581 
   13582 /*
   13583  * Unconfigure Ultra Low Power mode.
   13584  * Only for I217 and newer (see below).
   13585  */
   13586 static void
   13587 wm_ulp_disable(struct wm_softc *sc)
   13588 {
   13589 	uint32_t reg;
   13590 	int i = 0;
   13591 
   13592 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13593 		device_xname(sc->sc_dev), __func__));
   13594 	/* Exclude old devices */
   13595 	if ((sc->sc_type < WM_T_PCH_LPT)
   13596 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13597 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13598 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13599 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13600 		return;
   13601 
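          	/*
          	 * If ME firmware is running, ask it to deconfigure ULP through
          	 * the H2ME register; otherwise take the PHY semaphore and undo
          	 * the SMBus/ULP configuration by hand below.
          	 */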
   13602 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13603 		/* Request ME un-configure ULP mode in the PHY */
   13604 		reg = CSR_READ(sc, WMREG_H2ME);
   13605 		reg &= ~H2ME_ULP;
   13606 		reg |= H2ME_ENFORCE_SETTINGS;
   13607 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13608 
   13609 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13610 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13611 			if (i++ == 30) {
   13612 				printf("%s timed out\n", __func__);
   13613 				return;
   13614 			}
   13615 			delay(10 * 1000);
   13616 		}
   13617 		reg = CSR_READ(sc, WMREG_H2ME);
   13618 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13619 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13620 
   13621 		return;
   13622 	}
   13623 
   13624 	/* Acquire semaphore */
   13625 	sc->phy.acquire(sc);
   13626 
   13627 	/* Toggle LANPHYPC */
   13628 	wm_toggle_lanphypc_pch_lpt(sc);
   13629 
   13630 	/* Unforce SMBus mode in PHY */
   13631 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13632 	if (reg == 0x0000 || reg == 0xffff) {
   13633 		uint32_t reg2;
   13634 
   13635 		printf("%s: Force SMBus first.\n", __func__);
   13636 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13637 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13638 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13639 		delay(50 * 1000);
   13640 
   13641 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13642 	}
   13643 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13644 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13645 
   13646 	/* Unforce SMBus mode in MAC */
   13647 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13648 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13649 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13650 
   13651 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13652 	reg |= HV_PM_CTRL_K1_ENA;
   13653 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13654 
   13655 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13656 	reg &= ~(I218_ULP_CONFIG1_IND
   13657 	    | I218_ULP_CONFIG1_STICKY_ULP
   13658 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13659 	    | I218_ULP_CONFIG1_WOL_HOST
   13660 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13661 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13662 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13663 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13664 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13665 	reg |= I218_ULP_CONFIG1_START;
   13666 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13667 
   13668 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13669 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13670 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13671 
   13672 	/* Release semaphore */
   13673 	sc->phy.release(sc);
   13674 	wm_gmii_reset(sc);
   13675 	delay(50 * 1000);
   13676 }
   13677 
   13678 /* WOL in the newer chipset interfaces (pchlan) */
   13679 static void
   13680 wm_enable_phy_wakeup(struct wm_softc *sc)
   13681 {
   13682 #if 0
   13683 	uint16_t preg;
   13684 
   13685 	/* Copy MAC RARs to PHY RARs */
   13686 
   13687 	/* Copy MAC MTA to PHY MTA */
   13688 
   13689 	/* Configure PHY Rx Control register */
   13690 
   13691 	/* Enable PHY wakeup in MAC register */
   13692 
   13693 	/* Configure and enable PHY wakeup in PHY registers */
   13694 
   13695 	/* Activate PHY wakeup */
   13696 
   13697 	/* XXX */
   13698 #endif
   13699 }
   13700 
   13701 /* Power down workaround on D3 */
   13702 static void
   13703 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13704 {
   13705 	uint32_t reg;
   13706 	int i;
   13707 
   13708 	for (i = 0; i < 2; i++) {
   13709 		/* Disable link */
   13710 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13711 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13712 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13713 
   13714 		/*
   13715 		 * Call gig speed drop workaround on Gig disable before
   13716 		 * accessing any PHY registers
   13717 		 */
   13718 		if (sc->sc_type == WM_T_ICH8)
   13719 			wm_gig_downshift_workaround_ich8lan(sc);
   13720 
   13721 		/* Write VR power-down enable */
   13722 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13723 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13724 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13725 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13726 
   13727 		/* Read it back and test */
   13728 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13729 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13730 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13731 			break;
   13732 
   13733 		/* Issue PHY reset and repeat at most one more time */
   13734 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13735 	}
   13736 }
   13737 
   13738 static void
   13739 wm_enable_wakeup(struct wm_softc *sc)
   13740 {
   13741 	uint32_t reg, pmreg;
   13742 	pcireg_t pmode;
   13743 
   13744 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13745 		device_xname(sc->sc_dev), __func__));
   13746 
   13747 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13748 		&pmreg, NULL) == 0)
   13749 		return;
   13750 
   13751 	/* Advertise the wakeup capability */
   13752 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13753 	    | CTRL_SWDPIN(3));
   13754 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13755 
   13756 	/* ICH workaround */
   13757 	switch (sc->sc_type) {
   13758 	case WM_T_ICH8:
   13759 	case WM_T_ICH9:
   13760 	case WM_T_ICH10:
   13761 	case WM_T_PCH:
   13762 	case WM_T_PCH2:
   13763 	case WM_T_PCH_LPT:
   13764 	case WM_T_PCH_SPT:
   13765 		/* Disable gig during WOL */
   13766 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13767 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13768 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13769 		if (sc->sc_type == WM_T_PCH)
   13770 			wm_gmii_reset(sc);
   13771 
   13772 		/* Power down workaround */
   13773 		if (sc->sc_phytype == WMPHY_82577) {
   13774 			struct mii_softc *child;
   13775 
   13776 			/* Assume that the PHY is copper */
   13777 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13778 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13779 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13780 				    (768 << 5) | 25, 0x0444); /* magic num */
   13781 		}
   13782 		break;
   13783 	default:
   13784 		break;
   13785 	}
   13786 
   13787 	/* Keep the laser running on fiber adapters */
   13788 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13789 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13790 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13791 		reg |= CTRL_EXT_SWDPIN(3);
   13792 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13793 	}
   13794 
   13795 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13796 #if 0	/* for the multicast packet */
   13797 	reg |= WUFC_MC;
   13798 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13799 #endif
   13800 
   13801 	if (sc->sc_type >= WM_T_PCH)
   13802 		wm_enable_phy_wakeup(sc);
   13803 	else {
   13804 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13805 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13806 	}
   13807 
   13808 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13809 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13810 		|| (sc->sc_type == WM_T_PCH2))
   13811 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13812 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13813 
   13814 	/* Request PME */
   13815 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13816 #if 0
   13817 	/* Disable WOL */
   13818 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13819 #else
   13820 	/* For WOL */
   13821 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13822 #endif
   13823 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13824 }
   13825 
   13826 /* Disable ASPM L0s and/or L1 for workaround */
   13827 static void
   13828 wm_disable_aspm(struct wm_softc *sc)
   13829 {
   13830 	pcireg_t reg, mask = 0;
    13831 	const char *str = "";
   13832 
   13833 	/*
    13834 	 * Only for PCIe devices which have the PCIe capability in their
    13835 	 * PCI config space.
   13836 	 */
   13837 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   13838 		return;
   13839 
   13840 	switch (sc->sc_type) {
   13841 	case WM_T_82571:
   13842 	case WM_T_82572:
   13843 		/*
   13844 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   13845 		 * State Power management L1 State (ASPM L1).
   13846 		 */
   13847 		mask = PCIE_LCSR_ASPM_L1;
   13848 		str = "L1 is";
   13849 		break;
   13850 	case WM_T_82573:
   13851 	case WM_T_82574:
   13852 	case WM_T_82583:
   13853 		/*
   13854 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   13855 		 *
    13856 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    13857 		 * some chipsets.  Their documentation says that disabling
    13858 		 * L0s on those specific chipsets is sufficient, but we
    13859 		 * follow what the Intel em driver does.
   13860 		 *
   13861 		 * References:
   13862 		 * Errata 8 of the Specification Update of i82573.
   13863 		 * Errata 20 of the Specification Update of i82574.
   13864 		 * Errata 9 of the Specification Update of i82583.
   13865 		 */
   13866 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   13867 		str = "L0s and L1 are";
   13868 		break;
   13869 	default:
   13870 		return;
   13871 	}
   13872 
   13873 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13874 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   13875 	reg &= ~mask;
   13876 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13877 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   13878 
   13879 	/* Print only in wm_attach() */
   13880 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   13881 		aprint_verbose_dev(sc->sc_dev,
    13882 		    "ASPM %s disabled to work around the errata.\n",
    13883 		    str);
   13884 }
   13885 
    13886 /* LPLU (Low Power Link Up) */
   13887 
   13888 static void
   13889 wm_lplu_d0_disable(struct wm_softc *sc)
   13890 {
   13891 	struct mii_data *mii = &sc->sc_mii;
   13892 	uint32_t reg;
   13893 
   13894 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13895 		device_xname(sc->sc_dev), __func__));
   13896 
   13897 	if (sc->sc_phytype == WMPHY_IFE)
   13898 		return;
   13899 
   13900 	switch (sc->sc_type) {
   13901 	case WM_T_82571:
   13902 	case WM_T_82572:
   13903 	case WM_T_82573:
   13904 	case WM_T_82575:
   13905 	case WM_T_82576:
   13906 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13907 		reg &= ~PMR_D0_LPLU;
   13908 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13909 		break;
   13910 	case WM_T_82580:
   13911 	case WM_T_I350:
   13912 	case WM_T_I210:
   13913 	case WM_T_I211:
   13914 		reg = CSR_READ(sc, WMREG_PHPM);
   13915 		reg &= ~PHPM_D0A_LPLU;
   13916 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13917 		break;
   13918 	case WM_T_82574:
   13919 	case WM_T_82583:
   13920 	case WM_T_ICH8:
   13921 	case WM_T_ICH9:
   13922 	case WM_T_ICH10:
   13923 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13924 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13925 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13926 		CSR_WRITE_FLUSH(sc);
   13927 		break;
   13928 	case WM_T_PCH:
   13929 	case WM_T_PCH2:
   13930 	case WM_T_PCH_LPT:
   13931 	case WM_T_PCH_SPT:
   13932 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13933 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13934 		if (wm_phy_resetisblocked(sc) == false)
   13935 			reg |= HV_OEM_BITS_ANEGNOW;
   13936 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13937 		break;
   13938 	default:
   13939 		break;
   13940 	}
   13941 }
   13942 
   13943 /* EEE */
   13944 
   13945 static void
   13946 wm_set_eee_i350(struct wm_softc *sc)
   13947 {
   13948 	uint32_t ipcnfg, eeer;
   13949 
   13950 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13951 	eeer = CSR_READ(sc, WMREG_EEER);
   13952 
   13953 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13954 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13955 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13956 		    | EEER_LPI_FC);
   13957 	} else {
   13958 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13959 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13960 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13961 		    | EEER_LPI_FC);
   13962 	}
   13963 
   13964 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13965 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13966 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13967 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13968 }
   13969 
   13970 /*
   13971  * Workarounds (mainly PHY related).
    13972  * Basically, PHY workarounds are in the PHY drivers.
   13973  */
   13974 
   13975 /* Work-around for 82566 Kumeran PCS lock loss */
   13976 static void
   13977 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13978 {
   13979 	struct mii_data *mii = &sc->sc_mii;
   13980 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13981 	int i;
   13982 	int reg;
   13983 
   13984 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13985 		device_xname(sc->sc_dev), __func__));
   13986 
   13987 	/* If the link is not up, do nothing */
   13988 	if ((status & STATUS_LU) == 0)
   13989 		return;
   13990 
   13991 	/* Nothing to do if the link is other than 1Gbps */
   13992 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13993 		return;
   13994 
   13995 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13996 	for (i = 0; i < 10; i++) {
   13997 		/* read twice */
   13998 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13999 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14000 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14001 			goto out;	/* GOOD! */
   14002 
   14003 		/* Reset the PHY */
   14004 		wm_reset_phy(sc);
   14005 		delay(5*1000);
   14006 	}
   14007 
   14008 	/* Disable GigE link negotiation */
   14009 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14010 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14011 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14012 
   14013 	/*
   14014 	 * Call gig speed drop workaround on Gig disable before accessing
   14015 	 * any PHY registers.
   14016 	 */
   14017 	wm_gig_downshift_workaround_ich8lan(sc);
   14018 
   14019 out:
   14020 	return;
   14021 }
   14022 
   14023 /* WOL from S5 stops working */
   14024 static void
   14025 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14026 {
   14027 	uint16_t kmreg;
   14028 
   14029 	/* Only for igp3 */
   14030 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14031 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14032 			return;
   14033 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14034 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14035 			return;
   14036 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14037 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14038 	}
   14039 }
   14040 
   14041 /*
   14042  * Workaround for pch's PHYs
   14043  * XXX should be moved to new PHY driver?
   14044  */
   14045 static void
   14046 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14047 {
   14048 
   14049 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14050 		device_xname(sc->sc_dev), __func__));
   14051 	KASSERT(sc->sc_type == WM_T_PCH);
   14052 
   14053 	if (sc->sc_phytype == WMPHY_82577)
   14054 		wm_set_mdio_slow_mode_hv(sc);
   14055 
   14056 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14057 
   14058 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14059 
   14060 	/* 82578 */
   14061 	if (sc->sc_phytype == WMPHY_82578) {
   14062 		struct mii_softc *child;
   14063 
   14064 		/*
   14065 		 * Return registers to default by doing a soft reset then
   14066 		 * writing 0x3140 to the control register
   14067 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14068 		 */
   14069 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14070 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14071 			PHY_RESET(child);
   14072 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14073 			    0x3140);
   14074 		}
   14075 	}
   14076 
   14077 	/* Select page 0 */
   14078 	sc->phy.acquire(sc);
   14079 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14080 	sc->phy.release(sc);
   14081 
   14082 	/*
    14083 	 * Configure the K1 Si workaround during PHY reset, assuming link
    14084 	 * is up, so that K1 is disabled when the link runs at 1Gbps.
   14085 	 */
   14086 	wm_k1_gig_workaround_hv(sc, 1);
   14087 }
   14088 
   14089 static void
   14090 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14091 {
   14092 
   14093 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14094 		device_xname(sc->sc_dev), __func__));
   14095 	KASSERT(sc->sc_type == WM_T_PCH2);
   14096 
   14097 	wm_set_mdio_slow_mode_hv(sc);
   14098 }
   14099 
   14100 static int
   14101 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14102 {
   14103 	int k1_enable = sc->sc_nvm_k1_enabled;
   14104 
   14105 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14106 		device_xname(sc->sc_dev), __func__));
   14107 
   14108 	if (sc->phy.acquire(sc) != 0)
   14109 		return -1;
   14110 
   14111 	if (link) {
   14112 		k1_enable = 0;
   14113 
   14114 		/* Link stall fix for link up */
   14115 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14116 	} else {
   14117 		/* Link stall fix for link down */
   14118 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14119 	}
   14120 
   14121 	wm_configure_k1_ich8lan(sc, k1_enable);
   14122 	sc->phy.release(sc);
   14123 
   14124 	return 0;
   14125 }
   14126 
   14127 static void
   14128 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14129 {
   14130 	uint32_t reg;
   14131 
   14132 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14133 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14134 	    reg | HV_KMRN_MDIO_SLOW);
   14135 }
   14136 
   14137 static void
   14138 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14139 {
   14140 	uint32_t ctrl, ctrl_ext, tmp;
   14141 	uint16_t kmreg;
   14142 	int rv;
   14143 
   14144 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14145 	if (rv != 0)
   14146 		return;
   14147 
   14148 	if (k1_enable)
   14149 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14150 	else
   14151 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14152 
   14153 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14154 	if (rv != 0)
   14155 		return;
   14156 
   14157 	delay(20);
   14158 
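          	/*
          	 * Briefly force the MAC speed configuration (FRCSPD together
          	 * with the CTRL_EXT speed-bypass bit), then restore the
          	 * original CTRL/CTRL_EXT values, flushing and waiting 20us
          	 * around each step.
          	 */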
   14159 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14160 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14161 
   14162 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14163 	tmp |= CTRL_FRCSPD;
   14164 
   14165 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14166 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14167 	CSR_WRITE_FLUSH(sc);
   14168 	delay(20);
   14169 
   14170 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14171 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14172 	CSR_WRITE_FLUSH(sc);
   14173 	delay(20);
   14176 }
   14177 
   14178 /* special case - for 82575 - need to do manual init ... */
   14179 static void
   14180 wm_reset_init_script_82575(struct wm_softc *sc)
   14181 {
   14182 	/*
    14183 	 * Remark: this is untested code - we have no board without EEPROM.
    14184 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14185 	 */
   14186 
   14187 	/* SerDes configuration via SERDESCTRL */
   14188 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14189 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14190 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14191 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14192 
   14193 	/* CCM configuration via CCMCTL register */
   14194 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14195 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14196 
   14197 	/* PCIe lanes configuration */
   14198 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14199 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14200 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14201 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14202 
   14203 	/* PCIe PLL Configuration */
   14204 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14205 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14206 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14207 }
   14208 
   14209 static void
   14210 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14211 {
   14212 	uint32_t reg;
   14213 	uint16_t nvmword;
   14214 	int rv;
   14215 
   14216 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14217 		return;
   14218 
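          	/*
          	 * Re-apply the external/combined MDIO selection recorded in
          	 * the NVM CFG3 word for this port to the MDICNFG register.
          	 */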
   14219 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14220 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14221 	if (rv != 0) {
   14222 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14223 		    __func__);
   14224 		return;
   14225 	}
   14226 
   14227 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14228 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14229 		reg |= MDICNFG_DEST;
   14230 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14231 		reg |= MDICNFG_COM_MDIO;
   14232 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14233 }
   14234 
   14235 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14236 
   14237 static bool
   14238 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14239 {
   14240 	int i;
   14241 	uint32_t reg;
   14242 	uint16_t id1, id2;
   14243 
   14244 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14245 		device_xname(sc->sc_dev), __func__));
   14246 	id1 = id2 = 0xffff;
   14247 	for (i = 0; i < 2; i++) {
   14248 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14249 		if (MII_INVALIDID(id1))
   14250 			continue;
   14251 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14252 		if (MII_INVALIDID(id2))
   14253 			continue;
   14254 		break;
   14255 	}
   14256 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14257 		goto out;
   14258 	}
   14259 
   14260 	if (sc->sc_type < WM_T_PCH_LPT) {
   14261 		sc->phy.release(sc);
   14262 		wm_set_mdio_slow_mode_hv(sc);
   14263 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14264 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14265 		sc->phy.acquire(sc);
   14266 	}
   14267 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14268 		printf("XXX return with false\n");
   14269 		return false;
   14270 	}
   14271 out:
   14272 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14273 		/* Only unforce SMBus if ME is not active */
   14274 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14275 			/* Unforce SMBus mode in PHY */
   14276 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14277 			    CV_SMB_CTRL);
   14278 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14279 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14280 			    CV_SMB_CTRL, reg);
   14281 
   14282 			/* Unforce SMBus mode in MAC */
   14283 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14284 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14285 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14286 		}
   14287 	}
   14288 	return true;
   14289 }
   14290 
   14291 static void
   14292 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14293 {
   14294 	uint32_t reg;
   14295 	int i;
   14296 
   14297 	/* Set PHY Config Counter to 50msec */
   14298 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14299 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14300 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14301 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14302 
   14303 	/* Toggle LANPHYPC */
   14304 	reg = CSR_READ(sc, WMREG_CTRL);
   14305 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14306 	reg &= ~CTRL_LANPHYPC_VALUE;
   14307 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14308 	CSR_WRITE_FLUSH(sc);
   14309 	delay(1000);
   14310 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14311 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14312 	CSR_WRITE_FLUSH(sc);
   14313 
   14314 	if (sc->sc_type < WM_T_PCH_LPT)
   14315 		delay(50 * 1000);
   14316 	else {
   14317 		i = 20;
   14318 
   14319 		do {
   14320 			delay(5 * 1000);
   14321 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14322 		    && i--);
   14323 
   14324 		delay(30 * 1000);
   14325 	}
   14326 }
   14327 
   14328 static int
   14329 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14330 {
   14331 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14332 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14333 	uint32_t rxa;
   14334 	uint16_t scale = 0, lat_enc = 0;
   14335 	int32_t obff_hwm = 0;
   14336 	int64_t lat_ns, value;
   14337 
   14338 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14339 		device_xname(sc->sc_dev), __func__));
   14340 
   14341 	if (link) {
   14342 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14343 		uint32_t status;
   14344 		uint16_t speed;
   14345 		pcireg_t preg;
   14346 
   14347 		status = CSR_READ(sc, WMREG_STATUS);
   14348 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14349 		case STATUS_SPEED_10:
   14350 			speed = 10;
   14351 			break;
   14352 		case STATUS_SPEED_100:
   14353 			speed = 100;
   14354 			break;
   14355 		case STATUS_SPEED_1000:
   14356 			speed = 1000;
   14357 			break;
   14358 		default:
   14359 			device_printf(sc->sc_dev, "Unknown speed "
   14360 			    "(status = %08x)\n", status);
   14361 			return -1;
   14362 		}
   14363 
   14364 		/* Rx Packet Buffer Allocation size (KB) */
   14365 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14366 
   14367 		/*
   14368 		 * Determine the maximum latency tolerated by the device.
   14369 		 *
   14370 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14371 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14372 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14373 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14374 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14375 		 */
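          		/*
          		 * lat_ns below is how long line-rate traffic takes to
          		 * fill the Rx buffer space left after two maximum-sized
          		 * frames: the free space in bits, multiplied by 1000 and
          		 * divided by the link speed in Mb/s, yields nanoseconds.
          		 */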
   14376 		lat_ns = ((int64_t)rxa * 1024 -
   14377 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14378 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14379 		if (lat_ns < 0)
   14380 			lat_ns = 0;
   14381 		else
   14382 			lat_ns /= speed;
   14383 		value = lat_ns;
   14384 
   14385 		while (value > LTRV_VALUE) {
    14386 			scale++;
   14387 			value = howmany(value, __BIT(5));
   14388 		}
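          		/*
          		 * For example, lat_ns = 300000 scales as
          		 * howmany(300000, 32) = 9375 (scale 1), then
          		 * howmany(9375, 32) = 293 (scale 2), so lat_enc will
          		 * encode 293 * 2^10 ns, roughly the original 300us.
          		 */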
   14389 		if (scale > LTRV_SCALE_MAX) {
   14390 			printf("%s: Invalid LTR latency scale %d\n",
   14391 			    device_xname(sc->sc_dev), scale);
   14392 			return -1;
   14393 		}
   14394 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14395 
   14396 		/* Determine the maximum latency tolerated by the platform */
   14397 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14398 		    WM_PCI_LTR_CAP_LPT);
   14399 		max_snoop = preg & 0xffff;
   14400 		max_nosnoop = preg >> 16;
   14401 
   14402 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14403 
   14404 		if (lat_enc > max_ltr_enc) {
   14405 			lat_enc = max_ltr_enc;
   14406 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14407 			    * PCI_LTR_SCALETONS(
   14408 				    __SHIFTOUT(lat_enc,
   14409 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14410 		}
   14411 
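          		/*
          		 * Convert the tolerated latency back into the KB of Rx
          		 * buffer consumed at line rate; the OBFF high water mark
          		 * is the buffer size minus that amount.
          		 */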
   14412 		if (lat_ns) {
   14413 			lat_ns *= speed * 1000;
   14414 			lat_ns /= 8;
   14415 			lat_ns /= 1000000000;
   14416 			obff_hwm = (int32_t)(rxa - lat_ns);
   14417 		}
   14418 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14419 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    14420 			    " (rxa = %d, lat_ns = %d)\n",
   14421 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14422 			return -1;
   14423 		}
   14424 	}
   14425 	/* Snoop and No-Snoop latencies the same */
   14426 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14427 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14428 
   14429 	/* Set OBFF high water mark */
   14430 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14431 	reg |= obff_hwm;
   14432 	CSR_WRITE(sc, WMREG_SVT, reg);
   14433 
   14434 	/* Enable OBFF */
   14435 	reg = CSR_READ(sc, WMREG_SVCR);
   14436 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14437 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14438 
   14439 	return 0;
   14440 }
   14441 
   14442 /*
   14443  * I210 Errata 25 and I211 Errata 10
   14444  * Slow System Clock.
   14445  */
   14446 static void
   14447 wm_pll_workaround_i210(struct wm_softc *sc)
   14448 {
   14449 	uint32_t mdicnfg, wuc;
   14450 	uint32_t reg;
   14451 	pcireg_t pcireg;
   14452 	uint32_t pmreg;
   14453 	uint16_t nvmword, tmp_nvmword;
   14454 	int phyval;
   14455 	bool wa_done = false;
   14456 	int i;
   14457 
   14458 	/* Save WUC and MDICNFG registers */
   14459 	wuc = CSR_READ(sc, WMREG_WUC);
   14460 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14461 
   14462 	reg = mdicnfg & ~MDICNFG_DEST;
   14463 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14464 
   14465 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14466 		nvmword = INVM_DEFAULT_AL;
   14467 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14468 
   14469 	/* Get Power Management cap offset */
   14470 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14471 		&pmreg, NULL) == 0)
   14472 		return;
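          	/*
          	 * While the PHY PLL still reports itself unconfigured, reset
          	 * the internal PHY, rewrite the iNVM autoload word through
          	 * EEARBC and bounce the device through D3hot/D0 so the PLL
          	 * can relock; retry up to WM_MAX_PLL_TRIES times.
          	 */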
   14473 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14474 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14475 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14476 
   14477 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14478 			break; /* OK */
   14479 		}
   14480 
   14481 		wa_done = true;
   14482 		/* Directly reset the internal PHY */
   14483 		reg = CSR_READ(sc, WMREG_CTRL);
   14484 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14485 
   14486 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14487 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14488 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14489 
   14490 		CSR_WRITE(sc, WMREG_WUC, 0);
   14491 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14492 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14493 
   14494 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14495 		    pmreg + PCI_PMCSR);
   14496 		pcireg |= PCI_PMCSR_STATE_D3;
   14497 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14498 		    pmreg + PCI_PMCSR, pcireg);
   14499 		delay(1000);
   14500 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14501 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14502 		    pmreg + PCI_PMCSR, pcireg);
   14503 
   14504 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14505 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14506 
   14507 		/* Restore WUC register */
   14508 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14509 	}
   14510 
   14511 	/* Restore MDICNFG setting */
   14512 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14513 	if (wa_done)
   14514 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14515 }
   14516 
   14517 static void
   14518 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14519 {
   14520 	uint32_t reg;
   14521 
   14522 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14523 		device_xname(sc->sc_dev), __func__));
   14524 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14525 
   14526 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14527 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14528 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14529 
   14530 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14531 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14532 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14533 }
   14534