/*	$NetBSD: if_wm.c,v 1.527 2017/07/18 08:05:03 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.527 2017/07/18 08:05:03 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
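
/*
 * Illustrative usage (not part of the original file): the second
 * DPRINTF() argument is a complete, parenthesized printf() argument
 * list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */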

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts this driver uses: one per queue, plus
 * one for link status changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;
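
/*
 * Illustrative note (an assumption based on the opt_if_wm.h include
 * above, not stated in the original): these defaults are meant to be
 * overridden from the kernel configuration, e.g.:
 *
 *	options 	WM_DISABLE_MSI=1
 *	options 	WM_DISABLE_MSIX=1
 */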

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
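
/*
 * Worked example of the masking above (illustrative): descriptor and
 * job counts are powers of two, so "(x + 1) & mask" is a wrap-around
 * increment without a division.  With WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 100)  == 101
 *	WM_NEXTTX(txq, 4095) == 4096 & 4095 == 0
 *
 * This is why txq_num and txq_ndesc must be powers of two.
 */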

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
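
/*
 * Arithmetic behind the comment above (illustrative): a ~9k jumbo
 * frame needs five 2k clusters (ceil(9000 / 2048) == 5), so a
 * 256-entry ring leaves room for roughly 256 / 5, i.e. about 50,
 * jumbo packets in flight.
 */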

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (/*CONSTCOND*/0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
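
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * yields
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * '##' is not processed inside a string literal, so the name buffer
 * has a fixed 18-byte size, enough for the "txqNNtxdw"-style names
 * built by WM_Q_EVCNT_ATTACH(), e.g. "txq00txdw" for queue 0.
 */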

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */
	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
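
/*
 * Illustrative note (an assumption, not stated in the original): at
 * attach time this ops vector is filled with the chip-specific
 * semaphore routines declared below, e.g. for 82575-class devices:
 *
 *	sc->phy.acquire = wm_get_phy_82575;
 *	sc->phy.release = wm_put_phy_82575;
 */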

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
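
/*
 * Illustrative sketch of the chaining above: rxq_tailp always points
 * at the pointer that will hold the next fragment, so appends are O(1):
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, rxq_tail == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */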

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
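
/*
 * CSR_WRITE_FLUSH() forces posted writes out to the device by doing a
 * harmless read of the STATUS register.  Illustrative sequence (a
 * sketch, not a quote of driver code) before a timed delay:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(100);
 */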

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
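
/*
 * Worked example (illustrative): descriptor base addresses are split
 * into 32-bit halves for the low/high base address registers.  For
 * WM_CDTXADDR() == 0x0000000123456780:
 *
 *	WM_CDTXADDR_LO() == 0x23456780
 *	WM_CDTXADDR_HI() == 0x00000001
 *
 * The uint64_t cast keeps the ">> 32" well-defined when bus_addr_t is
 * 32 bits wide, in which case the high half is simply 0.
 */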

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions do not access MII registers; they access
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1333 	  "82580 1000BaseT Ethernet (SERDES)",
   1334 	  WM_T_82580,		WMP_F_SERDES },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1337 	  "82580 gigabit Ethernet (SGMII)",
   1338 	  WM_T_82580,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1340 	  "82580 dual-1000BaseT Ethernet",
   1341 	  WM_T_82580,		WMP_F_COPPER },
   1342 
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1344 	  "82580 quad-1000BaseX Ethernet",
   1345 	  WM_T_82580,		WMP_F_FIBER },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1348 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1349 	  WM_T_82580,		WMP_F_COPPER },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1352 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1353 	  WM_T_82580,		WMP_F_SERDES },
   1354 
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1356 	  "DH89XXCC 1000BASE-KX Ethernet",
   1357 	  WM_T_82580,		WMP_F_SERDES },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1360 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1361 	  WM_T_82580,		WMP_F_SERDES },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1364 	  "I350 Gigabit Network Connection",
   1365 	  WM_T_I350,		WMP_F_COPPER },
   1366 
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1368 	  "I350 Gigabit Fiber Network Connection",
   1369 	  WM_T_I350,		WMP_F_FIBER },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1372 	  "I350 Gigabit Backplane Connection",
   1373 	  WM_T_I350,		WMP_F_SERDES },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1376 	  "I350 Quad Port Gigabit Ethernet",
   1377 	  WM_T_I350,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1380 	  "I350 Gigabit Connection",
   1381 	  WM_T_I350,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1384 	  "I354 Gigabit Ethernet (KX)",
   1385 	  WM_T_I354,		WMP_F_SERDES },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1388 	  "I354 Gigabit Ethernet (SGMII)",
   1389 	  WM_T_I354,		WMP_F_COPPER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1392 	  "I354 Gigabit Ethernet (2.5G)",
   1393 	  WM_T_I354,		WMP_F_COPPER },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1396 	  "I210-T1 Ethernet Server Adapter",
   1397 	  WM_T_I210,		WMP_F_COPPER },
   1398 
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1400 	  "I210 Ethernet (Copper OEM)",
   1401 	  WM_T_I210,		WMP_F_COPPER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1404 	  "I210 Ethernet (Copper IT)",
   1405 	  WM_T_I210,		WMP_F_COPPER },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1408 	  "I210 Ethernet (FLASH less)",
   1409 	  WM_T_I210,		WMP_F_COPPER },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1412 	  "I210 Gigabit Ethernet (Fiber)",
   1413 	  WM_T_I210,		WMP_F_FIBER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1416 	  "I210 Gigabit Ethernet (SERDES)",
   1417 	  WM_T_I210,		WMP_F_SERDES },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1420 	  "I210 Gigabit Ethernet (FLASH less)",
   1421 	  WM_T_I210,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1424 	  "I210 Gigabit Ethernet (SGMII)",
   1425 	  WM_T_I210,		WMP_F_COPPER },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1428 	  "I211 Ethernet (COPPER)",
   1429 	  WM_T_I211,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1431 	  "I217 V Ethernet Connection",
   1432 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1434 	  "I217 LM Ethernet Connection",
   1435 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1437 	  "I218 V Ethernet Connection",
   1438 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1440 	  "I218 V Ethernet Connection",
   1441 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1443 	  "I218 V Ethernet Connection",
   1444 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1446 	  "I218 LM Ethernet Connection",
   1447 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1449 	  "I218 LM Ethernet Connection",
   1450 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1452 	  "I218 LM Ethernet Connection",
   1453 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1454 #if 0
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1456 	  "I219 V Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1459 	  "I219 V Ethernet Connection",
   1460 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1462 	  "I219 V Ethernet Connection",
   1463 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1465 	  "I219 V Ethernet Connection",
   1466 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1468 	  "I219 LM Ethernet Connection",
   1469 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1471 	  "I219 LM Ethernet Connection",
   1472 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1474 	  "I219 LM Ethernet Connection",
   1475 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1477 	  "I219 LM Ethernet Connection",
   1478 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1480 	  "I219 LM Ethernet Connection",
   1481 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1482 #endif
   1483 	{ 0,			0,
   1484 	  NULL,
   1485 	  0,			0 },
   1486 };
   1487 
   1488 /*
   1489  * Register read/write functions.
   1490  * Other than CSR_{READ|WRITE}().
   1491  */
   1492 
   1493 #if 0 /* Not currently used */
   1494 static inline uint32_t
   1495 wm_io_read(struct wm_softc *sc, int reg)
   1496 {
   1497 
   1498 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1499 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1500 }
   1501 #endif
   1502 
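/*
 * The I/O BAR is an indirect window: the register offset goes in the
 * 32-bit cell at BAR offset 0 and the data in the cell at offset 4,
 * as the bus_space accesses below show.  A hypothetical (illustrative
 * only) errata workaround could force a register write through this
 * window instead of the memory BAR:
 *
 *	if (sc->sc_flags & WM_F_IOH_VALID)
 *		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 */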
   1503 static inline void
   1504 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1505 {
   1506 
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1508 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1509 }
   1510 
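/*
 * Worked example (illustrative): writing data 0x42 to 8-bit sub-register
 * 3 behind `reg' packs the request as
 *
 *	(0x42 & SCTL_CTL_DATA_MASK) | (3 << SCTL_CTL_ADDR_SHIFT)
 *
 * and then polls for SCTL_CTL_READY in 5us steps, giving up after
 * SCTL_CTL_POLL_TIMEOUT iterations.
 */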
   1511 static inline void
   1512 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1513     uint32_t data)
   1514 {
   1515 	uint32_t regval;
   1516 	int i;
   1517 
   1518 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1519 
   1520 	CSR_WRITE(sc, reg, regval);
   1521 
   1522 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1523 		delay(5);
   1524 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1525 			break;
   1526 	}
   1527 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1528 		aprint_error("%s: WARNING:"
   1529 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1530 		    device_xname(sc->sc_dev), reg);
   1531 	}
   1532 }
   1533 
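/*
 * Example: with a 64-bit bus_addr_t, v = 0x123456789 splits into
 * wa_low = 0x23456789 and wa_high = 0x1; with a 32-bit bus_addr_t
 * the high word is simply zeroed.
 */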
   1534 static inline void
   1535 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1536 {
   1537 	wa->wa_low = htole32(v & 0xffffffffU);
   1538 	if (sizeof(bus_addr_t) == 8)
   1539 		wa->wa_high = htole32((uint64_t) v >> 32);
   1540 	else
   1541 		wa->wa_high = 0;
   1542 }
   1543 
   1544 /*
   1545  * Descriptor sync/init functions.
   1546  */
   1547 static inline void
   1548 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1549 {
   1550 	struct wm_softc *sc = txq->txq_sc;
   1551 
   1552 	/* If it will wrap around, sync to the end of the ring. */
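	/*
	 * E.g. with WM_NTXDESC(txq) == 256, start == 250 and num == 10,
	 * descriptors 250..255 are synced here; num and start then
	 * become 4 and 0, and the sync below covers descriptors 0..3.
	 */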
   1553 	if ((start + num) > WM_NTXDESC(txq)) {
   1554 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1555 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1556 		    (WM_NTXDESC(txq) - start), ops);
   1557 		num -= (WM_NTXDESC(txq) - start);
   1558 		start = 0;
   1559 	}
   1560 
   1561 	/* Now sync whatever is left. */
   1562 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1563 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1564 }
   1565 
   1566 static inline void
   1567 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1568 {
   1569 	struct wm_softc *sc = rxq->rxq_sc;
   1570 
   1571 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1572 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1573 }
   1574 
   1575 static inline void
   1576 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1577 {
   1578 	struct wm_softc *sc = rxq->rxq_sc;
   1579 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1580 	struct mbuf *m = rxs->rxs_mbuf;
   1581 
   1582 	/*
   1583 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1584 	 * so that the payload after the Ethernet header is aligned
   1585 	 * to a 4-byte boundary.
    1586 	 *
   1587 	 * XXX BRAINDAMAGE ALERT!
   1588 	 * The stupid chip uses the same size for every buffer, which
   1589 	 * is set in the Receive Control register.  We are using the 2K
   1590 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1591 	 * reason, we can't "scoot" packets longer than the standard
   1592 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1593 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1594 	 * the upper layer copy the headers.
   1595 	 */
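	/*
	 * Worked example: the Ethernet header is 14 bytes, so with the
	 * 2-byte tweak the IP header starts at buffer offset 14 + 2 = 16,
	 * which is 4-byte aligned; without it, it would start at 14.
	 */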
   1596 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1597 
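	/*
	 * Three receive descriptor layouts are in use: the 82574 extended
	 * format, the "new queue" format for 82575 and newer chips, and
	 * the original wiseman layout for everything else.
	 */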
   1598 	if (sc->sc_type == WM_T_82574) {
   1599 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1600 		rxd->erx_data.erxd_addr =
   1601 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1602 		rxd->erx_data.erxd_dd = 0;
   1603 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1604 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1605 
   1606 		rxd->nqrx_data.nrxd_paddr =
   1607 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1608 		/* Currently, split header is not supported. */
   1609 		rxd->nqrx_data.nrxd_haddr = 0;
   1610 	} else {
   1611 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1612 
   1613 		wm_set_dma_addr(&rxd->wrx_addr,
   1614 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->wrx_len = 0;
   1616 		rxd->wrx_cksum = 0;
   1617 		rxd->wrx_status = 0;
   1618 		rxd->wrx_errors = 0;
   1619 		rxd->wrx_special = 0;
   1620 	}
   1621 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1622 
   1623 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1624 }
   1625 
   1626 /*
   1627  * Device driver interface functions and commonly used functions.
   1628  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1629  */
   1630 
   1631 /* Lookup supported device table */
   1632 static const struct wm_product *
   1633 wm_lookup(const struct pci_attach_args *pa)
   1634 {
   1635 	const struct wm_product *wmp;
   1636 
   1637 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1638 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1639 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1640 			return wmp;
   1641 	}
   1642 	return NULL;
   1643 }
   1644 
   1645 /* The match function (ca_match) */
   1646 static int
   1647 wm_match(device_t parent, cfdata_t cf, void *aux)
   1648 {
   1649 	struct pci_attach_args *pa = aux;
   1650 
   1651 	if (wm_lookup(pa) != NULL)
   1652 		return 1;
   1653 
   1654 	return 0;
   1655 }
   1656 
   1657 /* The attach function (ca_attach) */
   1658 static void
   1659 wm_attach(device_t parent, device_t self, void *aux)
   1660 {
   1661 	struct wm_softc *sc = device_private(self);
   1662 	struct pci_attach_args *pa = aux;
   1663 	prop_dictionary_t dict;
   1664 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1665 	pci_chipset_tag_t pc = pa->pa_pc;
   1666 	int counts[PCI_INTR_TYPE_SIZE];
   1667 	pci_intr_type_t max_type;
   1668 	const char *eetype, *xname;
   1669 	bus_space_tag_t memt;
   1670 	bus_space_handle_t memh;
   1671 	bus_size_t memsize;
   1672 	int memh_valid;
   1673 	int i, error;
   1674 	const struct wm_product *wmp;
   1675 	prop_data_t ea;
   1676 	prop_number_t pn;
   1677 	uint8_t enaddr[ETHER_ADDR_LEN];
   1678 	char buf[256];
   1679 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1680 	pcireg_t preg, memtype;
   1681 	uint16_t eeprom_data, apme_mask;
   1682 	bool force_clear_smbi;
   1683 	uint32_t link_mode;
   1684 	uint32_t reg;
   1685 
   1686 	sc->sc_dev = self;
   1687 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1688 	sc->sc_core_stopping = false;
   1689 
   1690 	wmp = wm_lookup(pa);
   1691 #ifdef DIAGNOSTIC
   1692 	if (wmp == NULL) {
   1693 		printf("\n");
   1694 		panic("wm_attach: impossible");
   1695 	}
   1696 #endif
   1697 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1698 
   1699 	sc->sc_pc = pa->pa_pc;
   1700 	sc->sc_pcitag = pa->pa_tag;
   1701 
   1702 	if (pci_dma64_available(pa))
   1703 		sc->sc_dmat = pa->pa_dmat64;
   1704 	else
   1705 		sc->sc_dmat = pa->pa_dmat;
   1706 
   1707 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1708 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1709 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1710 
   1711 	sc->sc_type = wmp->wmp_type;
   1712 
   1713 	/* Set default function pointers */
   1714 	sc->phy.acquire = wm_get_null;
   1715 	sc->phy.release = wm_put_null;
   1716 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1717 
   1718 	if (sc->sc_type < WM_T_82543) {
   1719 		if (sc->sc_rev < 2) {
   1720 			aprint_error_dev(sc->sc_dev,
   1721 			    "i82542 must be at least rev. 2\n");
   1722 			return;
   1723 		}
   1724 		if (sc->sc_rev < 3)
   1725 			sc->sc_type = WM_T_82542_2_0;
   1726 	}
   1727 
   1728 	/*
   1729 	 * Disable MSI for Errata:
   1730 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1731 	 *
   1732 	 *  82544: Errata 25
   1733 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1734 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1735 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1736 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1737 	 *
   1738 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1739 	 *
   1740 	 *  82571 & 82572: Errata 63
   1741 	 */
   1742 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1743 	    || (sc->sc_type == WM_T_82572))
   1744 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1745 
   1746 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1747 	    || (sc->sc_type == WM_T_82580)
   1748 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1749 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1750 		sc->sc_flags |= WM_F_NEWQUEUE;
   1751 
   1752 	/* Set device properties (mactype) */
   1753 	dict = device_properties(sc->sc_dev);
   1754 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1755 
   1756 	/*
    1757 	 * Map the device.  All devices support memory-mapped access,
   1758 	 * and it is really required for normal operation.
   1759 	 */
   1760 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1761 	switch (memtype) {
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1763 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1764 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1765 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1766 		break;
   1767 	default:
   1768 		memh_valid = 0;
   1769 		break;
   1770 	}
   1771 
   1772 	if (memh_valid) {
   1773 		sc->sc_st = memt;
   1774 		sc->sc_sh = memh;
   1775 		sc->sc_ss = memsize;
   1776 	} else {
   1777 		aprint_error_dev(sc->sc_dev,
   1778 		    "unable to map device registers\n");
   1779 		return;
   1780 	}
   1781 
   1782 	/*
   1783 	 * In addition, i82544 and later support I/O mapped indirect
   1784 	 * register access.  It is not desirable (nor supported in
   1785 	 * this driver) to use it for normal operation, though it is
   1786 	 * required to work around bugs in some chip versions.
   1787 	 */
   1788 	if (sc->sc_type >= WM_T_82544) {
   1789 		/* First we have to find the I/O BAR. */
   1790 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1791 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1792 			if (memtype == PCI_MAPREG_TYPE_IO)
   1793 				break;
   1794 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1795 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1796 				i += 4;	/* skip high bits, too */
   1797 		}
   1798 		if (i < PCI_MAPREG_END) {
   1799 			/*
   1800 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
    1801 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1802 			 * That's not a problem, because those newer chips
    1803 			 * don't have this bug.
   1804 			 *
   1805 			 * The i8254x doesn't apparently respond when the
   1806 			 * I/O BAR is 0, which looks somewhat like it's not
   1807 			 * been configured.
   1808 			 */
   1809 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1810 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1811 				aprint_error_dev(sc->sc_dev,
   1812 				    "WARNING: I/O BAR at zero.\n");
   1813 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1814 					0, &sc->sc_iot, &sc->sc_ioh,
   1815 					NULL, &sc->sc_ios) == 0) {
   1816 				sc->sc_flags |= WM_F_IOH_VALID;
   1817 			} else {
   1818 				aprint_error_dev(sc->sc_dev,
   1819 				    "WARNING: unable to map I/O space\n");
   1820 			}
   1821 		}
   1822 
   1823 	}
   1824 
   1825 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1826 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1827 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1828 	if (sc->sc_type < WM_T_82542_2_1)
   1829 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1830 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1831 
   1832 	/* power up chip */
   1833 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1834 	    NULL)) && error != EOPNOTSUPP) {
   1835 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1836 		return;
   1837 	}
   1838 
   1839 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1840 
   1841 	/* Allocation settings */
   1842 	max_type = PCI_INTR_TYPE_MSIX;
   1843 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1844 	counts[PCI_INTR_TYPE_MSI] = 1;
   1845 	counts[PCI_INTR_TYPE_INTX] = 1;
   1846 	/* overridden by disable flags */
   1847 	if (wm_disable_msi != 0) {
   1848 		counts[PCI_INTR_TYPE_MSI] = 0;
   1849 		if (wm_disable_msix != 0) {
   1850 			max_type = PCI_INTR_TYPE_INTX;
   1851 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1852 		}
   1853 	} else if (wm_disable_msix != 0) {
   1854 		max_type = PCI_INTR_TYPE_MSI;
   1855 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1856 	}
   1857 
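	/*
	 * Allocation falls back in stages: try MSI-X first; if its setup
	 * fails, release it and retry with MSI, and finally with INTx.
	 */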
   1858 alloc_retry:
   1859 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1860 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1861 		return;
   1862 	}
   1863 
   1864 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1865 		error = wm_setup_msix(sc);
   1866 		if (error) {
   1867 			pci_intr_release(pc, sc->sc_intrs,
   1868 			    counts[PCI_INTR_TYPE_MSIX]);
   1869 
   1870 			/* Setup for MSI: Disable MSI-X */
   1871 			max_type = PCI_INTR_TYPE_MSI;
   1872 			counts[PCI_INTR_TYPE_MSI] = 1;
   1873 			counts[PCI_INTR_TYPE_INTX] = 1;
   1874 			goto alloc_retry;
   1875 		}
    1876 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1877 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1878 		error = wm_setup_legacy(sc);
   1879 		if (error) {
   1880 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSI]);
   1882 
   1883 			/* The next try is for INTx: Disable MSI */
   1884 			max_type = PCI_INTR_TYPE_INTX;
   1885 			counts[PCI_INTR_TYPE_INTX] = 1;
   1886 			goto alloc_retry;
   1887 		}
   1888 	} else {
   1889 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1890 		error = wm_setup_legacy(sc);
   1891 		if (error) {
   1892 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1893 			    counts[PCI_INTR_TYPE_INTX]);
   1894 			return;
   1895 		}
   1896 	}
   1897 
   1898 	/*
   1899 	 * Check the function ID (unit number of the chip).
   1900 	 */
   1901 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1902 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1903 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1904 	    || (sc->sc_type == WM_T_82580)
   1905 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1906 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1907 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1908 	else
   1909 		sc->sc_funcid = 0;
   1910 
   1911 	/*
   1912 	 * Determine a few things about the bus we're connected to.
   1913 	 */
   1914 	if (sc->sc_type < WM_T_82543) {
   1915 		/* We don't really know the bus characteristics here. */
   1916 		sc->sc_bus_speed = 33;
   1917 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1918 		/*
   1919 		 * CSA (Communication Streaming Architecture) is about as fast
    1920 		 * as a 32-bit, 66MHz PCI bus.
   1921 		 */
   1922 		sc->sc_flags |= WM_F_CSA;
   1923 		sc->sc_bus_speed = 66;
   1924 		aprint_verbose_dev(sc->sc_dev,
   1925 		    "Communication Streaming Architecture\n");
   1926 		if (sc->sc_type == WM_T_82547) {
   1927 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1928 			callout_setfunc(&sc->sc_txfifo_ch,
   1929 					wm_82547_txfifo_stall, sc);
   1930 			aprint_verbose_dev(sc->sc_dev,
   1931 			    "using 82547 Tx FIFO stall work-around\n");
   1932 		}
   1933 	} else if (sc->sc_type >= WM_T_82571) {
   1934 		sc->sc_flags |= WM_F_PCIE;
   1935 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1936 		    && (sc->sc_type != WM_T_ICH10)
   1937 		    && (sc->sc_type != WM_T_PCH)
   1938 		    && (sc->sc_type != WM_T_PCH2)
   1939 		    && (sc->sc_type != WM_T_PCH_LPT)
   1940 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1941 			/* ICH* and PCH* have no PCIe capability registers */
   1942 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1943 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1944 				NULL) == 0)
   1945 				aprint_error_dev(sc->sc_dev,
   1946 				    "unable to find PCIe capability\n");
   1947 		}
   1948 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1949 	} else {
   1950 		reg = CSR_READ(sc, WMREG_STATUS);
   1951 		if (reg & STATUS_BUS64)
   1952 			sc->sc_flags |= WM_F_BUS64;
   1953 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1954 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1955 
   1956 			sc->sc_flags |= WM_F_PCIX;
   1957 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1958 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1959 				aprint_error_dev(sc->sc_dev,
   1960 				    "unable to find PCIX capability\n");
   1961 			else if (sc->sc_type != WM_T_82545_3 &&
   1962 				 sc->sc_type != WM_T_82546_3) {
   1963 				/*
   1964 				 * Work around a problem caused by the BIOS
   1965 				 * setting the max memory read byte count
   1966 				 * incorrectly.
   1967 				 */
   1968 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1969 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1970 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1971 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1972 
   1973 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1974 				    PCIX_CMD_BYTECNT_SHIFT;
   1975 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1976 				    PCIX_STATUS_MAXB_SHIFT;
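				/*
				 * Both fields encode a size of
				 * 512 << n bytes, so e.g. n == 2
				 * means a 2048-byte max read count.
				 */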
   1977 				if (bytecnt > maxb) {
   1978 					aprint_verbose_dev(sc->sc_dev,
   1979 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1980 					    512 << bytecnt, 512 << maxb);
   1981 					pcix_cmd = (pcix_cmd &
   1982 					    ~PCIX_CMD_BYTECNT_MASK) |
   1983 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1984 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1985 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1986 					    pcix_cmd);
   1987 				}
   1988 			}
   1989 		}
   1990 		/*
   1991 		 * The quad port adapter is special; it has a PCIX-PCIX
   1992 		 * bridge on the board, and can run the secondary bus at
   1993 		 * a higher speed.
   1994 		 */
   1995 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1996 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1997 								      : 66;
   1998 		} else if (sc->sc_flags & WM_F_PCIX) {
   1999 			switch (reg & STATUS_PCIXSPD_MASK) {
   2000 			case STATUS_PCIXSPD_50_66:
   2001 				sc->sc_bus_speed = 66;
   2002 				break;
   2003 			case STATUS_PCIXSPD_66_100:
   2004 				sc->sc_bus_speed = 100;
   2005 				break;
   2006 			case STATUS_PCIXSPD_100_133:
   2007 				sc->sc_bus_speed = 133;
   2008 				break;
   2009 			default:
   2010 				aprint_error_dev(sc->sc_dev,
   2011 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2012 				    reg & STATUS_PCIXSPD_MASK);
   2013 				sc->sc_bus_speed = 66;
   2014 				break;
   2015 			}
   2016 		} else
   2017 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2018 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2019 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2020 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2021 	}
   2022 
   2023 	/* clear interesting stat counters */
   2024 	CSR_READ(sc, WMREG_COLC);
   2025 	CSR_READ(sc, WMREG_RXERRC);
   2026 
   2027 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2028 	    || (sc->sc_type >= WM_T_ICH8))
   2029 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2030 	if (sc->sc_type >= WM_T_ICH8)
   2031 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2032 
   2033 	/* Set PHY, NVM mutex related stuff */
   2034 	switch (sc->sc_type) {
   2035 	case WM_T_82542_2_0:
   2036 	case WM_T_82542_2_1:
   2037 	case WM_T_82543:
   2038 	case WM_T_82544:
   2039 		/* Microwire */
   2040 		sc->sc_nvm_wordsize = 64;
   2041 		sc->sc_nvm_addrbits = 6;
   2042 		break;
   2043 	case WM_T_82540:
   2044 	case WM_T_82545:
   2045 	case WM_T_82545_3:
   2046 	case WM_T_82546:
   2047 	case WM_T_82546_3:
   2048 		/* Microwire */
   2049 		reg = CSR_READ(sc, WMREG_EECD);
   2050 		if (reg & EECD_EE_SIZE) {
   2051 			sc->sc_nvm_wordsize = 256;
   2052 			sc->sc_nvm_addrbits = 8;
   2053 		} else {
   2054 			sc->sc_nvm_wordsize = 64;
   2055 			sc->sc_nvm_addrbits = 6;
   2056 		}
   2057 		sc->sc_flags |= WM_F_LOCK_EECD;
   2058 		break;
   2059 	case WM_T_82541:
   2060 	case WM_T_82541_2:
   2061 	case WM_T_82547:
   2062 	case WM_T_82547_2:
   2063 		sc->sc_flags |= WM_F_LOCK_EECD;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_TYPE) {
   2066 			/* SPI */
   2067 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2068 			wm_nvm_set_addrbits_size_eecd(sc);
   2069 		} else {
   2070 			/* Microwire */
   2071 			if ((reg & EECD_EE_ABITS) != 0) {
   2072 				sc->sc_nvm_wordsize = 256;
   2073 				sc->sc_nvm_addrbits = 8;
   2074 			} else {
   2075 				sc->sc_nvm_wordsize = 64;
   2076 				sc->sc_nvm_addrbits = 6;
   2077 			}
   2078 		}
   2079 		break;
   2080 	case WM_T_82571:
   2081 	case WM_T_82572:
   2082 		/* SPI */
   2083 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2084 		wm_nvm_set_addrbits_size_eecd(sc);
   2085 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2086 		sc->phy.acquire = wm_get_swsm_semaphore;
   2087 		sc->phy.release = wm_put_swsm_semaphore;
   2088 		break;
   2089 	case WM_T_82573:
   2090 	case WM_T_82574:
   2091 	case WM_T_82583:
   2092 		if (sc->sc_type == WM_T_82573) {
   2093 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2094 			sc->phy.acquire = wm_get_swsm_semaphore;
   2095 			sc->phy.release = wm_put_swsm_semaphore;
   2096 		} else {
   2097 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2098 			/* Both PHY and NVM use the same semaphore. */
   2099 			sc->phy.acquire
   2100 			    = wm_get_swfwhw_semaphore;
   2101 			sc->phy.release
   2102 			    = wm_put_swfwhw_semaphore;
   2103 		}
   2104 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2105 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2106 			sc->sc_nvm_wordsize = 2048;
   2107 		} else {
   2108 			/* SPI */
   2109 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2110 			wm_nvm_set_addrbits_size_eecd(sc);
   2111 		}
   2112 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2113 		break;
   2114 	case WM_T_82575:
   2115 	case WM_T_82576:
   2116 	case WM_T_82580:
   2117 	case WM_T_I350:
   2118 	case WM_T_I354:
   2119 	case WM_T_80003:
   2120 		/* SPI */
   2121 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2122 		wm_nvm_set_addrbits_size_eecd(sc);
   2123 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2124 		    | WM_F_LOCK_SWSM;
   2125 		sc->phy.acquire = wm_get_phy_82575;
   2126 		sc->phy.release = wm_put_phy_82575;
   2127 		break;
   2128 	case WM_T_ICH8:
   2129 	case WM_T_ICH9:
   2130 	case WM_T_ICH10:
   2131 	case WM_T_PCH:
   2132 	case WM_T_PCH2:
   2133 	case WM_T_PCH_LPT:
   2134 		/* FLASH */
   2135 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2136 		sc->sc_nvm_wordsize = 2048;
   2137 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2138 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2139 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2140 			aprint_error_dev(sc->sc_dev,
   2141 			    "can't map FLASH registers\n");
   2142 			goto out;
   2143 		}
   2144 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2145 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2146 		    ICH_FLASH_SECTOR_SIZE;
   2147 		sc->sc_ich8_flash_bank_size =
   2148 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2149 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2150 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2151 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
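		/*
		 * Worked example (assuming a 4 KB flash sector): if GFPREG
		 * reads 0x00010000, the NVM region starts at sector 0 and
		 * ends at sector 1, i.e. 2 sectors = 8 KB total, which the
		 * arithmetic above turns into 2048 16-bit words per bank.
		 */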
   2152 		sc->sc_flashreg_offset = 0;
   2153 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2154 		sc->phy.release = wm_put_swflag_ich8lan;
   2155 		break;
   2156 	case WM_T_PCH_SPT:
   2157 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2158 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2159 		sc->sc_flasht = sc->sc_st;
   2160 		sc->sc_flashh = sc->sc_sh;
   2161 		sc->sc_ich8_flash_base = 0;
   2162 		sc->sc_nvm_wordsize =
   2163 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2164 			* NVM_SIZE_MULTIPLIER;
    2165 		/* That is the size in bytes; we want words */
   2166 		sc->sc_nvm_wordsize /= 2;
   2167 		/* assume 2 banks */
   2168 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2169 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2170 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2171 		sc->phy.release = wm_put_swflag_ich8lan;
   2172 		break;
   2173 	case WM_T_I210:
   2174 	case WM_T_I211:
   2175 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2176 			wm_nvm_set_addrbits_size_eecd(sc);
   2177 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2178 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2179 		} else {
   2180 			sc->sc_nvm_wordsize = INVM_SIZE;
   2181 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2182 		}
   2183 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2184 		sc->phy.acquire = wm_get_phy_82575;
   2185 		sc->phy.release = wm_put_phy_82575;
   2186 		break;
   2187 	default:
   2188 		break;
   2189 	}
   2190 
   2191 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2192 	switch (sc->sc_type) {
   2193 	case WM_T_82571:
   2194 	case WM_T_82572:
   2195 		reg = CSR_READ(sc, WMREG_SWSM2);
   2196 		if ((reg & SWSM2_LOCK) == 0) {
   2197 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2198 			force_clear_smbi = true;
   2199 		} else
   2200 			force_clear_smbi = false;
   2201 		break;
   2202 	case WM_T_82573:
   2203 	case WM_T_82574:
   2204 	case WM_T_82583:
   2205 		force_clear_smbi = true;
   2206 		break;
   2207 	default:
   2208 		force_clear_smbi = false;
   2209 		break;
   2210 	}
   2211 	if (force_clear_smbi) {
   2212 		reg = CSR_READ(sc, WMREG_SWSM);
   2213 		if ((reg & SWSM_SMBI) != 0)
   2214 			aprint_error_dev(sc->sc_dev,
   2215 			    "Please update the Bootagent\n");
   2216 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2217 	}
   2218 
   2219 	/*
    2220 	 * Defer printing the EEPROM type until after verifying the checksum.
   2221 	 * This allows the EEPROM type to be printed correctly in the case
   2222 	 * that no EEPROM is attached.
   2223 	 */
   2224 	/*
   2225 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2226 	 * this for later, so we can fail future reads from the EEPROM.
   2227 	 */
   2228 	if (wm_nvm_validate_checksum(sc)) {
   2229 		/*
    2230 		 * Validate a second time, because some PCIe parts fail the
    2231 		 * first check due to the link being in a sleep state.
   2232 		 */
   2233 		if (wm_nvm_validate_checksum(sc))
   2234 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2235 	}
   2236 
   2237 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2238 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2239 	else {
   2240 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2241 		    sc->sc_nvm_wordsize);
   2242 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2243 			aprint_verbose("iNVM");
   2244 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2245 			aprint_verbose("FLASH(HW)");
   2246 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2247 			aprint_verbose("FLASH");
   2248 		else {
   2249 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2250 				eetype = "SPI";
   2251 			else
   2252 				eetype = "MicroWire";
   2253 			aprint_verbose("(%d address bits) %s EEPROM",
   2254 			    sc->sc_nvm_addrbits, eetype);
   2255 		}
   2256 	}
   2257 	wm_nvm_version(sc);
   2258 	aprint_verbose("\n");
   2259 
   2260 	/*
    2261 	 * XXX This is the first call to wm_gmii_setup_phytype(); the result
    2262 	 * might be incorrect.
   2263 	 */
   2264 	wm_gmii_setup_phytype(sc, 0, 0);
   2265 
   2266 	/* Reset the chip to a known state. */
   2267 	wm_reset(sc);
   2268 
   2269 	/* Check for I21[01] PLL workaround */
   2270 	if (sc->sc_type == WM_T_I210)
   2271 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2272 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2273 		/* NVM image release 3.25 has a workaround */
   2274 		if ((sc->sc_nvm_ver_major < 3)
   2275 		    || ((sc->sc_nvm_ver_major == 3)
   2276 			&& (sc->sc_nvm_ver_minor < 25))) {
   2277 			aprint_verbose_dev(sc->sc_dev,
   2278 			    "ROM image version %d.%d is older than 3.25\n",
   2279 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2280 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2281 		}
   2282 	}
   2283 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2284 		wm_pll_workaround_i210(sc);
   2285 
   2286 	wm_get_wakeup(sc);
   2287 
   2288 	/* Non-AMT based hardware can now take control from firmware */
   2289 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2290 		wm_get_hw_control(sc);
   2291 
   2292 	/*
    2293 	 * Read the Ethernet address from the EEPROM, unless it was
    2294 	 * already found in the device properties.
   2295 	 */
   2296 	ea = prop_dictionary_get(dict, "mac-address");
   2297 	if (ea != NULL) {
   2298 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2299 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2300 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2301 	} else {
   2302 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2303 			aprint_error_dev(sc->sc_dev,
   2304 			    "unable to read Ethernet address\n");
   2305 			goto out;
   2306 		}
   2307 	}
   2308 
   2309 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2310 	    ether_sprintf(enaddr));
   2311 
   2312 	/*
   2313 	 * Read the config info from the EEPROM, and set up various
   2314 	 * bits in the control registers based on their contents.
   2315 	 */
   2316 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2317 	if (pn != NULL) {
   2318 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2319 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2320 	} else {
   2321 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2322 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2323 			goto out;
   2324 		}
   2325 	}
   2326 
   2327 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2328 	if (pn != NULL) {
   2329 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2330 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2331 	} else {
   2332 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2333 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2334 			goto out;
   2335 		}
   2336 	}
   2337 
   2338 	/* check for WM_F_WOL */
   2339 	switch (sc->sc_type) {
   2340 	case WM_T_82542_2_0:
   2341 	case WM_T_82542_2_1:
   2342 	case WM_T_82543:
   2343 		/* dummy? */
   2344 		eeprom_data = 0;
   2345 		apme_mask = NVM_CFG3_APME;
   2346 		break;
   2347 	case WM_T_82544:
   2348 		apme_mask = NVM_CFG2_82544_APM_EN;
   2349 		eeprom_data = cfg2;
   2350 		break;
   2351 	case WM_T_82546:
   2352 	case WM_T_82546_3:
   2353 	case WM_T_82571:
   2354 	case WM_T_82572:
   2355 	case WM_T_82573:
   2356 	case WM_T_82574:
   2357 	case WM_T_82583:
   2358 	case WM_T_80003:
   2359 	default:
   2360 		apme_mask = NVM_CFG3_APME;
   2361 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2362 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2363 		break;
   2364 	case WM_T_82575:
   2365 	case WM_T_82576:
   2366 	case WM_T_82580:
   2367 	case WM_T_I350:
   2368 	case WM_T_I354: /* XXX ok? */
   2369 	case WM_T_ICH8:
   2370 	case WM_T_ICH9:
   2371 	case WM_T_ICH10:
   2372 	case WM_T_PCH:
   2373 	case WM_T_PCH2:
   2374 	case WM_T_PCH_LPT:
   2375 	case WM_T_PCH_SPT:
   2376 		/* XXX The funcid should be checked on some devices */
   2377 		apme_mask = WUC_APME;
   2378 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2379 		break;
   2380 	}
   2381 
   2382 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2383 	if ((eeprom_data & apme_mask) != 0)
   2384 		sc->sc_flags |= WM_F_WOL;
   2385 
   2386 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2387 		/* Check NVM for autonegotiation */
   2388 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2389 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2390 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2391 		}
   2392 	}
   2393 
   2394 	/*
    2395 	 * XXX Some multiple-port cards need special handling
    2396 	 * to disable a particular port.
   2397 	 */
   2398 
   2399 	if (sc->sc_type >= WM_T_82544) {
   2400 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2401 		if (pn != NULL) {
   2402 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2403 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2404 		} else {
   2405 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2406 				aprint_error_dev(sc->sc_dev,
   2407 				    "unable to read SWDPIN\n");
   2408 				goto out;
   2409 			}
   2410 		}
   2411 	}
   2412 
   2413 	if (cfg1 & NVM_CFG1_ILOS)
   2414 		sc->sc_ctrl |= CTRL_ILOS;
   2415 
   2416 	/*
   2417 	 * XXX
    2418 	 * This code isn't correct, because pins 2 and 3 are located
    2419 	 * at different positions on newer chips. Check all the datasheets.
    2420 	 *
    2421 	 * Until this is resolved, only apply it to chips <= 82580.
   2422 	 */
   2423 	if (sc->sc_type <= WM_T_82580) {
   2424 		if (sc->sc_type >= WM_T_82544) {
   2425 			sc->sc_ctrl |=
   2426 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2427 			    CTRL_SWDPIO_SHIFT;
   2428 			sc->sc_ctrl |=
   2429 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2430 			    CTRL_SWDPINS_SHIFT;
   2431 		} else {
   2432 			sc->sc_ctrl |=
   2433 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2434 			    CTRL_SWDPIO_SHIFT;
   2435 		}
   2436 	}
   2437 
    2438 	/* XXX Should this be done for chips other than the 82580 too? */
   2439 	if (sc->sc_type == WM_T_82580) {
   2440 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2441 		if (nvmword & __BIT(13))
   2442 			sc->sc_ctrl |= CTRL_ILOS;
   2443 	}
   2444 
   2445 #if 0
   2446 	if (sc->sc_type >= WM_T_82544) {
   2447 		if (cfg1 & NVM_CFG1_IPS0)
   2448 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2449 		if (cfg1 & NVM_CFG1_IPS1)
   2450 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2451 		sc->sc_ctrl_ext |=
   2452 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2453 		    CTRL_EXT_SWDPIO_SHIFT;
   2454 		sc->sc_ctrl_ext |=
   2455 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2456 		    CTRL_EXT_SWDPINS_SHIFT;
   2457 	} else {
   2458 		sc->sc_ctrl_ext |=
   2459 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2460 		    CTRL_EXT_SWDPIO_SHIFT;
   2461 	}
   2462 #endif
   2463 
   2464 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2465 #if 0
   2466 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2467 #endif
   2468 
   2469 	if (sc->sc_type == WM_T_PCH) {
   2470 		uint16_t val;
   2471 
   2472 		/* Save the NVM K1 bit setting */
   2473 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2474 
   2475 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2476 			sc->sc_nvm_k1_enabled = 1;
   2477 		else
   2478 			sc->sc_nvm_k1_enabled = 0;
   2479 	}
   2480 
   2481 	/*
    2482 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2483 	 * media structures accordingly.
   2484 	 */
   2485 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2486 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2487 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2488 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2489 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2490 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2491 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2492 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2493 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2494 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2495 	    || (sc->sc_type == WM_T_I211)) {
   2496 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2497 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2498 		switch (link_mode) {
   2499 		case CTRL_EXT_LINK_MODE_1000KX:
   2500 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2501 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2502 			break;
   2503 		case CTRL_EXT_LINK_MODE_SGMII:
   2504 			if (wm_sgmii_uses_mdio(sc)) {
   2505 				aprint_verbose_dev(sc->sc_dev,
   2506 				    "SGMII(MDIO)\n");
   2507 				sc->sc_flags |= WM_F_SGMII;
   2508 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2509 				break;
   2510 			}
   2511 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2512 			/*FALLTHROUGH*/
   2513 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2514 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2515 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2516 				if (link_mode
   2517 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2518 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2519 					sc->sc_flags |= WM_F_SGMII;
   2520 				} else {
   2521 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2522 					aprint_verbose_dev(sc->sc_dev,
   2523 					    "SERDES\n");
   2524 				}
   2525 				break;
   2526 			}
   2527 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2528 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2529 
   2530 			/* Change current link mode setting */
   2531 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2532 			switch (sc->sc_mediatype) {
   2533 			case WM_MEDIATYPE_COPPER:
   2534 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2535 				break;
   2536 			case WM_MEDIATYPE_SERDES:
   2537 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2538 				break;
   2539 			default:
   2540 				break;
   2541 			}
   2542 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2543 			break;
   2544 		case CTRL_EXT_LINK_MODE_GMII:
   2545 		default:
   2546 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2547 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2548 			break;
   2549 		}
   2550 
    2551 		/* Enable the I2C interface only when SGMII needs it */
    2552 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2553 			reg |= CTRL_EXT_I2C_ENA;
    2554 		else
    2555 			reg &= ~CTRL_EXT_I2C_ENA;
   2556 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2557 
   2558 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2559 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2560 		else
   2561 			wm_tbi_mediainit(sc);
   2562 	} else if (sc->sc_type < WM_T_82543 ||
   2563 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2564 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2565 			aprint_error_dev(sc->sc_dev,
   2566 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2567 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2568 		}
   2569 		wm_tbi_mediainit(sc);
   2570 	} else {
   2571 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2572 			aprint_error_dev(sc->sc_dev,
   2573 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2574 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2575 		}
   2576 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2577 	}
   2578 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2579 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2580 
   2581 	/* Set device properties (macflags) */
   2582 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2583 
   2584 	ifp = &sc->sc_ethercom.ec_if;
   2585 	xname = device_xname(sc->sc_dev);
   2586 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2587 	ifp->if_softc = sc;
   2588 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2589 #ifdef WM_MPSAFE
   2590 	ifp->if_extflags = IFEF_START_MPSAFE;
   2591 #endif
   2592 	ifp->if_ioctl = wm_ioctl;
   2593 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2594 		ifp->if_start = wm_nq_start;
   2595 		/*
    2596 		 * When there is only one CPU and the controller can use
    2597 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2598 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2599 		 * the other for link status changes.
    2600 		 * In this situation, wm_nq_transmit() is disadvantageous
    2601 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2602 		 */
   2603 		if (wm_is_using_multiqueue(sc))
   2604 			ifp->if_transmit = wm_nq_transmit;
   2605 	} else {
   2606 		ifp->if_start = wm_start;
   2607 		/*
    2608 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2609 		 */
   2610 		if (wm_is_using_multiqueue(sc))
   2611 			ifp->if_transmit = wm_transmit;
   2612 	}
   2613 	ifp->if_watchdog = wm_watchdog;
   2614 	ifp->if_init = wm_init;
   2615 	ifp->if_stop = wm_stop;
   2616 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2617 	IFQ_SET_READY(&ifp->if_snd);
   2618 
   2619 	/* Check for jumbo frame */
   2620 	switch (sc->sc_type) {
   2621 	case WM_T_82573:
   2622 		/* XXX limited to 9234 if ASPM is disabled */
   2623 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2624 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2625 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2626 		break;
   2627 	case WM_T_82571:
   2628 	case WM_T_82572:
   2629 	case WM_T_82574:
   2630 	case WM_T_82575:
   2631 	case WM_T_82576:
   2632 	case WM_T_82580:
   2633 	case WM_T_I350:
   2634 	case WM_T_I354: /* XXXX ok? */
   2635 	case WM_T_I210:
   2636 	case WM_T_I211:
   2637 	case WM_T_80003:
   2638 	case WM_T_ICH9:
   2639 	case WM_T_ICH10:
   2640 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2641 	case WM_T_PCH_LPT:
   2642 	case WM_T_PCH_SPT:
   2643 		/* XXX limited to 9234 */
   2644 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2645 		break;
   2646 	case WM_T_PCH:
   2647 		/* XXX limited to 4096 */
   2648 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2649 		break;
   2650 	case WM_T_82542_2_0:
   2651 	case WM_T_82542_2_1:
   2652 	case WM_T_82583:
   2653 	case WM_T_ICH8:
   2654 		/* No support for jumbo frame */
   2655 		break;
   2656 	default:
   2657 		/* ETHER_MAX_LEN_JUMBO */
   2658 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2659 		break;
   2660 	}
   2661 
    2662 	/* If we're an i82543 or greater, we can support VLANs. */
   2663 	if (sc->sc_type >= WM_T_82543)
   2664 		sc->sc_ethercom.ec_capabilities |=
   2665 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2666 
   2667 	/*
    2668 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2669 	 * on i82543 and later.
   2670 	 */
   2671 	if (sc->sc_type >= WM_T_82543) {
   2672 		ifp->if_capabilities |=
   2673 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2674 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2675 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2676 		    IFCAP_CSUM_TCPv6_Tx |
   2677 		    IFCAP_CSUM_UDPv6_Tx;
   2678 	}
   2679 
   2680 	/*
    2681 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2682 	 *
   2683 	 *	82541GI (8086:1076) ... no
   2684 	 *	82572EI (8086:10b9) ... yes
   2685 	 */
   2686 	if (sc->sc_type >= WM_T_82571) {
   2687 		ifp->if_capabilities |=
   2688 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2689 	}
   2690 
   2691 	/*
    2692 	 * If we're an i82544 or greater (except the i82547), we can do
   2693 	 * TCP segmentation offload.
   2694 	 */
   2695 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2696 		ifp->if_capabilities |= IFCAP_TSOv4;
   2697 	}
   2698 
   2699 	if (sc->sc_type >= WM_T_82571) {
   2700 		ifp->if_capabilities |= IFCAP_TSOv6;
   2701 	}
   2702 
   2703 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2704 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2705 
   2706 #ifdef WM_MPSAFE
   2707 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2708 #else
   2709 	sc->sc_core_lock = NULL;
   2710 #endif
   2711 
   2712 	/* Attach the interface. */
   2713 	if_initialize(ifp);
   2714 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2715 	ether_ifattach(ifp, enaddr);
   2716 	if_register(ifp);
   2717 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2718 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2719 			  RND_FLAG_DEFAULT);
   2720 
   2721 #ifdef WM_EVENT_COUNTERS
   2722 	/* Attach event counters. */
   2723 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2724 	    NULL, xname, "linkintr");
   2725 
   2726 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2727 	    NULL, xname, "tx_xoff");
   2728 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2729 	    NULL, xname, "tx_xon");
   2730 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2731 	    NULL, xname, "rx_xoff");
   2732 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2733 	    NULL, xname, "rx_xon");
   2734 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2735 	    NULL, xname, "rx_macctl");
   2736 #endif /* WM_EVENT_COUNTERS */
   2737 
   2738 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2739 		pmf_class_network_register(self, ifp);
   2740 	else
   2741 		aprint_error_dev(self, "couldn't establish power handler\n");
   2742 
   2743 	sc->sc_flags |= WM_F_ATTACHED;
   2744  out:
   2745 	return;
   2746 }
   2747 
   2748 /* The detach function (ca_detach) */
   2749 static int
   2750 wm_detach(device_t self, int flags __unused)
   2751 {
   2752 	struct wm_softc *sc = device_private(self);
   2753 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2754 	int i;
   2755 
   2756 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2757 		return 0;
   2758 
   2759 	/* Stop the interface. Callouts are stopped in it. */
   2760 	wm_stop(ifp, 1);
   2761 
   2762 	pmf_device_deregister(self);
   2763 
   2764 #ifdef WM_EVENT_COUNTERS
   2765 	evcnt_detach(&sc->sc_ev_linkintr);
   2766 
   2767 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2768 	evcnt_detach(&sc->sc_ev_tx_xon);
   2769 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2770 	evcnt_detach(&sc->sc_ev_rx_xon);
   2771 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2772 #endif /* WM_EVENT_COUNTERS */
   2773 
   2774 	/* Tell the firmware about the release */
   2775 	WM_CORE_LOCK(sc);
   2776 	wm_release_manageability(sc);
   2777 	wm_release_hw_control(sc);
   2778 	wm_enable_wakeup(sc);
   2779 	WM_CORE_UNLOCK(sc);
   2780 
   2781 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2782 
   2783 	/* Delete all remaining media. */
   2784 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2785 
   2786 	ether_ifdetach(ifp);
   2787 	if_detach(ifp);
   2788 	if_percpuq_destroy(sc->sc_ipq);
   2789 
   2790 	/* Unload RX dmamaps and free mbufs */
   2791 	for (i = 0; i < sc->sc_nqueues; i++) {
   2792 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2793 		mutex_enter(rxq->rxq_lock);
   2794 		wm_rxdrain(rxq);
   2795 		mutex_exit(rxq->rxq_lock);
   2796 	}
	/* The rxq locks must not be held beyond this point. */
   2798 
   2799 	/* Disestablish the interrupt handler */
   2800 	for (i = 0; i < sc->sc_nintrs; i++) {
   2801 		if (sc->sc_ihs[i] != NULL) {
   2802 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2803 			sc->sc_ihs[i] = NULL;
   2804 		}
   2805 	}
   2806 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2807 
   2808 	wm_free_txrx_queues(sc);
   2809 
   2810 	/* Unmap the registers */
   2811 	if (sc->sc_ss) {
   2812 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2813 		sc->sc_ss = 0;
   2814 	}
   2815 	if (sc->sc_ios) {
   2816 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2817 		sc->sc_ios = 0;
   2818 	}
   2819 	if (sc->sc_flashs) {
   2820 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2821 		sc->sc_flashs = 0;
   2822 	}
   2823 
   2824 	if (sc->sc_core_lock)
   2825 		mutex_obj_free(sc->sc_core_lock);
   2826 	if (sc->sc_ich_phymtx)
   2827 		mutex_obj_free(sc->sc_ich_phymtx);
   2828 	if (sc->sc_ich_nvmmtx)
   2829 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2830 
   2831 	return 0;
   2832 }
   2833 
   2834 static bool
   2835 wm_suspend(device_t self, const pmf_qual_t *qual)
   2836 {
   2837 	struct wm_softc *sc = device_private(self);
   2838 
   2839 	wm_release_manageability(sc);
   2840 	wm_release_hw_control(sc);
   2841 	wm_enable_wakeup(sc);
   2842 
   2843 	return true;
   2844 }
   2845 
   2846 static bool
   2847 wm_resume(device_t self, const pmf_qual_t *qual)
   2848 {
   2849 	struct wm_softc *sc = device_private(self);
   2850 
   2851 	wm_init_manageability(sc);
   2852 
   2853 	return true;
   2854 }
   2855 
   2856 /*
   2857  * wm_watchdog:		[ifnet interface function]
   2858  *
   2859  *	Watchdog timer handler.
   2860  */
   2861 static void
   2862 wm_watchdog(struct ifnet *ifp)
   2863 {
   2864 	int qid;
   2865 	struct wm_softc *sc = ifp->if_softc;
   2866 
   2867 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2868 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2869 
   2870 		wm_watchdog_txq(ifp, txq);
   2871 	}
   2872 
   2873 	/* Reset the interface. */
   2874 	(void) wm_init(ifp);
   2875 
	/*
	 * Some upper layer processing, e.g. ALTQ or a single CPU
	 * system, still calls ifp->if_start() directly.
	 */
   2880 	/* Try to get more packets going. */
   2881 	ifp->if_start(ifp);
   2882 }
   2883 
   2884 static void
   2885 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2886 {
   2887 	struct wm_softc *sc = ifp->if_softc;
   2888 
   2889 	/*
   2890 	 * Since we're using delayed interrupts, sweep up
   2891 	 * before we report an error.
   2892 	 */
   2893 	mutex_enter(txq->txq_lock);
   2894 	wm_txeof(sc, txq);
   2895 	mutex_exit(txq->txq_lock);
   2896 
   2897 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2898 #ifdef WM_DEBUG
   2899 		int i, j;
   2900 		struct wm_txsoft *txs;
   2901 #endif
   2902 		log(LOG_ERR,
   2903 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2904 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2905 		    txq->txq_next);
   2906 		ifp->if_oerrors++;
   2907 #ifdef WM_DEBUG
		for (i = txq->txq_sdirty; i != txq->txq_snext;
		    i = WM_NEXTTXS(txq, i)) {
			txs = &txq->txq_soft[i];
			printf("txs %d tx %d -> %d\n",
			    i, txs->txs_firstdesc, txs->txs_lastdesc);
			for (j = txs->txs_firstdesc; ;
			    j = WM_NEXTTX(txq, j)) {
				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
				printf("\t %#08x%08x\n",
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
				if (j == txs->txs_lastdesc)
					break;
			}
		}
   2924 #endif
   2925 	}
   2926 }
   2927 
   2928 /*
   2929  * wm_tick:
   2930  *
   2931  *	One second timer, used to check link status, sweep up
   2932  *	completed transmit jobs, etc.
   2933  */
   2934 static void
   2935 wm_tick(void *arg)
   2936 {
   2937 	struct wm_softc *sc = arg;
   2938 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2939 #ifndef WM_MPSAFE
   2940 	int s = splnet();
   2941 #endif
   2942 
   2943 	WM_CORE_LOCK(sc);
   2944 
   2945 	if (sc->sc_core_stopping)
   2946 		goto out;
   2947 
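	/*
	 * The MAC statistics registers are clear-on-read, so each read
	 * below yields the count accumulated since the previous tick.
	 */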
   2948 	if (sc->sc_type >= WM_T_82542_2_1) {
   2949 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2950 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2951 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2952 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2953 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2954 	}
   2955 
   2956 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2957 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2958 	    + CSR_READ(sc, WMREG_CRCERRS)
   2959 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2960 	    + CSR_READ(sc, WMREG_SYMERRC)
   2961 	    + CSR_READ(sc, WMREG_RXERRC)
   2962 	    + CSR_READ(sc, WMREG_SEC)
   2963 	    + CSR_READ(sc, WMREG_CEXTERR)
   2964 	    + CSR_READ(sc, WMREG_RLEC);
	/*
	 * WMREG_RNBC is incremented when there are no available buffers
	 * in host memory. It does not mean the number of dropped
	 * packets, because the ethernet controller can still receive
	 * packets in such a case if there is space in the PHY's FIFO.
	 *
	 * If you want to know the number of WMREG_RNBC events, you
	 * should use your own EVCNT instead of if_iqdrops.
	 */
   2974 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2975 
   2976 	if (sc->sc_flags & WM_F_HAS_MII)
   2977 		mii_tick(&sc->sc_mii);
   2978 	else if ((sc->sc_type >= WM_T_82575)
   2979 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2980 		wm_serdes_tick(sc);
   2981 	else
   2982 		wm_tbi_tick(sc);
   2983 
   2984 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2985 out:
   2986 	WM_CORE_UNLOCK(sc);
   2987 #ifndef WM_MPSAFE
   2988 	splx(s);
   2989 #endif
   2990 }
   2991 
   2992 static int
   2993 wm_ifflags_cb(struct ethercom *ec)
   2994 {
   2995 	struct ifnet *ifp = &ec->ec_if;
   2996 	struct wm_softc *sc = ifp->if_softc;
   2997 	int rc = 0;
   2998 
   2999 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3000 		device_xname(sc->sc_dev), __func__));
   3001 
   3002 	WM_CORE_LOCK(sc);
   3003 
   3004 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3005 	sc->sc_if_flags = ifp->if_flags;
   3006 
   3007 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3008 		rc = ENETRESET;
   3009 		goto out;
   3010 	}
   3011 
   3012 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3013 		wm_set_filter(sc);
   3014 
   3015 	wm_set_vlan(sc);
   3016 
   3017 out:
   3018 	WM_CORE_UNLOCK(sc);
   3019 
   3020 	return rc;
   3021 }
   3022 
   3023 /*
   3024  * wm_ioctl:		[ifnet interface function]
   3025  *
   3026  *	Handle control requests from the operator.
   3027  */
   3028 static int
   3029 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3030 {
   3031 	struct wm_softc *sc = ifp->if_softc;
   3032 	struct ifreq *ifr = (struct ifreq *) data;
   3033 	struct ifaddr *ifa = (struct ifaddr *)data;
   3034 	struct sockaddr_dl *sdl;
   3035 	int s, error;
   3036 
   3037 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3038 		device_xname(sc->sc_dev), __func__));
   3039 
   3040 #ifndef WM_MPSAFE
   3041 	s = splnet();
   3042 #endif
   3043 	switch (cmd) {
   3044 	case SIOCSIFMEDIA:
   3045 	case SIOCGIFMEDIA:
   3046 		WM_CORE_LOCK(sc);
   3047 		/* Flow control requires full-duplex mode. */
   3048 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3049 		    (ifr->ifr_media & IFM_FDX) == 0)
   3050 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3051 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3052 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3053 				/* We can do both TXPAUSE and RXPAUSE. */
   3054 				ifr->ifr_media |=
   3055 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3056 			}
   3057 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3058 		}
   3059 		WM_CORE_UNLOCK(sc);
   3060 #ifdef WM_MPSAFE
   3061 		s = splnet();
   3062 #endif
   3063 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3064 #ifdef WM_MPSAFE
   3065 		splx(s);
   3066 #endif
   3067 		break;
   3068 	case SIOCINITIFADDR:
   3069 		WM_CORE_LOCK(sc);
   3070 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3071 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3072 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3073 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3074 			/* unicast address is first multicast entry */
   3075 			wm_set_filter(sc);
   3076 			error = 0;
   3077 			WM_CORE_UNLOCK(sc);
   3078 			break;
   3079 		}
   3080 		WM_CORE_UNLOCK(sc);
   3081 		/*FALLTHROUGH*/
   3082 	default:
   3083 #ifdef WM_MPSAFE
   3084 		s = splnet();
   3085 #endif
		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3087 		error = ether_ioctl(ifp, cmd, data);
   3088 #ifdef WM_MPSAFE
   3089 		splx(s);
   3090 #endif
   3091 		if (error != ENETRESET)
   3092 			break;
   3093 
   3094 		error = 0;
   3095 
   3096 		if (cmd == SIOCSIFCAP) {
   3097 			error = (*ifp->if_init)(ifp);
   3098 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3099 			;
   3100 		else if (ifp->if_flags & IFF_RUNNING) {
   3101 			/*
   3102 			 * Multicast list has changed; set the hardware filter
   3103 			 * accordingly.
   3104 			 */
   3105 			WM_CORE_LOCK(sc);
   3106 			wm_set_filter(sc);
   3107 			WM_CORE_UNLOCK(sc);
   3108 		}
   3109 		break;
   3110 	}
   3111 
   3112 #ifndef WM_MPSAFE
   3113 	splx(s);
   3114 #endif
   3115 	return error;
   3116 }
   3117 
   3118 /* MAC address related */
   3119 
/*
 * Get the offset of the MAC address and return it.
 * If an error occurs, use offset 0.
 */
   3124 static uint16_t
   3125 wm_check_alt_mac_addr(struct wm_softc *sc)
   3126 {
   3127 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3128 	uint16_t offset = NVM_OFF_MACADDR;
   3129 
   3130 	/* Try to read alternative MAC address pointer */
   3131 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3132 		return 0;
   3133 
	/* Check whether the pointer is valid or not. */
   3135 	if ((offset == 0x0000) || (offset == 0xffff))
   3136 		return 0;
   3137 
   3138 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
	/*
	 * Check whether the alternative MAC address is valid or not.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * Check whether the multicast (group) bit is set or not; a
	 * valid unicast address must have it clear.
	 */
   3146 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3147 		if (((myea[0] & 0xff) & 0x01) == 0)
   3148 			return offset; /* Found */
   3149 
   3150 	/* Not found */
   3151 	return 0;
   3152 }
   3153 
   3154 static int
   3155 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3156 {
   3157 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3158 	uint16_t offset = NVM_OFF_MACADDR;
   3159 	int do_invert = 0;
   3160 
   3161 	switch (sc->sc_type) {
   3162 	case WM_T_82580:
   3163 	case WM_T_I350:
   3164 	case WM_T_I354:
   3165 		/* EEPROM Top Level Partitioning */
   3166 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3167 		break;
   3168 	case WM_T_82571:
   3169 	case WM_T_82575:
   3170 	case WM_T_82576:
   3171 	case WM_T_80003:
   3172 	case WM_T_I210:
   3173 	case WM_T_I211:
   3174 		offset = wm_check_alt_mac_addr(sc);
   3175 		if (offset == 0)
   3176 			if ((sc->sc_funcid & 0x01) == 1)
   3177 				do_invert = 1;
   3178 		break;
   3179 	default:
   3180 		if ((sc->sc_funcid & 0x01) == 1)
   3181 			do_invert = 1;
   3182 		break;
   3183 	}
   3184 
   3185 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3186 		goto bad;
   3187 
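	/*
	 * The NVM stores the MAC address as three consecutive
	 * little-endian 16-bit words; unpack them into bytes.
	 */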
   3188 	enaddr[0] = myea[0] & 0xff;
   3189 	enaddr[1] = myea[0] >> 8;
   3190 	enaddr[2] = myea[1] & 0xff;
   3191 	enaddr[3] = myea[1] >> 8;
   3192 	enaddr[4] = myea[2] & 0xff;
   3193 	enaddr[5] = myea[2] >> 8;
   3194 
   3195 	/*
   3196 	 * Toggle the LSB of the MAC address on the second port
   3197 	 * of some dual port cards.
   3198 	 */
   3199 	if (do_invert != 0)
   3200 		enaddr[5] ^= 1;
   3201 
   3202 	return 0;
   3203 
   3204  bad:
   3205 	return -1;
   3206 }
   3207 
   3208 /*
   3209  * wm_set_ral:
   3210  *
 *	Set an entry in the receive address list.
   3212  */
   3213 static void
   3214 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3215 {
   3216 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3217 	uint32_t wlock_mac;
   3218 	int rv;
   3219 
   3220 	if (enaddr != NULL) {
   3221 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3222 		    (enaddr[3] << 24);
   3223 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3224 		ral_hi |= RAL_AV;
   3225 	} else {
   3226 		ral_lo = 0;
   3227 		ral_hi = 0;
   3228 	}
   3229 
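	/*
	 * Write the pair: the low 32 bits of the address go into RAL
	 * and the remaining 16 bits plus the Address Valid flag into
	 * RAH.  On the PCH2, PCH_LPT and PCH_SPT variants, entries
	 * other than 0 live in the shared SHRA registers, which may be
	 * locked by firmware (FWSM_WLOCK_MAC); entries beyond the lock
	 * limit are left untouched.
	 */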
   3230 	switch (sc->sc_type) {
   3231 	case WM_T_82542_2_0:
   3232 	case WM_T_82542_2_1:
   3233 	case WM_T_82543:
   3234 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3235 		CSR_WRITE_FLUSH(sc);
   3236 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3237 		CSR_WRITE_FLUSH(sc);
   3238 		break;
   3239 	case WM_T_PCH2:
   3240 	case WM_T_PCH_LPT:
   3241 	case WM_T_PCH_SPT:
   3242 		if (idx == 0) {
   3243 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3244 			CSR_WRITE_FLUSH(sc);
   3245 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3246 			CSR_WRITE_FLUSH(sc);
   3247 			return;
   3248 		}
   3249 		if (sc->sc_type != WM_T_PCH2) {
   3250 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3251 			    FWSM_WLOCK_MAC);
   3252 			addrl = WMREG_SHRAL(idx - 1);
   3253 			addrh = WMREG_SHRAH(idx - 1);
   3254 		} else {
   3255 			wlock_mac = 0;
   3256 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3257 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3258 		}
   3259 
   3260 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3261 			rv = wm_get_swflag_ich8lan(sc);
   3262 			if (rv != 0)
   3263 				return;
   3264 			CSR_WRITE(sc, addrl, ral_lo);
   3265 			CSR_WRITE_FLUSH(sc);
   3266 			CSR_WRITE(sc, addrh, ral_hi);
   3267 			CSR_WRITE_FLUSH(sc);
   3268 			wm_put_swflag_ich8lan(sc);
   3269 		}
   3270 
   3271 		break;
   3272 	default:
   3273 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3274 		CSR_WRITE_FLUSH(sc);
   3275 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3276 		CSR_WRITE_FLUSH(sc);
   3277 		break;
   3278 	}
   3279 }
   3280 
   3281 /*
   3282  * wm_mchash:
   3283  *
   3284  *	Compute the hash of the multicast address for the 4096-bit
   3285  *	multicast filter.
   3286  */
   3287 static uint32_t
   3288 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3289 {
   3290 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3291 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3292 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3293 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3294 	uint32_t hash;
   3295 
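	/*
	 * The hash is built from the last two bytes of the ethernet
	 * address; sc_mchash_type selects how many bits are taken from
	 * each byte.  ICH/PCH chips use a 1024-bit filter (10-bit
	 * hash), all others a 4096-bit filter (12-bit hash).
	 */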
   3296 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3297 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3298 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3299 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3300 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3301 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3302 		return (hash & 0x3ff);
   3303 	}
   3304 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3305 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3306 
   3307 	return (hash & 0xfff);
   3308 }
   3309 
   3310 /*
   3311  * wm_set_filter:
   3312  *
   3313  *	Set up the receive filter.
   3314  */
   3315 static void
   3316 wm_set_filter(struct wm_softc *sc)
   3317 {
   3318 	struct ethercom *ec = &sc->sc_ethercom;
   3319 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3320 	struct ether_multi *enm;
   3321 	struct ether_multistep step;
   3322 	bus_addr_t mta_reg;
   3323 	uint32_t hash, reg, bit;
   3324 	int i, size, ralmax;
   3325 
   3326 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3327 		device_xname(sc->sc_dev), __func__));
   3328 
   3329 	if (sc->sc_type >= WM_T_82544)
   3330 		mta_reg = WMREG_CORDOVA_MTA;
   3331 	else
   3332 		mta_reg = WMREG_MTA;
   3333 
   3334 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3335 
   3336 	if (ifp->if_flags & IFF_BROADCAST)
   3337 		sc->sc_rctl |= RCTL_BAM;
   3338 	if (ifp->if_flags & IFF_PROMISC) {
   3339 		sc->sc_rctl |= RCTL_UPE;
   3340 		goto allmulti;
   3341 	}
   3342 
   3343 	/*
   3344 	 * Set the station address in the first RAL slot, and
   3345 	 * clear the remaining slots.
   3346 	 */
   3347 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3349 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3350 	    || (sc->sc_type == WM_T_PCH))
   3351 		size = WM_RAL_TABSIZE_ICH8;
   3352 	else if (sc->sc_type == WM_T_PCH2)
   3353 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT))
   3355 		size = WM_RAL_TABSIZE_PCH_LPT;
   3356 	else if (sc->sc_type == WM_T_82575)
   3357 		size = WM_RAL_TABSIZE_82575;
   3358 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3359 		size = WM_RAL_TABSIZE_82576;
   3360 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3361 		size = WM_RAL_TABSIZE_I350;
   3362 	else
   3363 		size = WM_RAL_TABSIZE;
   3364 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3365 
   3366 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3367 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3368 		switch (i) {
   3369 		case 0:
   3370 			/* We can use all entries */
   3371 			ralmax = size;
   3372 			break;
   3373 		case 1:
   3374 			/* Only RAR[0] */
   3375 			ralmax = 1;
   3376 			break;
   3377 		default:
   3378 			/* available SHRA + RAR[0] */
   3379 			ralmax = i + 1;
   3380 		}
   3381 	} else
   3382 		ralmax = size;
   3383 	for (i = 1; i < size; i++) {
   3384 		if (i < ralmax)
   3385 			wm_set_ral(sc, NULL, i);
   3386 	}
   3387 
   3388 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3389 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3390 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3391 	    || (sc->sc_type == WM_T_PCH_SPT))
   3392 		size = WM_ICH8_MC_TABSIZE;
   3393 	else
   3394 		size = WM_MC_TABSIZE;
   3395 	/* Clear out the multicast table. */
   3396 	for (i = 0; i < size; i++) {
   3397 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3398 		CSR_WRITE_FLUSH(sc);
   3399 	}
   3400 
   3401 	ETHER_LOCK(ec);
   3402 	ETHER_FIRST_MULTI(step, ec, enm);
   3403 	while (enm != NULL) {
   3404 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3405 			ETHER_UNLOCK(ec);
   3406 			/*
   3407 			 * We must listen to a range of multicast addresses.
   3408 			 * For now, just accept all multicasts, rather than
   3409 			 * trying to set only those filter bits needed to match
   3410 			 * the range.  (At this time, the only use of address
   3411 			 * ranges is for IP multicast routing, for which the
   3412 			 * range is big enough to require all bits set.)
   3413 			 */
   3414 			goto allmulti;
   3415 		}
   3416 
   3417 		hash = wm_mchash(sc, enm->enm_addrlo);
   3418 
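		/*
		 * The hash selects one bit in the multicast table: the
		 * upper bits index a 32-bit MTA word and the low 5
		 * bits select the bit within that word.
		 */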
   3419 		reg = (hash >> 5);
   3420 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3421 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3422 		    || (sc->sc_type == WM_T_PCH2)
   3423 		    || (sc->sc_type == WM_T_PCH_LPT)
   3424 		    || (sc->sc_type == WM_T_PCH_SPT))
   3425 			reg &= 0x1f;
   3426 		else
   3427 			reg &= 0x7f;
   3428 		bit = hash & 0x1f;
   3429 
   3430 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3431 		hash |= 1U << bit;
   3432 
   3433 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3434 			/*
   3435 			 * 82544 Errata 9: Certain register cannot be written
   3436 			 * with particular alignments in PCI-X bus operation
   3437 			 * (FCAH, MTA and VFTA).
   3438 			 */
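			/*
			 * Read back the adjacent even-numbered word and
			 * rewrite it after the update, so the pair is
			 * always written with the required alignment.
			 */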
   3439 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3440 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3441 			CSR_WRITE_FLUSH(sc);
   3442 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3443 			CSR_WRITE_FLUSH(sc);
   3444 		} else {
   3445 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3446 			CSR_WRITE_FLUSH(sc);
   3447 		}
   3448 
   3449 		ETHER_NEXT_MULTI(step, enm);
   3450 	}
   3451 	ETHER_UNLOCK(ec);
   3452 
   3453 	ifp->if_flags &= ~IFF_ALLMULTI;
   3454 	goto setit;
   3455 
   3456  allmulti:
   3457 	ifp->if_flags |= IFF_ALLMULTI;
   3458 	sc->sc_rctl |= RCTL_MPE;
   3459 
   3460  setit:
   3461 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3462 }
   3463 
   3464 /* Reset and init related */
   3465 
   3466 static void
   3467 wm_set_vlan(struct wm_softc *sc)
   3468 {
   3469 
   3470 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3471 		device_xname(sc->sc_dev), __func__));
   3472 
   3473 	/* Deal with VLAN enables. */
   3474 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3475 		sc->sc_ctrl |= CTRL_VME;
   3476 	else
   3477 		sc->sc_ctrl &= ~CTRL_VME;
   3478 
   3479 	/* Write the control registers. */
   3480 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3481 }
   3482 
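/*
 * wm_set_pcie_completion_timeout:
 *
 *	Set the PCIe completion timeout if it is still at its default.
 *	Devices without a version 2 PCIe capability get a 10ms timeout
 *	via GCR; newer ones are programmed to 16ms through the Device
 *	Control 2 register in PCI config space.
 */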
   3483 static void
   3484 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3485 {
   3486 	uint32_t gcr;
   3487 	pcireg_t ctrl2;
   3488 
   3489 	gcr = CSR_READ(sc, WMREG_GCR);
   3490 
   3491 	/* Only take action if timeout value is defaulted to 0 */
   3492 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3493 		goto out;
   3494 
   3495 	if ((gcr & GCR_CAP_VER2) == 0) {
   3496 		gcr |= GCR_CMPL_TMOUT_10MS;
   3497 		goto out;
   3498 	}
   3499 
   3500 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3501 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3502 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3503 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3504 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3505 
   3506 out:
   3507 	/* Disable completion timeout resend */
   3508 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3509 
   3510 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3511 }
   3512 
   3513 void
   3514 wm_get_auto_rd_done(struct wm_softc *sc)
   3515 {
   3516 	int i;
   3517 
   3518 	/* wait for eeprom to reload */
   3519 	switch (sc->sc_type) {
   3520 	case WM_T_82571:
   3521 	case WM_T_82572:
   3522 	case WM_T_82573:
   3523 	case WM_T_82574:
   3524 	case WM_T_82583:
   3525 	case WM_T_82575:
   3526 	case WM_T_82576:
   3527 	case WM_T_82580:
   3528 	case WM_T_I350:
   3529 	case WM_T_I354:
   3530 	case WM_T_I210:
   3531 	case WM_T_I211:
   3532 	case WM_T_80003:
   3533 	case WM_T_ICH8:
   3534 	case WM_T_ICH9:
   3535 		for (i = 0; i < 10; i++) {
   3536 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3537 				break;
   3538 			delay(1000);
   3539 		}
   3540 		if (i == 10) {
   3541 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3542 			    "complete\n", device_xname(sc->sc_dev));
   3543 		}
   3544 		break;
   3545 	default:
   3546 		break;
   3547 	}
   3548 }
   3549 
   3550 void
   3551 wm_lan_init_done(struct wm_softc *sc)
   3552 {
   3553 	uint32_t reg = 0;
   3554 	int i;
   3555 
   3556 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3557 		device_xname(sc->sc_dev), __func__));
   3558 
   3559 	/* Wait for eeprom to reload */
   3560 	switch (sc->sc_type) {
   3561 	case WM_T_ICH10:
   3562 	case WM_T_PCH:
   3563 	case WM_T_PCH2:
   3564 	case WM_T_PCH_LPT:
   3565 	case WM_T_PCH_SPT:
   3566 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3567 			reg = CSR_READ(sc, WMREG_STATUS);
   3568 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3569 				break;
   3570 			delay(100);
   3571 		}
   3572 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3573 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3574 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3575 		}
   3576 		break;
   3577 	default:
   3578 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3579 		    __func__);
   3580 		break;
   3581 	}
   3582 
   3583 	reg &= ~STATUS_LAN_INIT_DONE;
   3584 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3585 }
   3586 
   3587 void
   3588 wm_get_cfg_done(struct wm_softc *sc)
   3589 {
   3590 	int mask;
   3591 	uint32_t reg;
   3592 	int i;
   3593 
   3594 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3595 		device_xname(sc->sc_dev), __func__));
   3596 
   3597 	/* Wait for eeprom to reload */
   3598 	switch (sc->sc_type) {
   3599 	case WM_T_82542_2_0:
   3600 	case WM_T_82542_2_1:
   3601 		/* null */
   3602 		break;
   3603 	case WM_T_82543:
   3604 	case WM_T_82544:
   3605 	case WM_T_82540:
   3606 	case WM_T_82545:
   3607 	case WM_T_82545_3:
   3608 	case WM_T_82546:
   3609 	case WM_T_82546_3:
   3610 	case WM_T_82541:
   3611 	case WM_T_82541_2:
   3612 	case WM_T_82547:
   3613 	case WM_T_82547_2:
   3614 	case WM_T_82573:
   3615 	case WM_T_82574:
   3616 	case WM_T_82583:
   3617 		/* generic */
   3618 		delay(10*1000);
   3619 		break;
   3620 	case WM_T_80003:
   3621 	case WM_T_82571:
   3622 	case WM_T_82572:
   3623 	case WM_T_82575:
   3624 	case WM_T_82576:
   3625 	case WM_T_82580:
   3626 	case WM_T_I350:
   3627 	case WM_T_I354:
   3628 	case WM_T_I210:
   3629 	case WM_T_I211:
   3630 		if (sc->sc_type == WM_T_82571) {
   3631 			/* Only 82571 shares port 0 */
   3632 			mask = EEMNGCTL_CFGDONE_0;
   3633 		} else
   3634 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3635 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3636 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3637 				break;
   3638 			delay(1000);
   3639 		}
   3640 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3641 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3642 				device_xname(sc->sc_dev), __func__));
   3643 		}
   3644 		break;
   3645 	case WM_T_ICH8:
   3646 	case WM_T_ICH9:
   3647 	case WM_T_ICH10:
   3648 	case WM_T_PCH:
   3649 	case WM_T_PCH2:
   3650 	case WM_T_PCH_LPT:
   3651 	case WM_T_PCH_SPT:
   3652 		delay(10*1000);
   3653 		if (sc->sc_type >= WM_T_ICH10)
   3654 			wm_lan_init_done(sc);
   3655 		else
   3656 			wm_get_auto_rd_done(sc);
   3657 
   3658 		reg = CSR_READ(sc, WMREG_STATUS);
   3659 		if ((reg & STATUS_PHYRA) != 0)
   3660 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3661 		break;
   3662 	default:
   3663 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3664 		    __func__);
   3665 		break;
   3666 	}
   3667 }
   3668 
   3669 void
   3670 wm_phy_post_reset(struct wm_softc *sc)
   3671 {
   3672 	uint32_t reg;
   3673 
   3674 	/* This function is only for ICH8 and newer. */
   3675 	if (sc->sc_type < WM_T_ICH8)
   3676 		return;
   3677 
   3678 	if (wm_phy_resetisblocked(sc)) {
   3679 		/* XXX */
   3680 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3681 		return;
   3682 	}
   3683 
	/* Allow time for h/w to get to a quiescent state after reset */
   3685 	delay(10*1000);
   3686 
   3687 	/* Perform any necessary post-reset workarounds */
   3688 	if (sc->sc_type == WM_T_PCH)
   3689 		wm_hv_phy_workaround_ich8lan(sc);
   3690 	if (sc->sc_type == WM_T_PCH2)
   3691 		wm_lv_phy_workaround_ich8lan(sc);
   3692 
   3693 	/* Clear the host wakeup bit after lcd reset */
   3694 	if (sc->sc_type >= WM_T_PCH) {
   3695 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3696 		    BM_PORT_GEN_CFG);
   3697 		reg &= ~BM_WUC_HOST_WU_BIT;
   3698 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3699 		    BM_PORT_GEN_CFG, reg);
   3700 	}
   3701 
   3702 	/* Configure the LCD with the extended configuration region in NVM */
   3703 	wm_init_lcd_from_nvm(sc);
   3704 
	/* XXX Configure the LCD with the OEM bits in NVM (not done yet) */
   3706 }
   3707 
   3708 void
   3709 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3710 {
   3711 #if 0
   3712 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3713 	uint16_t phy_page = 0;
   3714 
   3715 	switch (sc->sc_type) {
   3716 	case WM_T_ICH8:
   3717 		if (sc->sc_phytype != WMPHY_IGP_3)
   3718 			return;
   3719 
   3720 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3721 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3722 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3723 			break;
   3724 		}
   3725 		/* FALLTHROUGH */
   3726 	case WM_T_PCH:
   3727 	case WM_T_PCH2:
   3728 	case WM_T_PCH_LPT:
   3729 	case WM_T_PCH_SPT:
   3730 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3731 		break;
   3732 	default:
   3733 		return;
   3734 	}
   3735 
   3736 	sc->phy.acquire(sc);
   3737 
   3738 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3739 	if ((reg & sw_cfg_mask) == 0)
   3740 		goto release;
   3741 
   3742 	/*
   3743 	 * Make sure HW does not configure LCD from PHY extended configuration
   3744 	 * before SW configuration
   3745 	 */
   3746 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3747 	if ((sc->sc_type < WM_T_PCH2)
   3748 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3749 		goto release;
   3750 
   3751 	/* word_addr is in DWORD */
   3752 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3753 
   3754 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3755 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3756 
   3757 	if (((sc->sc_type == WM_T_PCH)
   3758 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3759 	    || (sc->sc_type > WM_T_PCH)) {
   3760 		/*
   3761 		 * HW configures the SMBus address and LEDs when the OEM and
   3762 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3763 		 * are cleared, SW will configure them instead.
   3764 		 */
   3765 		device_printf(sc->sc_dev, "%s: need write_smbus()\n",
   3766 		    __func__);
   3767 
   3768 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3769 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3770 	}
   3771 
   3772 	/* Configure LCD from extended configuration region. */
   3773 	for (i = 0; i < cnf_size; i++) {
   3774 		uint16_t reg_data, reg_addr;
   3775 
   3776 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3777 			goto release;
   3778 
   3779 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3780 			goto release;
   3781 
   3782 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3783 			phy_page = reg_data;
   3784 
   3785 		reg_addr &= IGPHY_MAXREGADDR;
   3786 		reg_addr |= phy_page;
   3787 
   3788 		sc->phy.release(sc); /* XXX */
   3789 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3790 		sc->phy.acquire(sc); /* XXX */
   3791 	}
   3792 
   3793 release:
   3794 	sc->phy.release(sc);
   3795 	return;
   3796 #endif
   3797 }
   3798 
   3799 
   3800 /* Init hardware bits */
   3801 void
   3802 wm_initialize_hardware_bits(struct wm_softc *sc)
   3803 {
   3804 	uint32_t tarc0, tarc1, reg;
   3805 
   3806 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3807 		device_xname(sc->sc_dev), __func__));
   3808 
	/* For the 82571 variants, 80003 and ICHs */
   3810 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3811 	    || (sc->sc_type >= WM_T_80003)) {
   3812 
   3813 		/* Transmit Descriptor Control 0 */
   3814 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3815 		reg |= TXDCTL_COUNT_DESC;
   3816 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3817 
   3818 		/* Transmit Descriptor Control 1 */
   3819 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3820 		reg |= TXDCTL_COUNT_DESC;
   3821 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3822 
   3823 		/* TARC0 */
   3824 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3825 		switch (sc->sc_type) {
   3826 		case WM_T_82571:
   3827 		case WM_T_82572:
   3828 		case WM_T_82573:
   3829 		case WM_T_82574:
   3830 		case WM_T_82583:
   3831 		case WM_T_80003:
   3832 			/* Clear bits 30..27 */
   3833 			tarc0 &= ~__BITS(30, 27);
   3834 			break;
   3835 		default:
   3836 			break;
   3837 		}
   3838 
   3839 		switch (sc->sc_type) {
   3840 		case WM_T_82571:
   3841 		case WM_T_82572:
   3842 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3843 
   3844 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3845 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3846 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3847 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3849 
   3850 			/* TARC1 bit 28 */
   3851 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3852 				tarc1 &= ~__BIT(28);
   3853 			else
   3854 				tarc1 |= __BIT(28);
   3855 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3856 
   3857 			/*
   3858 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3860 			 */
   3861 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3862 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3863 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3864 			break;
   3865 		case WM_T_82573:
   3866 		case WM_T_82574:
   3867 		case WM_T_82583:
   3868 			if ((sc->sc_type == WM_T_82574)
   3869 			    || (sc->sc_type == WM_T_82583))
   3870 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3871 
   3872 			/* Extended Device Control */
   3873 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3874 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3875 			reg |= __BIT(22);	/* Set bit 22 */
   3876 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3877 
   3878 			/* Device Control */
   3879 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3880 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3881 
   3882 			/* PCIe Control Register */
   3883 			/*
   3884 			 * 82573 Errata (unknown).
   3885 			 *
   3886 			 * 82574 Errata 25 and 82583 Errata 12
   3887 			 * "Dropped Rx Packets":
			 *   NVM Image Version 2.1.4 and newer does not
			 *   have this bug.
   3889 			 */
   3890 			reg = CSR_READ(sc, WMREG_GCR);
   3891 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3892 			CSR_WRITE(sc, WMREG_GCR, reg);
   3893 
   3894 			if ((sc->sc_type == WM_T_82574)
   3895 			    || (sc->sc_type == WM_T_82583)) {
   3896 				/*
   3897 				 * Document says this bit must be set for
   3898 				 * proper operation.
   3899 				 */
   3900 				reg = CSR_READ(sc, WMREG_GCR);
   3901 				reg |= __BIT(22);
   3902 				CSR_WRITE(sc, WMREG_GCR, reg);
   3903 
				/*
				 * Apply a workaround for the hardware
				 * errata documented in the errata docs.
				 * It fixes an issue where some error-prone
				 * or unreliable PCIe completions occur,
				 * particularly with ASPM enabled. Without
				 * the fix, the issue can cause Tx timeouts.
				 */
   3912 				reg = CSR_READ(sc, WMREG_GCR2);
   3913 				reg |= __BIT(0);
   3914 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3915 			}
   3916 			break;
   3917 		case WM_T_80003:
   3918 			/* TARC0 */
   3919 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3920 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3922 
   3923 			/* TARC1 bit 28 */
   3924 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3925 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3926 				tarc1 &= ~__BIT(28);
   3927 			else
   3928 				tarc1 |= __BIT(28);
   3929 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3930 			break;
   3931 		case WM_T_ICH8:
   3932 		case WM_T_ICH9:
   3933 		case WM_T_ICH10:
   3934 		case WM_T_PCH:
   3935 		case WM_T_PCH2:
   3936 		case WM_T_PCH_LPT:
   3937 		case WM_T_PCH_SPT:
   3938 			/* TARC0 */
   3939 			if ((sc->sc_type == WM_T_ICH8)
   3940 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3941 				/* Set TARC0 bits 29 and 28 */
   3942 				tarc0 |= __BITS(29, 28);
   3943 			}
   3944 			/* Set TARC0 bits 23,24,26,27 */
   3945 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3946 
   3947 			/* CTRL_EXT */
   3948 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3949 			reg |= __BIT(22);	/* Set bit 22 */
   3950 			/*
   3951 			 * Enable PHY low-power state when MAC is at D3
   3952 			 * w/o WoL
   3953 			 */
   3954 			if (sc->sc_type >= WM_T_PCH)
   3955 				reg |= CTRL_EXT_PHYPDEN;
   3956 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3957 
   3958 			/* TARC1 */
   3959 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3960 			/* bit 28 */
   3961 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3962 				tarc1 &= ~__BIT(28);
   3963 			else
   3964 				tarc1 |= __BIT(28);
   3965 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3966 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3967 
   3968 			/* Device Status */
   3969 			if (sc->sc_type == WM_T_ICH8) {
   3970 				reg = CSR_READ(sc, WMREG_STATUS);
   3971 				reg &= ~__BIT(31);
				CSR_WRITE(sc, WMREG_STATUS, reg);
			}
   3975 
   3976 			/* IOSFPC */
   3977 			if (sc->sc_type == WM_T_PCH_SPT) {
   3978 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3979 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3980 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3981 			}
			/*
			 * To work around a descriptor data corruption issue
			 * during NFS v2 UDP traffic, just disable the NFS
			 * filtering capability.
			 */
   3987 			reg = CSR_READ(sc, WMREG_RFCTL);
   3988 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3989 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3990 			break;
   3991 		default:
   3992 			break;
   3993 		}
   3994 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3995 
   3996 		switch (sc->sc_type) {
   3997 		/*
   3998 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3999 		 * Avoid RSS Hash Value bug.
   4000 		 */
   4001 		case WM_T_82571:
   4002 		case WM_T_82572:
   4003 		case WM_T_82573:
   4004 		case WM_T_80003:
   4005 		case WM_T_ICH8:
   4006 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS
			    | WMREG_RFCTL_IPV6EXDIS;
   4008 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4009 			break;
   4010 		case WM_T_82574:
			/* Use extended Rx descriptors. */
   4012 			reg = CSR_READ(sc, WMREG_RFCTL);
   4013 			reg |= WMREG_RFCTL_EXSTEN;
   4014 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4015 			break;
   4016 		default:
   4017 			break;
   4018 		}
   4019 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4020 		/*
   4021 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4022 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4023 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4024 		 * Correctly by the Device"
   4025 		 *
   4026 		 * I354(C2000) Errata AVR53:
   4027 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4028 		 * Hang"
   4029 		 */
   4030 		reg = CSR_READ(sc, WMREG_RFCTL);
   4031 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4032 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4033 	}
   4034 }
   4035 
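/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the encoded RXPBS size field on 82580, I350 and I354
 *	into a usable packet buffer size via a lookup table.
 */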
   4036 static uint32_t
   4037 wm_rxpbs_adjust_82580(uint32_t val)
   4038 {
   4039 	uint32_t rv = 0;
   4040 
   4041 	if (val < __arraycount(wm_82580_rxpbs_table))
   4042 		rv = wm_82580_rxpbs_table[val];
   4043 
   4044 	return rv;
   4045 }
   4046 
   4047 /*
   4048  * wm_reset_phy:
   4049  *
   4050  *	generic PHY reset function.
   4051  *	Same as e1000_phy_hw_reset_generic()
   4052  */
   4053 static void
   4054 wm_reset_phy(struct wm_softc *sc)
   4055 {
   4056 	uint32_t reg;
   4057 
   4058 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4059 		device_xname(sc->sc_dev), __func__));
   4060 	if (wm_phy_resetisblocked(sc))
   4061 		return;
   4062 
   4063 	sc->phy.acquire(sc);
   4064 
   4065 	reg = CSR_READ(sc, WMREG_CTRL);
   4066 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4067 	CSR_WRITE_FLUSH(sc);
   4068 
   4069 	delay(sc->phy.reset_delay_us);
   4070 
   4071 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4072 	CSR_WRITE_FLUSH(sc);
   4073 
   4074 	delay(150);
   4075 
   4076 	sc->phy.release(sc);
   4077 
   4078 	wm_get_cfg_done(sc);
   4079 	wm_phy_post_reset(sc);
   4080 }
   4081 
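/*
 * wm_flush_desc_rings:
 *
 *	Flush the Tx and, if needed, the Rx descriptor ring before a
 *	reset.  This is only done when firmware has flagged a pending
 *	flush in the descriptor ring status word in PCI config space.
 *	A dummy Tx descriptor is queued to drain the Tx ring, and the
 *	Rx ring is briefly re-enabled with adjusted thresholds to
 *	drain it.
 */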
   4082 static void
   4083 wm_flush_desc_rings(struct wm_softc *sc)
   4084 {
   4085 	pcireg_t preg;
   4086 	uint32_t reg;
   4087 	struct wm_txqueue *txq;
   4088 	wiseman_txdesc_t *txd;
   4089 	int nexttx;
   4090 	uint32_t rctl;
   4091 
   4092 	/* First, disable MULR fix in FEXTNVM11 */
   4093 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4094 	reg |= FEXTNVM11_DIS_MULRFIX;
   4095 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4096 
   4097 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4098 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4099 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4100 		return;
   4101 
   4102 	/* TX */
   4103 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4104 	    device_xname(sc->sc_dev), preg, reg);
   4105 	reg = CSR_READ(sc, WMREG_TCTL);
   4106 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4107 
   4108 	txq = &sc->sc_queue[0].wmq_txq;
   4109 	nexttx = txq->txq_next;
   4110 	txd = &txq->txq_descs[nexttx];
   4111 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4113 	txd->wtx_fields.wtxu_status = 0;
   4114 	txd->wtx_fields.wtxu_options = 0;
   4115 	txd->wtx_fields.wtxu_vlan = 0;
   4116 
   4117 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4118 	    BUS_SPACE_BARRIER_WRITE);
   4119 
   4120 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4121 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4122 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4123 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4124 	delay(250);
   4125 
   4126 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4127 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4128 		return;
   4129 
   4130 	/* RX */
   4131 	printf("%s: Need RX flush (reg = %08x)\n",
   4132 	    device_xname(sc->sc_dev), preg);
   4133 	rctl = CSR_READ(sc, WMREG_RCTL);
   4134 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4135 	CSR_WRITE_FLUSH(sc);
   4136 	delay(150);
   4137 
   4138 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4139 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4140 	reg &= 0xffffc000;
   4141 	/*
   4142 	 * update thresholds: prefetch threshold to 31, host threshold
   4143 	 * to 1 and make sure the granularity is "descriptors" and not
   4144 	 * "cache lines"
   4145 	 */
   4146 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4147 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4148 
   4149 	/*
   4150 	 * momentarily enable the RX ring for the changes to take
   4151 	 * effect
   4152 	 */
   4153 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4154 	CSR_WRITE_FLUSH(sc);
   4155 	delay(150);
   4156 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4157 }
   4158 
   4159 /*
   4160  * wm_reset:
   4161  *
   4162  *	Reset the i82542 chip.
   4163  */
   4164 static void
   4165 wm_reset(struct wm_softc *sc)
   4166 {
   4167 	int phy_reset = 0;
   4168 	int i, error = 0;
   4169 	uint32_t reg;
   4170 
   4171 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4172 		device_xname(sc->sc_dev), __func__));
   4173 	KASSERT(sc->sc_type != 0);
   4174 
   4175 	/*
   4176 	 * Allocate on-chip memory according to the MTU size.
   4177 	 * The Packet Buffer Allocation register must be written
   4178 	 * before the chip is reset.
   4179 	 */
   4180 	switch (sc->sc_type) {
   4181 	case WM_T_82547:
   4182 	case WM_T_82547_2:
   4183 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4184 		    PBA_22K : PBA_30K;
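		/*
		 * The Tx FIFO occupies whatever is left of the 40KB
		 * packet buffer above the Rx allocation chosen above.
		 */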
   4185 		for (i = 0; i < sc->sc_nqueues; i++) {
   4186 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4187 			txq->txq_fifo_head = 0;
   4188 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4189 			txq->txq_fifo_size =
   4190 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4191 			txq->txq_fifo_stall = 0;
   4192 		}
   4193 		break;
   4194 	case WM_T_82571:
   4195 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4197 	case WM_T_80003:
   4198 		sc->sc_pba = PBA_32K;
   4199 		break;
   4200 	case WM_T_82573:
   4201 		sc->sc_pba = PBA_12K;
   4202 		break;
   4203 	case WM_T_82574:
   4204 	case WM_T_82583:
   4205 		sc->sc_pba = PBA_20K;
   4206 		break;
   4207 	case WM_T_82576:
   4208 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4209 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4210 		break;
   4211 	case WM_T_82580:
   4212 	case WM_T_I350:
   4213 	case WM_T_I354:
   4214 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4215 		break;
   4216 	case WM_T_I210:
   4217 	case WM_T_I211:
   4218 		sc->sc_pba = PBA_34K;
   4219 		break;
   4220 	case WM_T_ICH8:
   4221 		/* Workaround for a bit corruption issue in FIFO memory */
   4222 		sc->sc_pba = PBA_8K;
   4223 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4224 		break;
   4225 	case WM_T_ICH9:
   4226 	case WM_T_ICH10:
   4227 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4228 		    PBA_14K : PBA_10K;
   4229 		break;
   4230 	case WM_T_PCH:
   4231 	case WM_T_PCH2:
   4232 	case WM_T_PCH_LPT:
   4233 	case WM_T_PCH_SPT:
   4234 		sc->sc_pba = PBA_26K;
   4235 		break;
   4236 	default:
   4237 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4238 		    PBA_40K : PBA_48K;
   4239 		break;
   4240 	}
   4241 	/*
	 * Only old or non-multiqueue devices have the PBA register.
   4243 	 * XXX Need special handling for 82575.
   4244 	 */
   4245 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4246 	    || (sc->sc_type == WM_T_82575))
   4247 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4248 
   4249 	/* Prevent the PCI-E bus from sticking */
   4250 	if (sc->sc_flags & WM_F_PCIE) {
   4251 		int timeout = 800;
   4252 
   4253 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4254 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4255 
   4256 		while (timeout--) {
   4257 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4258 			    == 0)
   4259 				break;
   4260 			delay(100);
   4261 		}
   4262 		if (timeout == 0)
   4263 			device_printf(sc->sc_dev,
   4264 			    "failed to disable busmastering\n");
   4265 	}
   4266 
   4267 	/* Set the completion timeout for interface */
   4268 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4269 	    || (sc->sc_type == WM_T_82580)
   4270 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4271 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4272 		wm_set_pcie_completion_timeout(sc);
   4273 
   4274 	/* Clear interrupt */
   4275 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4276 	if (wm_is_using_msix(sc)) {
   4277 		if (sc->sc_type != WM_T_82574) {
   4278 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4279 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4280 		} else {
   4281 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4282 		}
   4283 	}
   4284 
   4285 	/* Stop the transmit and receive processes. */
   4286 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4287 	sc->sc_rctl &= ~RCTL_EN;
   4288 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4289 	CSR_WRITE_FLUSH(sc);
   4290 
   4291 	/* XXX set_tbi_sbp_82543() */
   4292 
   4293 	delay(10*1000);
   4294 
   4295 	/* Must acquire the MDIO ownership before MAC reset */
   4296 	switch (sc->sc_type) {
   4297 	case WM_T_82573:
   4298 	case WM_T_82574:
   4299 	case WM_T_82583:
   4300 		error = wm_get_hw_semaphore_82573(sc);
   4301 		break;
   4302 	default:
   4303 		break;
   4304 	}
   4305 
   4306 	/*
   4307 	 * 82541 Errata 29? & 82547 Errata 28?
   4308 	 * See also the description about PHY_RST bit in CTRL register
   4309 	 * in 8254x_GBe_SDM.pdf.
   4310 	 */
   4311 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4312 		CSR_WRITE(sc, WMREG_CTRL,
   4313 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4314 		CSR_WRITE_FLUSH(sc);
   4315 		delay(5000);
   4316 	}
   4317 
   4318 	switch (sc->sc_type) {
   4319 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4320 	case WM_T_82541:
   4321 	case WM_T_82541_2:
   4322 	case WM_T_82547:
   4323 	case WM_T_82547_2:
   4324 		/*
   4325 		 * On some chipsets, a reset through a memory-mapped write
   4326 		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headaches that can be
   4328 		 * avoided by issuing the reset via indirect register writes
   4329 		 * through I/O space.
   4330 		 *
   4331 		 * So, if we successfully mapped the I/O BAR at attach time,
   4332 		 * use that.  Otherwise, try our luck with a memory-mapped
   4333 		 * reset.
   4334 		 */
   4335 		if (sc->sc_flags & WM_F_IOH_VALID)
   4336 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4337 		else
   4338 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4339 		break;
   4340 	case WM_T_82545_3:
   4341 	case WM_T_82546_3:
   4342 		/* Use the shadow control register on these chips. */
   4343 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4344 		break;
   4345 	case WM_T_80003:
   4346 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4347 		sc->phy.acquire(sc);
   4348 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4349 		sc->phy.release(sc);
   4350 		break;
   4351 	case WM_T_ICH8:
   4352 	case WM_T_ICH9:
   4353 	case WM_T_ICH10:
   4354 	case WM_T_PCH:
   4355 	case WM_T_PCH2:
   4356 	case WM_T_PCH_LPT:
   4357 	case WM_T_PCH_SPT:
   4358 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4359 		if (wm_phy_resetisblocked(sc) == false) {
   4360 			/*
   4361 			 * Gate automatic PHY configuration by hardware on
   4362 			 * non-managed 82579
   4363 			 */
   4364 			if ((sc->sc_type == WM_T_PCH2)
   4365 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4366 				== 0))
   4367 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4368 
   4369 			reg |= CTRL_PHY_RESET;
   4370 			phy_reset = 1;
   4371 		} else
   4372 			printf("XXX reset is blocked!!!\n");
   4373 		sc->phy.acquire(sc);
   4374 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier while resetting */
   4376 		delay(20*1000);
   4377 		mutex_exit(sc->sc_ich_phymtx);
   4378 		break;
   4379 	case WM_T_82580:
   4380 	case WM_T_I350:
   4381 	case WM_T_I354:
   4382 	case WM_T_I210:
   4383 	case WM_T_I211:
   4384 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4385 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4386 			CSR_WRITE_FLUSH(sc);
   4387 		delay(5000);
   4388 		break;
   4389 	case WM_T_82542_2_0:
   4390 	case WM_T_82542_2_1:
   4391 	case WM_T_82543:
   4392 	case WM_T_82540:
   4393 	case WM_T_82545:
   4394 	case WM_T_82546:
   4395 	case WM_T_82571:
   4396 	case WM_T_82572:
   4397 	case WM_T_82573:
   4398 	case WM_T_82574:
   4399 	case WM_T_82575:
   4400 	case WM_T_82576:
   4401 	case WM_T_82583:
   4402 	default:
   4403 		/* Everything else can safely use the documented method. */
   4404 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4405 		break;
   4406 	}
   4407 
   4408 	/* Must release the MDIO ownership after MAC reset */
   4409 	switch (sc->sc_type) {
   4410 	case WM_T_82573:
   4411 	case WM_T_82574:
   4412 	case WM_T_82583:
   4413 		if (error == 0)
   4414 			wm_put_hw_semaphore_82573(sc);
   4415 		break;
   4416 	default:
   4417 		break;
   4418 	}
   4419 
   4420 	if (phy_reset != 0)
   4421 		wm_get_cfg_done(sc);
   4422 
   4423 	/* reload EEPROM */
   4424 	switch (sc->sc_type) {
   4425 	case WM_T_82542_2_0:
   4426 	case WM_T_82542_2_1:
   4427 	case WM_T_82543:
   4428 	case WM_T_82544:
   4429 		delay(10);
   4430 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4431 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4432 		CSR_WRITE_FLUSH(sc);
   4433 		delay(2000);
   4434 		break;
   4435 	case WM_T_82540:
   4436 	case WM_T_82545:
   4437 	case WM_T_82545_3:
   4438 	case WM_T_82546:
   4439 	case WM_T_82546_3:
   4440 		delay(5*1000);
   4441 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4442 		break;
   4443 	case WM_T_82541:
   4444 	case WM_T_82541_2:
   4445 	case WM_T_82547:
   4446 	case WM_T_82547_2:
   4447 		delay(20000);
   4448 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4449 		break;
   4450 	case WM_T_82571:
   4451 	case WM_T_82572:
   4452 	case WM_T_82573:
   4453 	case WM_T_82574:
   4454 	case WM_T_82583:
   4455 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4456 			delay(10);
   4457 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4458 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4459 			CSR_WRITE_FLUSH(sc);
   4460 		}
   4461 		/* check EECD_EE_AUTORD */
   4462 		wm_get_auto_rd_done(sc);
		/*
		 * PHY configuration from the NVM starts only after
		 * EECD_AUTO_RD is set.
		 */
   4467 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4468 		    || (sc->sc_type == WM_T_82583))
   4469 			delay(25*1000);
   4470 		break;
   4471 	case WM_T_82575:
   4472 	case WM_T_82576:
   4473 	case WM_T_82580:
   4474 	case WM_T_I350:
   4475 	case WM_T_I354:
   4476 	case WM_T_I210:
   4477 	case WM_T_I211:
   4478 	case WM_T_80003:
   4479 		/* check EECD_EE_AUTORD */
   4480 		wm_get_auto_rd_done(sc);
   4481 		break;
   4482 	case WM_T_ICH8:
   4483 	case WM_T_ICH9:
   4484 	case WM_T_ICH10:
   4485 	case WM_T_PCH:
   4486 	case WM_T_PCH2:
   4487 	case WM_T_PCH_LPT:
   4488 	case WM_T_PCH_SPT:
   4489 		break;
   4490 	default:
   4491 		panic("%s: unknown type\n", __func__);
   4492 	}
   4493 
   4494 	/* Check whether EEPROM is present or not */
   4495 	switch (sc->sc_type) {
   4496 	case WM_T_82575:
   4497 	case WM_T_82576:
   4498 	case WM_T_82580:
   4499 	case WM_T_I350:
   4500 	case WM_T_I354:
   4501 	case WM_T_ICH8:
   4502 	case WM_T_ICH9:
   4503 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4504 			/* Not found */
   4505 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4506 			if (sc->sc_type == WM_T_82575)
   4507 				wm_reset_init_script_82575(sc);
   4508 		}
   4509 		break;
   4510 	default:
   4511 		break;
   4512 	}
   4513 
   4514 	if (phy_reset != 0)
   4515 		wm_phy_post_reset(sc);
   4516 
   4517 	if ((sc->sc_type == WM_T_82580)
   4518 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4519 		/* clear global device reset status bit */
   4520 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4521 	}
   4522 
   4523 	/* Clear any pending interrupt events. */
   4524 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4525 	reg = CSR_READ(sc, WMREG_ICR);
   4526 	if (wm_is_using_msix(sc)) {
   4527 		if (sc->sc_type != WM_T_82574) {
   4528 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4529 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4530 		} else
   4531 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4532 	}
   4533 
   4534 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4535 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4536 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4537 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4538 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4539 		reg |= KABGTXD_BGSQLBIAS;
   4540 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4541 	}
   4542 
   4543 	/* reload sc_ctrl */
   4544 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4545 
   4546 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4547 		wm_set_eee_i350(sc);
   4548 
   4549 	/*
   4550 	 * For PCH, this write will make sure that any noise will be detected
   4551 	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine.
   4553 	 */
   4554 	if (sc->sc_type == WM_T_PCH)
   4555 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4556 
   4557 	if (sc->sc_type >= WM_T_82544)
   4558 		CSR_WRITE(sc, WMREG_WUC, 0);
   4559 
   4560 	wm_reset_mdicnfg_82580(sc);
   4561 
   4562 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4563 		wm_pll_workaround_i210(sc);
   4564 }
   4565 
   4566 /*
   4567  * wm_add_rxbuf:
   4568  *
 *	Add a receive buffer to the indicated descriptor.
   4570  */
   4571 static int
   4572 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4573 {
   4574 	struct wm_softc *sc = rxq->rxq_sc;
   4575 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4576 	struct mbuf *m;
   4577 	int error;
   4578 
   4579 	KASSERT(mutex_owned(rxq->rxq_lock));
   4580 
   4581 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4582 	if (m == NULL)
   4583 		return ENOBUFS;
   4584 
   4585 	MCLGET(m, M_DONTWAIT);
   4586 	if ((m->m_flags & M_EXT) == 0) {
   4587 		m_freem(m);
   4588 		return ENOBUFS;
   4589 	}
   4590 
   4591 	if (rxs->rxs_mbuf != NULL)
   4592 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4593 
   4594 	rxs->rxs_mbuf = m;
   4595 
   4596 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4597 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4598 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4599 	if (error) {
   4600 		/* XXX XXX XXX */
   4601 		aprint_error_dev(sc->sc_dev,
   4602 		    "unable to load rx DMA map %d, error = %d\n",
   4603 		    idx, error);
   4604 		panic("wm_add_rxbuf");
   4605 	}
   4606 
   4607 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4608 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4609 
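         	/*
         	 * On NEWQUEUE devices, write the Rx descriptor only while the
         	 * receiver is enabled (see the "set RDT only if RX is enabled"
         	 * note in wm_init_locked()); older devices always write it here.
         	 */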
   4610 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4611 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4612 			wm_init_rxdesc(rxq, idx);
   4613 	} else
   4614 		wm_init_rxdesc(rxq, idx);
   4615 
   4616 	return 0;
   4617 }
   4618 
   4619 /*
   4620  * wm_rxdrain:
   4621  *
   4622  *	Drain the receive queue.
   4623  */
   4624 static void
   4625 wm_rxdrain(struct wm_rxqueue *rxq)
   4626 {
   4627 	struct wm_softc *sc = rxq->rxq_sc;
   4628 	struct wm_rxsoft *rxs;
   4629 	int i;
   4630 
   4631 	KASSERT(mutex_owned(rxq->rxq_lock));
   4632 
   4633 	for (i = 0; i < WM_NRXDESC; i++) {
   4634 		rxs = &rxq->rxq_soft[i];
   4635 		if (rxs->rxs_mbuf != NULL) {
   4636 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4637 			m_freem(rxs->rxs_mbuf);
   4638 			rxs->rxs_mbuf = NULL;
   4639 		}
   4640 	}
   4641 }
   4642 
   4643 
   4644 /*
   4645  * XXX copy from FreeBSD's sys/net/rss_config.c
   4646  */
   4647 /*
   4648  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4649  * effectiveness may be limited by algorithm choice and available entropy
   4650  * during the boot.
   4651  *
   4652  * XXXRW: And that we don't randomize it yet!
   4653  *
   4654  * This is the default Microsoft RSS specification key which is also
   4655  * the Chelsio T5 firmware default key.
   4656  */
   4657 #define RSS_KEYSIZE 40
   4658 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4659 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4660 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4661 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4662 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4663 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4664 };
   4665 
   4666 /*
   4667  * Caller must pass an array of size sizeof(rss_key).
   4668  *
   4669  * XXX
    4670  * As if_ixgbe may use this function, it should not be an
    4671  * if_wm specific function.
   4672  */
   4673 static void
   4674 wm_rss_getkey(uint8_t *key)
   4675 {
   4676 
   4677 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4678 }
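
         /*
          * Illustrative sketch: wm_init_rss() below consumes this 40-byte key
          * as RSSRK_NUM_REGS 32-bit words (RSSRK_NUM_REGS * sizeof(uint32_t)
          * == sizeof(wm_rss_key), as the CTASSERT there checks), one word per
          * WMREG_RSSRK(i) register:
          *
          *	uint32_t rss_key[RSSRK_NUM_REGS];
          *
          *	wm_rss_getkey((uint8_t *)rss_key);
          *	for (i = 0; i < RSSRK_NUM_REGS; i++)
          *		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
          */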
   4679 
   4680 /*
    4681  * Set up registers for RSS.
    4682  *
    4683  * XXX VMDq is not yet supported.
   4684  */
   4685 static void
   4686 wm_init_rss(struct wm_softc *sc)
   4687 {
   4688 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4689 	int i;
   4690 
   4691 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4692 
   4693 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4694 		int qid, reta_ent;
   4695 
   4696 		qid  = i % sc->sc_nqueues;
    4697 		switch (sc->sc_type) {
   4698 		case WM_T_82574:
   4699 			reta_ent = __SHIFTIN(qid,
   4700 			    RETA_ENT_QINDEX_MASK_82574);
   4701 			break;
   4702 		case WM_T_82575:
   4703 			reta_ent = __SHIFTIN(qid,
   4704 			    RETA_ENT_QINDEX1_MASK_82575);
   4705 			break;
   4706 		default:
   4707 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4708 			break;
   4709 		}
   4710 
   4711 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4712 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4713 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4714 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4715 	}
   4716 
   4717 	wm_rss_getkey((uint8_t *)rss_key);
   4718 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4719 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4720 
   4721 	if (sc->sc_type == WM_T_82574)
   4722 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4723 	else
   4724 		mrqc = MRQC_ENABLE_RSS_MQ;
   4725 
   4726 	/*
   4727 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4728 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4729 	 */
   4730 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4731 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4732 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4733 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4734 
   4735 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4736 }
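
         /*
          * A worked example of the redirection table set up above: with
          * sc_nqueues == 4, qid cycles 0,1,2,3,0,1,... through the
          * RETA_NUM_ENTRIES entries, so a received packet whose RSS hash
          * selects entry 6 is steered to queue 6 % 4 == 2.
          */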
   4737 
   4738 /*
    4739  * Adjust the numbers of TX and RX queues which the system actually uses.
    4740  *
    4741  * The numbers are affected by the parameters below:
    4742  *     - The number of hardware queues
   4743  *     - The number of MSI-X vectors (= "nvectors" argument)
   4744  *     - ncpu
   4745  */
   4746 static void
   4747 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4748 {
   4749 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4750 
   4751 	if (nvectors < 2) {
   4752 		sc->sc_nqueues = 1;
   4753 		return;
   4754 	}
   4755 
    4756 	switch (sc->sc_type) {
   4757 	case WM_T_82572:
   4758 		hw_ntxqueues = 2;
   4759 		hw_nrxqueues = 2;
   4760 		break;
   4761 	case WM_T_82574:
   4762 		hw_ntxqueues = 2;
   4763 		hw_nrxqueues = 2;
   4764 		break;
   4765 	case WM_T_82575:
   4766 		hw_ntxqueues = 4;
   4767 		hw_nrxqueues = 4;
   4768 		break;
   4769 	case WM_T_82576:
   4770 		hw_ntxqueues = 16;
   4771 		hw_nrxqueues = 16;
   4772 		break;
   4773 	case WM_T_82580:
   4774 	case WM_T_I350:
   4775 	case WM_T_I354:
   4776 		hw_ntxqueues = 8;
   4777 		hw_nrxqueues = 8;
   4778 		break;
   4779 	case WM_T_I210:
   4780 		hw_ntxqueues = 4;
   4781 		hw_nrxqueues = 4;
   4782 		break;
   4783 	case WM_T_I211:
   4784 		hw_ntxqueues = 2;
   4785 		hw_nrxqueues = 2;
   4786 		break;
   4787 		/*
    4788 		 * As the ethernet controllers below do not support MSI-X,
    4789 		 * this driver does not use multiqueue on them:
   4790 		 *     - WM_T_80003
   4791 		 *     - WM_T_ICH8
   4792 		 *     - WM_T_ICH9
   4793 		 *     - WM_T_ICH10
   4794 		 *     - WM_T_PCH
   4795 		 *     - WM_T_PCH2
   4796 		 *     - WM_T_PCH_LPT
   4797 		 */
   4798 	default:
   4799 		hw_ntxqueues = 1;
   4800 		hw_nrxqueues = 1;
   4801 		break;
   4802 	}
   4803 
   4804 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4805 
   4806 	/*
    4807 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    4808 	 * the number of queues actually used.
   4809 	 */
   4810 	if (nvectors < hw_nqueues + 1) {
   4811 		sc->sc_nqueues = nvectors - 1;
   4812 	} else {
   4813 		sc->sc_nqueues = hw_nqueues;
   4814 	}
   4815 
   4816 	/*
    4817 	 * As more queues than CPUs cannot improve scaling, we limit
    4818 	 * the number of queues actually used.
   4819 	 */
   4820 	if (ncpu < sc->sc_nqueues)
   4821 		sc->sc_nqueues = ncpu;
   4822 }
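
         /*
          * Worked example (illustrative): on an 82576 (16 hardware queues)
          * with nvectors == 5 and ncpu == 8, hw_nqueues is 16, but one vector
          * is reserved for the link interrupt, so sc_nqueues becomes
          * nvectors - 1 == 4; that is already <= ncpu, so it stands.  With
          * ncpu == 2, the final clamp would reduce sc_nqueues to 2.
          */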
   4823 
   4824 static inline bool
   4825 wm_is_using_msix(struct wm_softc *sc)
   4826 {
   4827 
   4828 	return (sc->sc_nintrs > 1);
   4829 }
   4830 
   4831 static inline bool
   4832 wm_is_using_multiqueue(struct wm_softc *sc)
   4833 {
   4834 
   4835 	return (sc->sc_nqueues > 1);
   4836 }
   4837 
   4838 static int
   4839 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4840 {
   4841 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4842 	wmq->wmq_id = qidx;
   4843 	wmq->wmq_intr_idx = intr_idx;
   4844 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4845 #ifdef WM_MPSAFE
   4846 	    | SOFTINT_MPSAFE
   4847 #endif
   4848 	    , wm_handle_queue, wmq);
   4849 	if (wmq->wmq_si != NULL)
   4850 		return 0;
   4851 
   4852 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4853 	    wmq->wmq_id);
   4854 
   4855 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4856 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4857 	return ENOMEM;
   4858 }
   4859 
   4860 /*
   4861  * Both single interrupt MSI and INTx can use this function.
   4862  */
   4863 static int
   4864 wm_setup_legacy(struct wm_softc *sc)
   4865 {
   4866 	pci_chipset_tag_t pc = sc->sc_pc;
   4867 	const char *intrstr = NULL;
   4868 	char intrbuf[PCI_INTRSTR_LEN];
   4869 	int error;
   4870 
   4871 	error = wm_alloc_txrx_queues(sc);
   4872 	if (error) {
   4873 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4874 		    error);
   4875 		return ENOMEM;
   4876 	}
   4877 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4878 	    sizeof(intrbuf));
   4879 #ifdef WM_MPSAFE
   4880 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4881 #endif
   4882 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4883 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4884 	if (sc->sc_ihs[0] == NULL) {
   4885 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4886 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4887 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4888 		return ENOMEM;
   4889 	}
   4890 
   4891 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4892 	sc->sc_nintrs = 1;
   4893 
   4894 	return wm_softint_establish(sc, 0, 0);
   4895 }
   4896 
   4897 static int
   4898 wm_setup_msix(struct wm_softc *sc)
   4899 {
   4900 	void *vih;
   4901 	kcpuset_t *affinity;
   4902 	int qidx, error, intr_idx, txrx_established;
   4903 	pci_chipset_tag_t pc = sc->sc_pc;
   4904 	const char *intrstr = NULL;
   4905 	char intrbuf[PCI_INTRSTR_LEN];
   4906 	char intr_xname[INTRDEVNAMEBUF];
   4907 
   4908 	if (sc->sc_nqueues < ncpu) {
   4909 		/*
   4910 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4911 		 * interrupts starts from CPU#1.
   4912 		 */
   4913 		sc->sc_affinity_offset = 1;
   4914 	} else {
   4915 		/*
    4916 		 * In this case, this device uses all CPUs, so we unify the
    4917 		 * affinitized cpu_index with the MSI-X vector number for readability.
   4918 		 */
   4919 		sc->sc_affinity_offset = 0;
   4920 	}
   4921 
   4922 	error = wm_alloc_txrx_queues(sc);
   4923 	if (error) {
   4924 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4925 		    error);
   4926 		return ENOMEM;
   4927 	}
   4928 
   4929 	kcpuset_create(&affinity, false);
   4930 	intr_idx = 0;
   4931 
   4932 	/*
   4933 	 * TX and RX
   4934 	 */
   4935 	txrx_established = 0;
   4936 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4937 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4938 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4939 
   4940 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4941 		    sizeof(intrbuf));
   4942 #ifdef WM_MPSAFE
   4943 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4944 		    PCI_INTR_MPSAFE, true);
   4945 #endif
   4946 		memset(intr_xname, 0, sizeof(intr_xname));
   4947 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4948 		    device_xname(sc->sc_dev), qidx);
   4949 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4950 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4951 		if (vih == NULL) {
   4952 			aprint_error_dev(sc->sc_dev,
   4953 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4954 			    intrstr ? " at " : "",
   4955 			    intrstr ? intrstr : "");
   4956 
   4957 			goto fail;
   4958 		}
   4959 		kcpuset_zero(affinity);
   4960 		/* Round-robin affinity */
   4961 		kcpuset_set(affinity, affinity_to);
   4962 		error = interrupt_distribute(vih, affinity, NULL);
   4963 		if (error == 0) {
   4964 			aprint_normal_dev(sc->sc_dev,
   4965 			    "for TX and RX interrupting at %s affinity to %u\n",
   4966 			    intrstr, affinity_to);
   4967 		} else {
   4968 			aprint_normal_dev(sc->sc_dev,
   4969 			    "for TX and RX interrupting at %s\n", intrstr);
   4970 		}
   4971 		sc->sc_ihs[intr_idx] = vih;
   4972 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4973 			goto fail;
   4974 		txrx_established++;
   4975 		intr_idx++;
   4976 	}
   4977 
   4978 	/*
   4979 	 * LINK
   4980 	 */
   4981 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4982 	    sizeof(intrbuf));
   4983 #ifdef WM_MPSAFE
   4984 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4985 #endif
   4986 	memset(intr_xname, 0, sizeof(intr_xname));
   4987 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4988 	    device_xname(sc->sc_dev));
   4989 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4990 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4991 	if (vih == NULL) {
   4992 		aprint_error_dev(sc->sc_dev,
   4993 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4994 		    intrstr ? " at " : "",
   4995 		    intrstr ? intrstr : "");
   4996 
   4997 		goto fail;
   4998 	}
    4999 	/* Keep the default affinity for the LINK interrupt. */
   5000 	aprint_normal_dev(sc->sc_dev,
   5001 	    "for LINK interrupting at %s\n", intrstr);
   5002 	sc->sc_ihs[intr_idx] = vih;
   5003 	sc->sc_link_intr_idx = intr_idx;
   5004 
   5005 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5006 	kcpuset_destroy(affinity);
   5007 	return 0;
   5008 
   5009  fail:
   5010 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5011 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5012 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5013 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5014 	}
   5015 
   5016 	kcpuset_destroy(affinity);
   5017 	return ENOMEM;
   5018 }
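
         /*
          * Resulting vector layout (illustrative, for a device named "wm0"
          * with sc_nqueues == 4): vectors 0..3 carry the per-queue TX/RX
          * interrupts "wm0TXRX0".."wm0TXRX3" and vector 4 carries the link
          * interrupt "wm0LINK", so sc_nintrs == 5.
          */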
   5019 
   5020 static void
   5021 wm_turnon(struct wm_softc *sc)
   5022 {
   5023 	int i;
   5024 
   5025 	KASSERT(WM_CORE_LOCKED(sc));
   5026 
   5027 	/*
    5028 	 * Must unset the stopping flags in ascending order.
   5029 	 */
    5030 	for (i = 0; i < sc->sc_nqueues; i++) {
   5031 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5032 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5033 
   5034 		mutex_enter(txq->txq_lock);
   5035 		txq->txq_stopping = false;
   5036 		mutex_exit(txq->txq_lock);
   5037 
   5038 		mutex_enter(rxq->rxq_lock);
   5039 		rxq->rxq_stopping = false;
   5040 		mutex_exit(rxq->rxq_lock);
   5041 	}
   5042 
   5043 	sc->sc_core_stopping = false;
   5044 }
   5045 
   5046 static void
   5047 wm_turnoff(struct wm_softc *sc)
   5048 {
   5049 	int i;
   5050 
   5051 	KASSERT(WM_CORE_LOCKED(sc));
   5052 
   5053 	sc->sc_core_stopping = true;
   5054 
   5055 	/*
    5056 	 * Must set the stopping flags in ascending order.
   5057 	 */
    5058 	for (i = 0; i < sc->sc_nqueues; i++) {
   5059 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5060 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5061 
   5062 		mutex_enter(rxq->rxq_lock);
   5063 		rxq->rxq_stopping = true;
   5064 		mutex_exit(rxq->rxq_lock);
   5065 
   5066 		mutex_enter(txq->txq_lock);
   5067 		txq->txq_stopping = true;
   5068 		mutex_exit(txq->txq_lock);
   5069 	}
   5070 }
   5071 
   5072 /*
    5073  * Write the interrupt interval value to the ITR or EITR register.
   5074  */
   5075 static void
   5076 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5077 {
   5078 
   5079 	if (!wmq->wmq_set_itr)
   5080 		return;
   5081 
   5082 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5083 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5084 
   5085 		/*
    5086 		 * The 82575 doesn't have the CNT_INGR field, so
    5087 		 * overwrite the counter field in software.
   5088 		 */
   5089 		if (sc->sc_type == WM_T_82575)
   5090 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5091 		else
   5092 			eitr |= EITR_CNT_INGR;
   5093 
   5094 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5095 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5096 		/*
    5097 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5098 		 * the multiqueue function with MSI-X.
   5099 		 */
   5100 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5101 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5102 	} else {
   5103 		KASSERT(wmq->wmq_id == 0);
   5104 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5105 	}
   5106 
   5107 	wmq->wmq_set_itr = false;
   5108 }
   5109 
   5110 /*
   5111  * TODO
    5112  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5113  * but it does not fit wm(4), so AIM stays disabled until we find an
    5114  * appropriate ITR calculation.
   5115  */
   5116 /*
    5117  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5118  * write to the register. This function does not write the ITR/EITR register itself.
   5119  */
   5120 static void
   5121 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5122 {
   5123 #ifdef NOTYET
   5124 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5125 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5126 	uint32_t avg_size = 0;
   5127 	uint32_t new_itr;
   5128 
   5129 	if (rxq->rxq_packets)
   5130 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5131 	if (txq->txq_packets)
   5132 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5133 
   5134 	if (avg_size == 0) {
   5135 		new_itr = 450; /* restore default value */
   5136 		goto out;
   5137 	}
   5138 
   5139 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5140 	avg_size += 24;
   5141 
   5142 	/* Don't starve jumbo frames */
   5143 	avg_size = min(avg_size, 3000);
   5144 
   5145 	/* Give a little boost to mid-size frames */
   5146 	if ((avg_size > 300) && (avg_size < 1200))
   5147 		new_itr = avg_size / 3;
   5148 	else
   5149 		new_itr = avg_size / 2;
   5150 
   5151 out:
   5152 	/*
    5153 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5154 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5155 	 */
   5156 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5157 		new_itr *= 4;
   5158 
   5159 	if (new_itr != wmq->wmq_itr) {
   5160 		wmq->wmq_itr = new_itr;
   5161 		wmq->wmq_set_itr = true;
   5162 	} else
   5163 		wmq->wmq_set_itr = false;
   5164 
   5165 	rxq->rxq_packets = 0;
   5166 	rxq->rxq_bytes = 0;
   5167 	txq->txq_packets = 0;
   5168 	txq->txq_bytes = 0;
   5169 #endif
   5170 }
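
         /*
          * Worked example for the (disabled) AIM path above: an average
          * packet of 574 bytes becomes 598 after the 24-byte
          * CRC/preamble/gap adjustment; 598 falls in the mid-size
          * (300, 1200) band, so new_itr = 598 / 3 = 199, which is then
          * multiplied by 4 (to 796) on every controller except the 82575.
          */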
   5171 
   5172 /*
   5173  * wm_init:		[ifnet interface function]
   5174  *
   5175  *	Initialize the interface.
   5176  */
   5177 static int
   5178 wm_init(struct ifnet *ifp)
   5179 {
   5180 	struct wm_softc *sc = ifp->if_softc;
   5181 	int ret;
   5182 
   5183 	WM_CORE_LOCK(sc);
   5184 	ret = wm_init_locked(ifp);
   5185 	WM_CORE_UNLOCK(sc);
   5186 
   5187 	return ret;
   5188 }
   5189 
   5190 static int
   5191 wm_init_locked(struct ifnet *ifp)
   5192 {
   5193 	struct wm_softc *sc = ifp->if_softc;
   5194 	int i, j, trynum, error = 0;
   5195 	uint32_t reg;
   5196 
   5197 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5198 		device_xname(sc->sc_dev), __func__));
   5199 	KASSERT(WM_CORE_LOCKED(sc));
   5200 
   5201 	/*
    5202 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5203 	 * There is a small but measurable benefit to avoiding the adjustment
   5204 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5205 	 * on such platforms.  One possibility is that the DMA itself is
   5206 	 * slightly more efficient if the front of the entire packet (instead
   5207 	 * of the front of the headers) is aligned.
   5208 	 *
   5209 	 * Note we must always set align_tweak to 0 if we are using
   5210 	 * jumbo frames.
   5211 	 */
   5212 #ifdef __NO_STRICT_ALIGNMENT
   5213 	sc->sc_align_tweak = 0;
   5214 #else
   5215 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5216 		sc->sc_align_tweak = 0;
   5217 	else
   5218 		sc->sc_align_tweak = 2;
   5219 #endif /* __NO_STRICT_ALIGNMENT */
   5220 
   5221 	/* Cancel any pending I/O. */
   5222 	wm_stop_locked(ifp, 0);
   5223 
   5224 	/* update statistics before reset */
   5225 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5226 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5227 
   5228 	/* PCH_SPT hardware workaround */
   5229 	if (sc->sc_type == WM_T_PCH_SPT)
   5230 		wm_flush_desc_rings(sc);
   5231 
   5232 	/* Reset the chip to a known state. */
   5233 	wm_reset(sc);
   5234 
   5235 	/*
   5236 	 * AMT based hardware can now take control from firmware
   5237 	 * Do this after reset.
   5238 	 */
   5239 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5240 		wm_get_hw_control(sc);
   5241 
   5242 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5243 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5244 		wm_legacy_irq_quirk_spt(sc);
   5245 
   5246 	/* Init hardware bits */
   5247 	wm_initialize_hardware_bits(sc);
   5248 
   5249 	/* Reset the PHY. */
   5250 	if (sc->sc_flags & WM_F_HAS_MII)
   5251 		wm_gmii_reset(sc);
   5252 
   5253 	/* Calculate (E)ITR value */
   5254 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5255 		/*
    5256 		 * For NEWQUEUE's EITR (except for the 82575).
    5257 		 * The 82575's EITR should be set to the same throttling value
    5258 		 * as other old controllers' ITR because the interrupts/sec
    5259 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5260 		 *
    5261 		 * The 82574's EITR should be set to the same throttling value
    5262 		 * as ITR.
    5263 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5264 		 * in contrast to the ITR throttling value.
   5265 		 */
   5266 		sc->sc_itr_init = 450;
   5267 	} else if (sc->sc_type >= WM_T_82543) {
   5268 		/*
   5269 		 * Set up the interrupt throttling register (units of 256ns)
   5270 		 * Note that a footnote in Intel's documentation says this
   5271 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5272 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5273 		 * that that is also true for the 1024ns units of the other
   5274 		 * interrupt-related timer registers -- so, really, we ought
   5275 		 * to divide this value by 4 when the link speed is low.
   5276 		 *
   5277 		 * XXX implement this division at link speed change!
   5278 		 */
   5279 
   5280 		/*
   5281 		 * For N interrupts/sec, set this value to:
   5282 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5283 		 * absolute and packet timer values to this value
   5284 		 * divided by 4 to get "simple timer" behavior.
   5285 		 */
   5286 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5287 	}
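
         	/*
         	 * Sanity arithmetic for the defaults above (illustrative):
         	 * an EITR value of 450 (1us units) gives 1,000,000 / 450
         	 * ~= 2222 interrupts/sec; an ITR value of 1500 (256ns units)
         	 * gives 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec.
         	 */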
   5288 
   5289 	error = wm_init_txrx_queues(sc);
   5290 	if (error)
   5291 		goto out;
   5292 
   5293 	/*
   5294 	 * Clear out the VLAN table -- we don't use it (yet).
   5295 	 */
   5296 	CSR_WRITE(sc, WMREG_VET, 0);
   5297 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5298 		trynum = 10; /* Due to hw errata */
   5299 	else
   5300 		trynum = 1;
   5301 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5302 		for (j = 0; j < trynum; j++)
   5303 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5304 
   5305 	/*
   5306 	 * Set up flow-control parameters.
   5307 	 *
   5308 	 * XXX Values could probably stand some tuning.
   5309 	 */
   5310 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5311 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5312 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5313 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5314 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5315 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5316 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5317 	}
   5318 
   5319 	sc->sc_fcrtl = FCRTL_DFLT;
   5320 	if (sc->sc_type < WM_T_82543) {
   5321 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5322 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5323 	} else {
   5324 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5325 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5326 	}
   5327 
   5328 	if (sc->sc_type == WM_T_80003)
   5329 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5330 	else
   5331 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5332 
   5333 	/* Writes the control register. */
   5334 	wm_set_vlan(sc);
   5335 
   5336 	if (sc->sc_flags & WM_F_HAS_MII) {
   5337 		int val;
   5338 
   5339 		switch (sc->sc_type) {
   5340 		case WM_T_80003:
   5341 		case WM_T_ICH8:
   5342 		case WM_T_ICH9:
   5343 		case WM_T_ICH10:
   5344 		case WM_T_PCH:
   5345 		case WM_T_PCH2:
   5346 		case WM_T_PCH_LPT:
   5347 		case WM_T_PCH_SPT:
   5348 			/*
   5349 			 * Set the mac to wait the maximum time between each
   5350 			 * iteration and increase the max iterations when
   5351 			 * polling the phy; this fixes erroneous timeouts at
   5352 			 * 10Mbps.
   5353 			 */
   5354 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5355 			    0xFFFF);
   5356 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5357 			val |= 0x3F;
   5358 			wm_kmrn_writereg(sc,
   5359 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5360 			break;
   5361 		default:
   5362 			break;
   5363 		}
   5364 
   5365 		if (sc->sc_type == WM_T_80003) {
   5366 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5367 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5368 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5369 
   5370 			/* Bypass RX and TX FIFO's */
   5371 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5372 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5373 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5374 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5375 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5376 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5377 		}
   5378 	}
   5379 #if 0
   5380 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5381 #endif
   5382 
   5383 	/* Set up checksum offload parameters. */
   5384 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5385 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5386 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5387 		reg |= RXCSUM_IPOFL;
   5388 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5389 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5390 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5391 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5392 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5393 
   5394 	/* Set registers about MSI-X */
   5395 	if (wm_is_using_msix(sc)) {
   5396 		uint32_t ivar;
   5397 		struct wm_queue *wmq;
   5398 		int qid, qintr_idx;
   5399 
   5400 		if (sc->sc_type == WM_T_82575) {
   5401 			/* Interrupt control */
   5402 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5403 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5404 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5405 
   5406 			/* TX and RX */
   5407 			for (i = 0; i < sc->sc_nqueues; i++) {
   5408 				wmq = &sc->sc_queue[i];
   5409 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5410 				    EITR_TX_QUEUE(wmq->wmq_id)
   5411 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5412 			}
   5413 			/* Link status */
   5414 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5415 			    EITR_OTHER);
   5416 		} else if (sc->sc_type == WM_T_82574) {
   5417 			/* Interrupt control */
   5418 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5419 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5420 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5421 
   5422 			/*
    5423 			 * Work around an issue with spurious interrupts
    5424 			 * in MSI-X mode.
    5425 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5426 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5427 			 */
   5428 			reg = CSR_READ(sc, WMREG_RFCTL);
   5429 			reg |= WMREG_RFCTL_ACKDIS;
   5430 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5431 
   5432 			ivar = 0;
   5433 			/* TX and RX */
   5434 			for (i = 0; i < sc->sc_nqueues; i++) {
   5435 				wmq = &sc->sc_queue[i];
   5436 				qid = wmq->wmq_id;
   5437 				qintr_idx = wmq->wmq_intr_idx;
   5438 
   5439 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5440 				    IVAR_TX_MASK_Q_82574(qid));
   5441 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5442 				    IVAR_RX_MASK_Q_82574(qid));
   5443 			}
   5444 			/* Link status */
   5445 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5446 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5447 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5448 		} else {
   5449 			/* Interrupt control */
   5450 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5451 			    | GPIE_EIAME | GPIE_PBA);
   5452 
   5453 			switch (sc->sc_type) {
   5454 			case WM_T_82580:
   5455 			case WM_T_I350:
   5456 			case WM_T_I354:
   5457 			case WM_T_I210:
   5458 			case WM_T_I211:
   5459 				/* TX and RX */
   5460 				for (i = 0; i < sc->sc_nqueues; i++) {
   5461 					wmq = &sc->sc_queue[i];
   5462 					qid = wmq->wmq_id;
   5463 					qintr_idx = wmq->wmq_intr_idx;
   5464 
   5465 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5466 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5467 					ivar |= __SHIFTIN((qintr_idx
   5468 						| IVAR_VALID),
   5469 					    IVAR_TX_MASK_Q(qid));
   5470 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5471 					ivar |= __SHIFTIN((qintr_idx
   5472 						| IVAR_VALID),
   5473 					    IVAR_RX_MASK_Q(qid));
   5474 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5475 				}
   5476 				break;
   5477 			case WM_T_82576:
   5478 				/* TX and RX */
   5479 				for (i = 0; i < sc->sc_nqueues; i++) {
   5480 					wmq = &sc->sc_queue[i];
   5481 					qid = wmq->wmq_id;
   5482 					qintr_idx = wmq->wmq_intr_idx;
   5483 
   5484 					ivar = CSR_READ(sc,
   5485 					    WMREG_IVAR_Q_82576(qid));
   5486 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5487 					ivar |= __SHIFTIN((qintr_idx
   5488 						| IVAR_VALID),
   5489 					    IVAR_TX_MASK_Q_82576(qid));
   5490 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5491 					ivar |= __SHIFTIN((qintr_idx
   5492 						| IVAR_VALID),
   5493 					    IVAR_RX_MASK_Q_82576(qid));
   5494 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5495 					    ivar);
   5496 				}
   5497 				break;
   5498 			default:
   5499 				break;
   5500 			}
   5501 
   5502 			/* Link status */
   5503 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5504 			    IVAR_MISC_OTHER);
   5505 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5506 		}
   5507 
   5508 		if (wm_is_using_multiqueue(sc)) {
   5509 			wm_init_rss(sc);
   5510 
   5511 			/*
    5512 			 * NOTE: Receive Full-Packet Checksum Offload
    5513 			 * is mutually exclusive with Multiqueue. However
    5514 			 * this is not the same as TCP/IP checksums, which
    5515 			 * still work.
   5516 			*/
   5517 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5518 			reg |= RXCSUM_PCSD;
   5519 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5520 		}
   5521 	}
   5522 
   5523 	/* Set up the interrupt registers. */
   5524 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5525 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5526 	    ICR_RXO | ICR_RXT0;
   5527 	if (wm_is_using_msix(sc)) {
   5528 		uint32_t mask;
   5529 		struct wm_queue *wmq;
   5530 
   5531 		switch (sc->sc_type) {
   5532 		case WM_T_82574:
   5533 			mask = 0;
   5534 			for (i = 0; i < sc->sc_nqueues; i++) {
   5535 				wmq = &sc->sc_queue[i];
   5536 				mask |= ICR_TXQ(wmq->wmq_id);
   5537 				mask |= ICR_RXQ(wmq->wmq_id);
   5538 			}
   5539 			mask |= ICR_OTHER;
   5540 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5541 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5542 			break;
   5543 		default:
   5544 			if (sc->sc_type == WM_T_82575) {
   5545 				mask = 0;
   5546 				for (i = 0; i < sc->sc_nqueues; i++) {
   5547 					wmq = &sc->sc_queue[i];
   5548 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5549 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5550 				}
   5551 				mask |= EITR_OTHER;
   5552 			} else {
   5553 				mask = 0;
   5554 				for (i = 0; i < sc->sc_nqueues; i++) {
   5555 					wmq = &sc->sc_queue[i];
   5556 					mask |= 1 << wmq->wmq_intr_idx;
   5557 				}
   5558 				mask |= 1 << sc->sc_link_intr_idx;
   5559 			}
   5560 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5561 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5562 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5563 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5564 			break;
   5565 		}
   5566 	} else
   5567 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5568 
   5569 	/* Set up the inter-packet gap. */
   5570 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5571 
   5572 	if (sc->sc_type >= WM_T_82543) {
   5573 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5574 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5575 			wm_itrs_writereg(sc, wmq);
   5576 		}
   5577 		/*
    5578 		 * Link interrupts occur much less frequently than TX
    5579 		 * and RX interrupts, so we don't tune the
    5580 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5581 		 * FreeBSD's if_igb does.
   5582 		 */
   5583 	}
   5584 
   5585 	/* Set the VLAN ethernetype. */
   5586 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5587 
   5588 	/*
   5589 	 * Set up the transmit control register; we start out with
    5590 	 * a collision distance suitable for FDX, but update it when
   5591 	 * we resolve the media type.
   5592 	 */
   5593 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5594 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5595 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5596 	if (sc->sc_type >= WM_T_82571)
   5597 		sc->sc_tctl |= TCTL_MULR;
   5598 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5599 
   5600 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5601 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5602 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5603 	}
   5604 
   5605 	if (sc->sc_type == WM_T_80003) {
   5606 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5607 		reg &= ~TCTL_EXT_GCEX_MASK;
   5608 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5609 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5610 	}
   5611 
   5612 	/* Set the media. */
   5613 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5614 		goto out;
   5615 
   5616 	/* Configure for OS presence */
   5617 	wm_init_manageability(sc);
   5618 
   5619 	/*
   5620 	 * Set up the receive control register; we actually program
   5621 	 * the register when we set the receive filter.  Use multicast
   5622 	 * address offset type 0.
   5623 	 *
   5624 	 * Only the i82544 has the ability to strip the incoming
   5625 	 * CRC, so we don't enable that feature.
   5626 	 */
   5627 	sc->sc_mchash_type = 0;
   5628 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5629 	    | RCTL_MO(sc->sc_mchash_type);
   5630 
   5631 	/*
    5632 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5633 	 */
   5634 	if (sc->sc_type == WM_T_82574)
   5635 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5636 
   5637 	/*
   5638 	 * The I350 has a bug where it always strips the CRC whether
    5639 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5640 	 */
   5641 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5642 	    || (sc->sc_type == WM_T_I210))
   5643 		sc->sc_rctl |= RCTL_SECRC;
   5644 
   5645 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5646 	    && (ifp->if_mtu > ETHERMTU)) {
   5647 		sc->sc_rctl |= RCTL_LPE;
   5648 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5649 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5650 	}
   5651 
   5652 	if (MCLBYTES == 2048) {
   5653 		sc->sc_rctl |= RCTL_2k;
   5654 	} else {
   5655 		if (sc->sc_type >= WM_T_82543) {
   5656 			switch (MCLBYTES) {
   5657 			case 4096:
   5658 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5659 				break;
   5660 			case 8192:
   5661 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5662 				break;
   5663 			case 16384:
   5664 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5665 				break;
   5666 			default:
   5667 				panic("wm_init: MCLBYTES %d unsupported",
   5668 				    MCLBYTES);
   5669 				break;
   5670 			}
   5671 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5672 	}
   5673 
   5674 	/* Enable ECC */
   5675 	switch (sc->sc_type) {
   5676 	case WM_T_82571:
   5677 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5678 		reg |= PBA_ECC_CORR_EN;
   5679 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5680 		break;
   5681 	case WM_T_PCH_LPT:
   5682 	case WM_T_PCH_SPT:
   5683 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5684 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5685 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5686 
   5687 		sc->sc_ctrl |= CTRL_MEHE;
   5688 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5689 		break;
   5690 	default:
   5691 		break;
   5692 	}
   5693 
    5694 	/* On 82575 and later, set RDT only if RX is enabled */
   5695 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5696 		int qidx;
   5697 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5698 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5699 			for (i = 0; i < WM_NRXDESC; i++) {
   5700 				mutex_enter(rxq->rxq_lock);
   5701 				wm_init_rxdesc(rxq, i);
   5702 				mutex_exit(rxq->rxq_lock);
   5703 
   5704 			}
   5705 		}
   5706 	}
   5707 
   5708 	/* Set the receive filter. */
   5709 	wm_set_filter(sc);
   5710 
   5711 	wm_turnon(sc);
   5712 
   5713 	/* Start the one second link check clock. */
   5714 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5715 
   5716 	/* ...all done! */
   5717 	ifp->if_flags |= IFF_RUNNING;
   5718 	ifp->if_flags &= ~IFF_OACTIVE;
   5719 
   5720  out:
   5721 	sc->sc_if_flags = ifp->if_flags;
   5722 	if (error)
   5723 		log(LOG_ERR, "%s: interface not running\n",
   5724 		    device_xname(sc->sc_dev));
   5725 	return error;
   5726 }
   5727 
   5728 /*
   5729  * wm_stop:		[ifnet interface function]
   5730  *
   5731  *	Stop transmission on the interface.
   5732  */
   5733 static void
   5734 wm_stop(struct ifnet *ifp, int disable)
   5735 {
   5736 	struct wm_softc *sc = ifp->if_softc;
   5737 
   5738 	WM_CORE_LOCK(sc);
   5739 	wm_stop_locked(ifp, disable);
   5740 	WM_CORE_UNLOCK(sc);
   5741 }
   5742 
   5743 static void
   5744 wm_stop_locked(struct ifnet *ifp, int disable)
   5745 {
   5746 	struct wm_softc *sc = ifp->if_softc;
   5747 	struct wm_txsoft *txs;
   5748 	int i, qidx;
   5749 
   5750 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5751 		device_xname(sc->sc_dev), __func__));
   5752 	KASSERT(WM_CORE_LOCKED(sc));
   5753 
   5754 	wm_turnoff(sc);
   5755 
   5756 	/* Stop the one second clock. */
   5757 	callout_stop(&sc->sc_tick_ch);
   5758 
   5759 	/* Stop the 82547 Tx FIFO stall check timer. */
   5760 	if (sc->sc_type == WM_T_82547)
   5761 		callout_stop(&sc->sc_txfifo_ch);
   5762 
   5763 	if (sc->sc_flags & WM_F_HAS_MII) {
   5764 		/* Down the MII. */
   5765 		mii_down(&sc->sc_mii);
   5766 	} else {
   5767 #if 0
   5768 		/* Should we clear PHY's status properly? */
   5769 		wm_reset(sc);
   5770 #endif
   5771 	}
   5772 
   5773 	/* Stop the transmit and receive processes. */
   5774 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5775 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5776 	sc->sc_rctl &= ~RCTL_EN;
   5777 
   5778 	/*
   5779 	 * Clear the interrupt mask to ensure the device cannot assert its
   5780 	 * interrupt line.
   5781 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5782 	 * service any currently pending or shared interrupt.
   5783 	 */
   5784 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5785 	sc->sc_icr = 0;
   5786 	if (wm_is_using_msix(sc)) {
   5787 		if (sc->sc_type != WM_T_82574) {
   5788 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5789 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5790 		} else
   5791 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5792 	}
   5793 
   5794 	/* Release any queued transmit buffers. */
   5795 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5796 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5797 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5798 		mutex_enter(txq->txq_lock);
   5799 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5800 			txs = &txq->txq_soft[i];
   5801 			if (txs->txs_mbuf != NULL) {
   5802 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5803 				m_freem(txs->txs_mbuf);
   5804 				txs->txs_mbuf = NULL;
   5805 			}
   5806 		}
   5807 		mutex_exit(txq->txq_lock);
   5808 	}
   5809 
   5810 	/* Mark the interface as down and cancel the watchdog timer. */
   5811 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5812 	ifp->if_timer = 0;
   5813 
   5814 	if (disable) {
   5815 		for (i = 0; i < sc->sc_nqueues; i++) {
   5816 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5817 			mutex_enter(rxq->rxq_lock);
   5818 			wm_rxdrain(rxq);
   5819 			mutex_exit(rxq->rxq_lock);
   5820 		}
   5821 	}
   5822 
   5823 #if 0 /* notyet */
   5824 	if (sc->sc_type >= WM_T_82544)
   5825 		CSR_WRITE(sc, WMREG_WUC, 0);
   5826 #endif
   5827 }
   5828 
   5829 static void
   5830 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5831 {
   5832 	struct mbuf *m;
   5833 	int i;
   5834 
   5835 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5836 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5837 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5838 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5839 		    m->m_data, m->m_len, m->m_flags);
   5840 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5841 	    i, i == 1 ? "" : "s");
   5842 }
   5843 
   5844 /*
   5845  * wm_82547_txfifo_stall:
   5846  *
   5847  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5848  *	reset the FIFO pointers, and restart packet transmission.
   5849  */
   5850 static void
   5851 wm_82547_txfifo_stall(void *arg)
   5852 {
   5853 	struct wm_softc *sc = arg;
   5854 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5855 
   5856 	mutex_enter(txq->txq_lock);
   5857 
   5858 	if (txq->txq_stopping)
   5859 		goto out;
   5860 
   5861 	if (txq->txq_fifo_stall) {
   5862 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5863 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5864 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5865 			/*
   5866 			 * Packets have drained.  Stop transmitter, reset
   5867 			 * FIFO pointers, restart transmitter, and kick
   5868 			 * the packet queue.
   5869 			 */
   5870 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5871 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5872 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5873 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5874 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5875 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5876 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5877 			CSR_WRITE_FLUSH(sc);
   5878 
   5879 			txq->txq_fifo_head = 0;
   5880 			txq->txq_fifo_stall = 0;
   5881 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5882 		} else {
   5883 			/*
   5884 			 * Still waiting for packets to drain; try again in
   5885 			 * another tick.
   5886 			 */
   5887 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5888 		}
   5889 	}
   5890 
   5891 out:
   5892 	mutex_exit(txq->txq_lock);
   5893 }
   5894 
   5895 /*
   5896  * wm_82547_txfifo_bugchk:
   5897  *
   5898  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5899  *	prevent enqueueing a packet that would wrap around the end
    5900  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5901  *
   5902  *	We do this by checking the amount of space before the end
   5903  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5904  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5905  *	the internal FIFO pointers to the beginning, and restart
   5906  *	transmission on the interface.
   5907  */
   5908 #define	WM_FIFO_HDR		0x10
   5909 #define	WM_82547_PAD_LEN	0x3e0
   5910 static int
   5911 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5912 {
   5913 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5914 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5915 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5916 
   5917 	/* Just return if already stalled. */
   5918 	if (txq->txq_fifo_stall)
   5919 		return 1;
   5920 
   5921 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5922 		/* Stall only occurs in half-duplex mode. */
   5923 		goto send_packet;
   5924 	}
   5925 
   5926 	if (len >= WM_82547_PAD_LEN + space) {
   5927 		txq->txq_fifo_stall = 1;
   5928 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5929 		return 1;
   5930 	}
   5931 
   5932  send_packet:
   5933 	txq->txq_fifo_head += len;
   5934 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5935 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5936 
   5937 	return 0;
   5938 }
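
         /*
          * Worked example (illustrative): a full-sized 1514-byte frame
          * rounds up to len = roundup(1514 + 0x10, 0x10) = 1536 bytes of
          * FIFO space.  In half-duplex mode the stall triggers once
          * len >= WM_82547_PAD_LEN + space, i.e. once at most
          * 1536 - 0x3e0 = 544 bytes remain before the end of the FIFO.
          */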
   5939 
   5940 static int
   5941 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5942 {
   5943 	int error;
   5944 
   5945 	/*
   5946 	 * Allocate the control data structures, and create and load the
   5947 	 * DMA map for it.
   5948 	 *
   5949 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5950 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5951 	 * both sets within the same 4G segment.
   5952 	 */
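         	/*
         	 * The (bus_size_t)0x100000000ULL boundary argument passed to
         	 * bus_dmamem_alloc() below is what enforces this: no allocated
         	 * segment may cross a 4GB boundary.
         	 */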
   5953 	if (sc->sc_type < WM_T_82544)
   5954 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5955 	else
   5956 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5957 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5958 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5959 	else
   5960 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5961 
   5962 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5963 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5964 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5965 		aprint_error_dev(sc->sc_dev,
   5966 		    "unable to allocate TX control data, error = %d\n",
   5967 		    error);
   5968 		goto fail_0;
   5969 	}
   5970 
   5971 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5972 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5973 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5974 		aprint_error_dev(sc->sc_dev,
   5975 		    "unable to map TX control data, error = %d\n", error);
   5976 		goto fail_1;
   5977 	}
   5978 
   5979 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5980 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5981 		aprint_error_dev(sc->sc_dev,
   5982 		    "unable to create TX control data DMA map, error = %d\n",
   5983 		    error);
   5984 		goto fail_2;
   5985 	}
   5986 
   5987 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5988 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5989 		aprint_error_dev(sc->sc_dev,
   5990 		    "unable to load TX control data DMA map, error = %d\n",
   5991 		    error);
   5992 		goto fail_3;
   5993 	}
   5994 
   5995 	return 0;
   5996 
   5997  fail_3:
   5998 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5999  fail_2:
   6000 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6001 	    WM_TXDESCS_SIZE(txq));
   6002  fail_1:
   6003 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6004  fail_0:
   6005 	return error;
   6006 }
   6007 
   6008 static void
   6009 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6010 {
   6011 
   6012 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6013 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6014 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6015 	    WM_TXDESCS_SIZE(txq));
   6016 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6017 }
   6018 
   6019 static int
   6020 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6021 {
   6022 	int error;
   6023 	size_t rxq_descs_size;
   6024 
   6025 	/*
   6026 	 * Allocate the control data structures, and create and load the
   6027 	 * DMA map for it.
   6028 	 *
   6029 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6030 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6031 	 * both sets within the same 4G segment.
   6032 	 */
   6033 	rxq->rxq_ndesc = WM_NRXDESC;
   6034 	if (sc->sc_type == WM_T_82574)
   6035 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6036 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6037 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6038 	else
   6039 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6040 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6041 
   6042 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6043 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6044 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6045 		aprint_error_dev(sc->sc_dev,
   6046 		    "unable to allocate RX control data, error = %d\n",
   6047 		    error);
   6048 		goto fail_0;
   6049 	}
   6050 
   6051 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6052 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6053 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6054 		aprint_error_dev(sc->sc_dev,
   6055 		    "unable to map RX control data, error = %d\n", error);
   6056 		goto fail_1;
   6057 	}
   6058 
   6059 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6060 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6061 		aprint_error_dev(sc->sc_dev,
   6062 		    "unable to create RX control data DMA map, error = %d\n",
   6063 		    error);
   6064 		goto fail_2;
   6065 	}
   6066 
   6067 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6068 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6069 		aprint_error_dev(sc->sc_dev,
   6070 		    "unable to load RX control data DMA map, error = %d\n",
   6071 		    error);
   6072 		goto fail_3;
   6073 	}
   6074 
   6075 	return 0;
   6076 
   6077  fail_3:
   6078 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6079  fail_2:
   6080 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6081 	    rxq_descs_size);
   6082  fail_1:
   6083 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6084  fail_0:
   6085 	return error;
   6086 }
   6087 
   6088 static void
   6089 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6090 {
   6091 
   6092 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6093 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6094 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6095 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6096 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6097 }
   6098 
   6099 
   6100 static int
   6101 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6102 {
   6103 	int i, error;
   6104 
   6105 	/* Create the transmit buffer DMA maps. */
   6106 	WM_TXQUEUELEN(txq) =
   6107 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6108 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6109 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6110 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6111 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6112 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6113 			aprint_error_dev(sc->sc_dev,
   6114 			    "unable to create Tx DMA map %d, error = %d\n",
   6115 			    i, error);
   6116 			goto fail;
   6117 		}
   6118 	}
   6119 
   6120 	return 0;
   6121 
   6122  fail:
   6123 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6124 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6125 			bus_dmamap_destroy(sc->sc_dmat,
   6126 			    txq->txq_soft[i].txs_dmamap);
   6127 	}
   6128 	return error;
   6129 }
   6130 
   6131 static void
   6132 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6133 {
   6134 	int i;
   6135 
   6136 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6137 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6138 			bus_dmamap_destroy(sc->sc_dmat,
   6139 			    txq->txq_soft[i].txs_dmamap);
   6140 	}
   6141 }
   6142 
   6143 static int
   6144 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6145 {
   6146 	int i, error;
   6147 
   6148 	/* Create the receive buffer DMA maps. */
   6149 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6150 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6151 			    MCLBYTES, 0, 0,
   6152 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6153 			aprint_error_dev(sc->sc_dev,
    6154 			    "unable to create Rx DMA map %d, error = %d\n",
   6155 			    i, error);
   6156 			goto fail;
   6157 		}
   6158 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6159 	}
   6160 
   6161 	return 0;
   6162 
   6163  fail:
   6164 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6165 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6166 			bus_dmamap_destroy(sc->sc_dmat,
   6167 			    rxq->rxq_soft[i].rxs_dmamap);
   6168 	}
   6169 	return error;
   6170 }
   6171 
   6172 static void
   6173 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6174 {
   6175 	int i;
   6176 
   6177 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6178 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6179 			bus_dmamap_destroy(sc->sc_dmat,
   6180 			    rxq->rxq_soft[i].rxs_dmamap);
   6181 	}
   6182 }
   6183 
   6184 /*
    6185  * wm_alloc_txrx_queues:
    6186  *	Allocate {tx,rx} descs and {tx,rx} buffers.
   6187  */
   6188 static int
   6189 wm_alloc_txrx_queues(struct wm_softc *sc)
   6190 {
   6191 	int i, error, tx_done, rx_done;
   6192 
   6193 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6194 	    KM_SLEEP);
   6195 	if (sc->sc_queue == NULL) {
   6196 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6197 		error = ENOMEM;
   6198 		goto fail_0;
   6199 	}
   6200 
   6201 	/*
   6202 	 * For transmission
   6203 	 */
   6204 	error = 0;
   6205 	tx_done = 0;
   6206 	for (i = 0; i < sc->sc_nqueues; i++) {
   6207 #ifdef WM_EVENT_COUNTERS
   6208 		int j;
   6209 		const char *xname;
   6210 #endif
   6211 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6212 		txq->txq_sc = sc;
   6213 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6214 
   6215 		error = wm_alloc_tx_descs(sc, txq);
   6216 		if (error)
   6217 			break;
   6218 		error = wm_alloc_tx_buffer(sc, txq);
   6219 		if (error) {
   6220 			wm_free_tx_descs(sc, txq);
   6221 			break;
   6222 		}
   6223 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6224 		if (txq->txq_interq == NULL) {
   6225 			wm_free_tx_descs(sc, txq);
   6226 			wm_free_tx_buffer(sc, txq);
   6227 			error = ENOMEM;
   6228 			break;
   6229 		}
   6230 
   6231 #ifdef WM_EVENT_COUNTERS
   6232 		xname = device_xname(sc->sc_dev);
   6233 
   6234 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6235 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6236 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6237 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6238 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6239 
   6240 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6241 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6242 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6243 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6244 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6245 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6246 
   6247 		for (j = 0; j < WM_NTXSEGS; j++) {
   6248 			snprintf(txq->txq_txseg_evcnt_names[j],
   6249 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6250 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6251 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6252 		}
   6253 
   6254 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6255 
   6256 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6257 #endif /* WM_EVENT_COUNTERS */
   6258 
   6259 		tx_done++;
   6260 	}
   6261 	if (error)
   6262 		goto fail_1;
   6263 
   6264 	/*
    6265 	 * For receive
   6266 	 */
   6267 	error = 0;
   6268 	rx_done = 0;
   6269 	for (i = 0; i < sc->sc_nqueues; i++) {
   6270 #ifdef WM_EVENT_COUNTERS
   6271 		const char *xname;
   6272 #endif
   6273 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6274 		rxq->rxq_sc = sc;
   6275 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6276 
   6277 		error = wm_alloc_rx_descs(sc, rxq);
   6278 		if (error)
   6279 			break;
   6280 
   6281 		error = wm_alloc_rx_buffer(sc, rxq);
   6282 		if (error) {
   6283 			wm_free_rx_descs(sc, rxq);
   6284 			break;
   6285 		}
   6286 
   6287 #ifdef WM_EVENT_COUNTERS
   6288 		xname = device_xname(sc->sc_dev);
   6289 
   6290 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6291 
   6292 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6293 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6294 #endif /* WM_EVENT_COUNTERS */
   6295 
   6296 		rx_done++;
   6297 	}
   6298 	if (error)
   6299 		goto fail_2;
   6300 
   6301 	return 0;
   6302 
   6303  fail_2:
   6304 	for (i = 0; i < rx_done; i++) {
   6305 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6306 		wm_free_rx_buffer(sc, rxq);
   6307 		wm_free_rx_descs(sc, rxq);
   6308 		if (rxq->rxq_lock)
   6309 			mutex_obj_free(rxq->rxq_lock);
   6310 	}
   6311  fail_1:
   6312 	for (i = 0; i < tx_done; i++) {
   6313 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6314 		pcq_destroy(txq->txq_interq);
   6315 		wm_free_tx_buffer(sc, txq);
   6316 		wm_free_tx_descs(sc, txq);
   6317 		if (txq->txq_lock)
   6318 			mutex_obj_free(txq->txq_lock);
   6319 	}
   6320 
   6321 	kmem_free(sc->sc_queue,
   6322 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6323  fail_0:
   6324 	return error;
   6325 }
   6326 
   6327 /*
    6328  * wm_free_txrx_queues:
   6329  *	Free {tx,rx}descs and {tx,rx} buffers
   6330  */
   6331 static void
   6332 wm_free_txrx_queues(struct wm_softc *sc)
   6333 {
   6334 	int i;
   6335 
   6336 	for (i = 0; i < sc->sc_nqueues; i++) {
   6337 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6338 
   6339 #ifdef WM_EVENT_COUNTERS
   6340 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6341 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6342 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6343 #endif /* WM_EVENT_COUNTERS */
   6344 
   6345 		wm_free_rx_buffer(sc, rxq);
   6346 		wm_free_rx_descs(sc, rxq);
   6347 		if (rxq->rxq_lock)
   6348 			mutex_obj_free(rxq->rxq_lock);
   6349 	}
   6350 
   6351 	for (i = 0; i < sc->sc_nqueues; i++) {
   6352 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6353 		struct mbuf *m;
   6354 #ifdef WM_EVENT_COUNTERS
   6355 		int j;
   6356 
   6357 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6358 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6359 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6360 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6361 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6362 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6363 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6364 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6365 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6366 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6367 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6368 
   6369 		for (j = 0; j < WM_NTXSEGS; j++)
   6370 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6371 
   6372 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6373 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6374 #endif /* WM_EVENT_COUNTERS */
   6375 
   6376 		/* drain txq_interq */
   6377 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6378 			m_freem(m);
   6379 		pcq_destroy(txq->txq_interq);
   6380 
   6381 		wm_free_tx_buffer(sc, txq);
   6382 		wm_free_tx_descs(sc, txq);
   6383 		if (txq->txq_lock)
   6384 			mutex_obj_free(txq->txq_lock);
   6385 	}
   6386 
   6387 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6388 }
   6389 
   6390 static void
   6391 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6392 {
   6393 
   6394 	KASSERT(mutex_owned(txq->txq_lock));
   6395 
   6396 	/* Initialize the transmit descriptor ring. */
   6397 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6398 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6399 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6400 	txq->txq_free = WM_NTXDESC(txq);
   6401 	txq->txq_next = 0;
   6402 }
   6403 
   6404 static void
   6405 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6406     struct wm_txqueue *txq)
   6407 {
   6408 
   6409 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6410 		device_xname(sc->sc_dev), __func__));
   6411 	KASSERT(mutex_owned(txq->txq_lock));
   6412 
   6413 	if (sc->sc_type < WM_T_82543) {
   6414 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6415 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6416 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6417 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6418 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6419 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6420 	} else {
   6421 		int qid = wmq->wmq_id;
   6422 
   6423 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6424 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6425 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6426 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6427 
   6428 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6429 			/*
   6430 			 * Don't write TDT before TCTL.EN is set.
    6431 			 * See the datasheet.
   6432 			 */
   6433 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6434 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6435 			    | TXDCTL_WTHRESH(0));
   6436 		else {
   6437 			/* XXX should update with AIM? */
   6438 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6439 			if (sc->sc_type >= WM_T_82540) {
   6440 				/* should be same */
   6441 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6442 			}
   6443 
   6444 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6445 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6446 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6447 		}
   6448 	}
   6449 }
   6450 
   6451 static void
   6452 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6453 {
   6454 	int i;
   6455 
   6456 	KASSERT(mutex_owned(txq->txq_lock));
   6457 
   6458 	/* Initialize the transmit job descriptors. */
   6459 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6460 		txq->txq_soft[i].txs_mbuf = NULL;
   6461 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6462 	txq->txq_snext = 0;
   6463 	txq->txq_sdirty = 0;
   6464 }
   6465 
   6466 static void
   6467 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6468     struct wm_txqueue *txq)
   6469 {
   6470 
   6471 	KASSERT(mutex_owned(txq->txq_lock));
   6472 
   6473 	/*
   6474 	 * Set up some register offsets that are different between
   6475 	 * the i82542 and the i82543 and later chips.
   6476 	 */
   6477 	if (sc->sc_type < WM_T_82543)
   6478 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6479 	else
   6480 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6481 
   6482 	wm_init_tx_descs(sc, txq);
   6483 	wm_init_tx_regs(sc, wmq, txq);
   6484 	wm_init_tx_buffer(sc, txq);
   6485 }
   6486 
   6487 static void
   6488 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6489     struct wm_rxqueue *rxq)
   6490 {
   6491 
   6492 	KASSERT(mutex_owned(rxq->rxq_lock));
   6493 
   6494 	/*
   6495 	 * Initialize the receive descriptor and receive job
   6496 	 * descriptor rings.
   6497 	 */
   6498 	if (sc->sc_type < WM_T_82543) {
   6499 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6500 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6501 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6502 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6503 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6504 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6505 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6506 
   6507 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6508 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6509 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6510 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6511 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6512 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6513 	} else {
   6514 		int qid = wmq->wmq_id;
   6515 
   6516 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6517 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6518 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6519 
   6520 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6521 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6522 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6523 
    6524 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6525 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6526 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6527 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6528 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6529 			    | RXDCTL_WTHRESH(1));
   6530 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6531 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6532 		} else {
   6533 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6534 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6535 			/* XXX should update with AIM? */
   6536 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6537 			/* MUST be same */
   6538 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6539 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6540 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6541 		}
   6542 	}
   6543 }
   6544 
   6545 static int
   6546 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6547 {
   6548 	struct wm_rxsoft *rxs;
   6549 	int error, i;
   6550 
   6551 	KASSERT(mutex_owned(rxq->rxq_lock));
   6552 
   6553 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6554 		rxs = &rxq->rxq_soft[i];
   6555 		if (rxs->rxs_mbuf == NULL) {
   6556 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6557 				log(LOG_ERR, "%s: unable to allocate or map "
   6558 				    "rx buffer %d, error = %d\n",
   6559 				    device_xname(sc->sc_dev), i, error);
   6560 				/*
   6561 				 * XXX Should attempt to run with fewer receive
   6562 				 * XXX buffers instead of just failing.
   6563 				 */
   6564 				wm_rxdrain(rxq);
   6565 				return ENOMEM;
   6566 			}
   6567 		} else {
   6568 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6569 				wm_init_rxdesc(rxq, i);
   6570 			/*
    6571 			 * For 82575 and newer devices, the RX descriptors
    6572 			 * must be initialized after the setting of RCTL.EN
    6573 			 * in wm_set_filter().
   6574 			 */
   6575 		}
   6576 	}
   6577 	rxq->rxq_ptr = 0;
   6578 	rxq->rxq_discard = 0;
   6579 	WM_RXCHAIN_RESET(rxq);
   6580 
   6581 	return 0;
   6582 }
   6583 
   6584 static int
   6585 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6586     struct wm_rxqueue *rxq)
   6587 {
   6588 
   6589 	KASSERT(mutex_owned(rxq->rxq_lock));
   6590 
   6591 	/*
   6592 	 * Set up some register offsets that are different between
   6593 	 * the i82542 and the i82543 and later chips.
   6594 	 */
   6595 	if (sc->sc_type < WM_T_82543)
   6596 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6597 	else
   6598 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6599 
   6600 	wm_init_rx_regs(sc, wmq, rxq);
   6601 	return wm_init_rx_buffer(sc, rxq);
   6602 }
   6603 
   6604 /*
    6605  * wm_init_txrx_queues:
   6606  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6607  */
   6608 static int
   6609 wm_init_txrx_queues(struct wm_softc *sc)
   6610 {
   6611 	int i, error = 0;
   6612 
   6613 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6614 		device_xname(sc->sc_dev), __func__));
   6615 
   6616 	for (i = 0; i < sc->sc_nqueues; i++) {
   6617 		struct wm_queue *wmq = &sc->sc_queue[i];
   6618 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6619 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6620 
   6621 		/*
   6622 		 * TODO
    6623 		 * Currently, we use a constant ITR value instead of AIM
    6624 		 * (Adaptive Interrupt Moderation).  Furthermore, the interrupt
    6625 		 * interval used in multiqueue (polling) mode is shorter than
    6626 		 * the default value.  More tuning and AIM support are required.
   6627 		 */
   6628 		if (wm_is_using_multiqueue(sc))
   6629 			wmq->wmq_itr = 50;
   6630 		else
   6631 			wmq->wmq_itr = sc->sc_itr_init;
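         		/*
         		 * A rough sense of scale (assuming the ITR register
         		 * counts in 256 ns units, as on the 82575 and later):
         		 * wmq_itr = 50 caps the rate at about 1 / (50 * 256 ns),
         		 * i.e. roughly 78000 interrupts/s per queue.  The legacy
         		 * TIDV/RDTR timers tick in 1.024 us units instead, hence
         		 * the "wmq_itr / 4" scaling where those registers are
         		 * written.
         		 */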
   6632 		wmq->wmq_set_itr = true;
   6633 
   6634 		mutex_enter(txq->txq_lock);
   6635 		wm_init_tx_queue(sc, wmq, txq);
   6636 		mutex_exit(txq->txq_lock);
   6637 
   6638 		mutex_enter(rxq->rxq_lock);
   6639 		error = wm_init_rx_queue(sc, wmq, rxq);
   6640 		mutex_exit(rxq->rxq_lock);
   6641 		if (error)
   6642 			break;
   6643 	}
   6644 
   6645 	return error;
   6646 }
   6647 
   6648 /*
   6649  * wm_tx_offload:
   6650  *
   6651  *	Set up TCP/IP checksumming parameters for the
   6652  *	specified packet.
   6653  */
   6654 static int
   6655 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6656     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6657 {
   6658 	struct mbuf *m0 = txs->txs_mbuf;
   6659 	struct livengood_tcpip_ctxdesc *t;
   6660 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6661 	uint32_t ipcse;
   6662 	struct ether_header *eh;
   6663 	int offset, iphl;
   6664 	uint8_t fields;
   6665 
   6666 	/*
   6667 	 * XXX It would be nice if the mbuf pkthdr had offset
   6668 	 * fields for the protocol headers.
   6669 	 */
   6670 
   6671 	eh = mtod(m0, struct ether_header *);
   6672 	switch (htons(eh->ether_type)) {
   6673 	case ETHERTYPE_IP:
   6674 	case ETHERTYPE_IPV6:
   6675 		offset = ETHER_HDR_LEN;
   6676 		break;
   6677 
   6678 	case ETHERTYPE_VLAN:
   6679 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6680 		break;
   6681 
   6682 	default:
   6683 		/*
   6684 		 * Don't support this protocol or encapsulation.
   6685 		 */
   6686 		*fieldsp = 0;
   6687 		*cmdp = 0;
   6688 		return 0;
   6689 	}
   6690 
   6691 	if ((m0->m_pkthdr.csum_flags &
   6692 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6693 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6694 	} else {
   6695 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6696 	}
   6697 	ipcse = offset + iphl - 1;
   6698 
   6699 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6700 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6701 	seg = 0;
   6702 	fields = 0;
   6703 
   6704 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6705 		int hlen = offset + iphl;
   6706 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6707 
   6708 		if (__predict_false(m0->m_len <
   6709 				    (hlen + sizeof(struct tcphdr)))) {
   6710 			/*
   6711 			 * TCP/IP headers are not in the first mbuf; we need
   6712 			 * to do this the slow and painful way.  Let's just
   6713 			 * hope this doesn't happen very often.
   6714 			 */
   6715 			struct tcphdr th;
   6716 
   6717 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6718 
   6719 			m_copydata(m0, hlen, sizeof(th), &th);
   6720 			if (v4) {
   6721 				struct ip ip;
   6722 
   6723 				m_copydata(m0, offset, sizeof(ip), &ip);
   6724 				ip.ip_len = 0;
   6725 				m_copyback(m0,
   6726 				    offset + offsetof(struct ip, ip_len),
   6727 				    sizeof(ip.ip_len), &ip.ip_len);
   6728 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6729 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6730 			} else {
   6731 				struct ip6_hdr ip6;
   6732 
   6733 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6734 				ip6.ip6_plen = 0;
   6735 				m_copyback(m0,
   6736 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6737 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6738 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6739 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6740 			}
   6741 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6742 			    sizeof(th.th_sum), &th.th_sum);
   6743 
   6744 			hlen += th.th_off << 2;
   6745 		} else {
   6746 			/*
   6747 			 * TCP/IP headers are in the first mbuf; we can do
   6748 			 * this the easy way.
   6749 			 */
   6750 			struct tcphdr *th;
   6751 
   6752 			if (v4) {
   6753 				struct ip *ip =
   6754 				    (void *)(mtod(m0, char *) + offset);
   6755 				th = (void *)(mtod(m0, char *) + hlen);
   6756 
   6757 				ip->ip_len = 0;
   6758 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6759 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6760 			} else {
   6761 				struct ip6_hdr *ip6 =
   6762 				    (void *)(mtod(m0, char *) + offset);
   6763 				th = (void *)(mtod(m0, char *) + hlen);
   6764 
   6765 				ip6->ip6_plen = 0;
   6766 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6767 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6768 			}
   6769 			hlen += th->th_off << 2;
   6770 		}
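         		/*
         		 * In both paths above, the IP length field is cleared
         		 * and th_sum is seeded with a pseudo-header checksum
         		 * that deliberately omits the length: with TSE enabled,
         		 * the hardware splits the payload and fills in the
         		 * per-segment lengths and checksums itself.
         		 */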
   6771 
   6772 		if (v4) {
   6773 			WM_Q_EVCNT_INCR(txq, txtso);
   6774 			cmdlen |= WTX_TCPIP_CMD_IP;
   6775 		} else {
   6776 			WM_Q_EVCNT_INCR(txq, txtso6);
   6777 			ipcse = 0;
   6778 		}
   6779 		cmd |= WTX_TCPIP_CMD_TSE;
   6780 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6781 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6782 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6783 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6784 	}
   6785 
   6786 	/*
   6787 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6788 	 * offload feature, if we load the context descriptor, we
   6789 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6790 	 */
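         	/*
         	 * A worked example (for a plain IPv4 TCP packet, no VLAN,
         	 * 20-byte IP header): offset = 14 and iphl = 20, so IPCSS = 14,
         	 * IPCSO = 14 + 10 = 24 (where ip_sum lives) and IPCSE = 33 (the
         	 * last byte of the IP header); after "offset += iphl" below,
         	 * TUCSS = 34 and TUCSO = 34 + 16 = 50 (where th_sum lives).
         	 */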
   6791 
   6792 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6793 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6794 	    WTX_TCPIP_IPCSE(ipcse);
   6795 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6796 		WM_Q_EVCNT_INCR(txq, txipsum);
   6797 		fields |= WTX_IXSM;
   6798 	}
   6799 
   6800 	offset += iphl;
   6801 
   6802 	if (m0->m_pkthdr.csum_flags &
   6803 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6804 		WM_Q_EVCNT_INCR(txq, txtusum);
   6805 		fields |= WTX_TXSM;
   6806 		tucs = WTX_TCPIP_TUCSS(offset) |
   6807 		    WTX_TCPIP_TUCSO(offset +
   6808 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6809 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6810 	} else if ((m0->m_pkthdr.csum_flags &
   6811 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6812 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6813 		fields |= WTX_TXSM;
   6814 		tucs = WTX_TCPIP_TUCSS(offset) |
   6815 		    WTX_TCPIP_TUCSO(offset +
   6816 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6817 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6818 	} else {
   6819 		/* Just initialize it to a valid TCP context. */
   6820 		tucs = WTX_TCPIP_TUCSS(offset) |
   6821 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6822 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6823 	}
   6824 
   6825 	/*
    6826 	 * We don't have to write a context descriptor for every packet,
    6827 	 * except on the 82574: when two descriptor queues are used, the
    6828 	 * 82574 requires a context descriptor for every packet.
    6829 	 * Writing a context descriptor for every packet adds some
    6830 	 * overhead, but it does not cause problems.
   6831 	 */
   6832 	/* Fill in the context descriptor. */
   6833 	t = (struct livengood_tcpip_ctxdesc *)
   6834 	    &txq->txq_descs[txq->txq_next];
   6835 	t->tcpip_ipcs = htole32(ipcs);
   6836 	t->tcpip_tucs = htole32(tucs);
   6837 	t->tcpip_cmdlen = htole32(cmdlen);
   6838 	t->tcpip_seg = htole32(seg);
   6839 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6840 
   6841 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6842 	txs->txs_ndesc++;
   6843 
   6844 	*cmdp = cmd;
   6845 	*fieldsp = fields;
   6846 
   6847 	return 0;
   6848 }
   6849 
   6850 static inline int
   6851 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6852 {
   6853 	struct wm_softc *sc = ifp->if_softc;
   6854 	u_int cpuid = cpu_index(curcpu());
   6855 
   6856 	/*
    6857 	 * Currently, a simple distribution strategy.
    6858 	 * TODO:
    6859 	 * distribute by flowid (RSS hash value).
    6860 	 */
    6861 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6862 }
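
         /*
          * A possible flowid-based variant of wm_select_txqueue() (an
          * unenabled sketch; the mbuf packet header carries no RSS hash
          * here, so the rss_hash_valid()/m_pkthdr.flowid names below are
          * hypothetical):
          *
          *	if (rss_hash_valid(m))
          *		return m->m_pkthdr.flowid % sc->sc_nqueues;
          *	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
          */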
   6863 
   6864 /*
   6865  * wm_start:		[ifnet interface function]
   6866  *
   6867  *	Start packet transmission on the interface.
   6868  */
   6869 static void
   6870 wm_start(struct ifnet *ifp)
   6871 {
   6872 	struct wm_softc *sc = ifp->if_softc;
   6873 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6874 
   6875 #ifdef WM_MPSAFE
   6876 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6877 #endif
   6878 	/*
   6879 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6880 	 */
   6881 
   6882 	mutex_enter(txq->txq_lock);
   6883 	if (!txq->txq_stopping)
   6884 		wm_start_locked(ifp);
   6885 	mutex_exit(txq->txq_lock);
   6886 }
   6887 
   6888 static void
   6889 wm_start_locked(struct ifnet *ifp)
   6890 {
   6891 	struct wm_softc *sc = ifp->if_softc;
   6892 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6893 
   6894 	wm_send_common_locked(ifp, txq, false);
   6895 }
   6896 
   6897 static int
   6898 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6899 {
   6900 	int qid;
   6901 	struct wm_softc *sc = ifp->if_softc;
   6902 	struct wm_txqueue *txq;
   6903 
   6904 	qid = wm_select_txqueue(ifp, m);
   6905 	txq = &sc->sc_queue[qid].wmq_txq;
   6906 
   6907 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6908 		m_freem(m);
   6909 		WM_Q_EVCNT_INCR(txq, txdrop);
   6910 		return ENOBUFS;
   6911 	}
   6912 
   6913 	/*
   6914 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6915 	 */
   6916 	ifp->if_obytes += m->m_pkthdr.len;
   6917 	if (m->m_flags & M_MCAST)
   6918 		ifp->if_omcasts++;
   6919 
   6920 	if (mutex_tryenter(txq->txq_lock)) {
   6921 		if (!txq->txq_stopping)
   6922 			wm_transmit_locked(ifp, txq);
   6923 		mutex_exit(txq->txq_lock);
   6924 	}
   6925 
   6926 	return 0;
   6927 }
   6928 
   6929 static void
   6930 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6931 {
   6932 
   6933 	wm_send_common_locked(ifp, txq, true);
   6934 }
   6935 
   6936 static void
   6937 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6938     bool is_transmit)
   6939 {
   6940 	struct wm_softc *sc = ifp->if_softc;
   6941 	struct mbuf *m0;
   6942 	struct m_tag *mtag;
   6943 	struct wm_txsoft *txs;
   6944 	bus_dmamap_t dmamap;
   6945 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6946 	bus_addr_t curaddr;
   6947 	bus_size_t seglen, curlen;
   6948 	uint32_t cksumcmd;
   6949 	uint8_t cksumfields;
   6950 
   6951 	KASSERT(mutex_owned(txq->txq_lock));
   6952 
   6953 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6954 		return;
   6955 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6956 		return;
   6957 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6958 		return;
   6959 
   6960 	/* Remember the previous number of free descriptors. */
   6961 	ofree = txq->txq_free;
   6962 
   6963 	/*
   6964 	 * Loop through the send queue, setting up transmit descriptors
   6965 	 * until we drain the queue, or use up all available transmit
   6966 	 * descriptors.
   6967 	 */
   6968 	for (;;) {
   6969 		m0 = NULL;
   6970 
   6971 		/* Get a work queue entry. */
   6972 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6973 			wm_txeof(sc, txq);
   6974 			if (txq->txq_sfree == 0) {
   6975 				DPRINTF(WM_DEBUG_TX,
   6976 				    ("%s: TX: no free job descriptors\n",
   6977 					device_xname(sc->sc_dev)));
   6978 				WM_Q_EVCNT_INCR(txq, txsstall);
   6979 				break;
   6980 			}
   6981 		}
   6982 
   6983 		/* Grab a packet off the queue. */
   6984 		if (is_transmit)
   6985 			m0 = pcq_get(txq->txq_interq);
   6986 		else
   6987 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6988 		if (m0 == NULL)
   6989 			break;
   6990 
   6991 		DPRINTF(WM_DEBUG_TX,
   6992 		    ("%s: TX: have packet to transmit: %p\n",
   6993 		    device_xname(sc->sc_dev), m0));
   6994 
   6995 		txs = &txq->txq_soft[txq->txq_snext];
   6996 		dmamap = txs->txs_dmamap;
   6997 
   6998 		use_tso = (m0->m_pkthdr.csum_flags &
   6999 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7000 
   7001 		/*
   7002 		 * So says the Linux driver:
   7003 		 * The controller does a simple calculation to make sure
   7004 		 * there is enough room in the FIFO before initiating the
   7005 		 * DMA for each buffer.  The calc is:
   7006 		 *	4 = ceil(buffer len / MSS)
   7007 		 * To make sure we don't overrun the FIFO, adjust the max
   7008 		 * buffer len if the MSS drops.
   7009 		 */
   7010 		dmamap->dm_maxsegsz =
   7011 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7012 		    ? m0->m_pkthdr.segsz << 2
   7013 		    : WTX_MAX_LEN;
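         		/*
         		 * E.g. with an MSS of 1448, this caps each DMA segment
         		 * at 4 * 1448 = 5792 bytes (when that is below
         		 * WTX_MAX_LEN); longer buffers are then described by
         		 * several shorter descriptors instead of one long one.
         		 */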
   7014 
   7015 		/*
   7016 		 * Load the DMA map.  If this fails, the packet either
   7017 		 * didn't fit in the allotted number of segments, or we
   7018 		 * were short on resources.  For the too-many-segments
   7019 		 * case, we simply report an error and drop the packet,
   7020 		 * since we can't sanely copy a jumbo packet to a single
   7021 		 * buffer.
   7022 		 */
   7023 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7024 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7025 		if (error) {
   7026 			if (error == EFBIG) {
   7027 				WM_Q_EVCNT_INCR(txq, txdrop);
   7028 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7029 				    "DMA segments, dropping...\n",
   7030 				    device_xname(sc->sc_dev));
   7031 				wm_dump_mbuf_chain(sc, m0);
   7032 				m_freem(m0);
   7033 				continue;
   7034 			}
   7035 			/*  Short on resources, just stop for now. */
   7036 			DPRINTF(WM_DEBUG_TX,
   7037 			    ("%s: TX: dmamap load failed: %d\n",
   7038 			    device_xname(sc->sc_dev), error));
   7039 			break;
   7040 		}
   7041 
   7042 		segs_needed = dmamap->dm_nsegs;
   7043 		if (use_tso) {
   7044 			/* For sentinel descriptor; see below. */
   7045 			segs_needed++;
   7046 		}
   7047 
   7048 		/*
   7049 		 * Ensure we have enough descriptors free to describe
   7050 		 * the packet.  Note, we always reserve one descriptor
   7051 		 * at the end of the ring due to the semantics of the
   7052 		 * TDT register, plus one more in the event we need
   7053 		 * to load offload context.
   7054 		 */
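         		/*
         		 * E.g. a 3-segment packet with checksum offload needs
         		 * 3 data descriptors plus 1 context descriptor, and one
         		 * ring slot must always stay unused so that head == tail
         		 * unambiguously means "ring empty"; hence the "- 2" in
         		 * the test below.
         		 */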
   7055 		if (segs_needed > txq->txq_free - 2) {
   7056 			/*
   7057 			 * Not enough free descriptors to transmit this
   7058 			 * packet.  We haven't committed anything yet,
   7059 			 * so just unload the DMA map, put the packet
    7060 			 * back on the queue, and punt.  Notify the upper
   7061 			 * layer that there are no more slots left.
   7062 			 */
   7063 			DPRINTF(WM_DEBUG_TX,
   7064 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7065 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7066 			    segs_needed, txq->txq_free - 1));
   7067 			if (!is_transmit)
   7068 				ifp->if_flags |= IFF_OACTIVE;
   7069 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7070 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7071 			WM_Q_EVCNT_INCR(txq, txdstall);
   7072 			break;
   7073 		}
   7074 
   7075 		/*
   7076 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7077 		 * once we know we can transmit the packet, since we
   7078 		 * do some internal FIFO space accounting here.
   7079 		 */
   7080 		if (sc->sc_type == WM_T_82547 &&
   7081 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7082 			DPRINTF(WM_DEBUG_TX,
   7083 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7084 			    device_xname(sc->sc_dev)));
   7085 			if (!is_transmit)
   7086 				ifp->if_flags |= IFF_OACTIVE;
   7087 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7088 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7089 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7090 			break;
   7091 		}
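         		/*
         		 * (The bug being checked: the 82547 can hang if a packet
         		 * wraps past the end of its Tx packet FIFO, so
         		 * wm_82547_txfifo_bugchk() tracks FIFO usage and asks us
         		 * to stall until the FIFO drains rather than queue a
         		 * packet that would wrap.)
         		 */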
   7092 
   7093 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7094 
   7095 		DPRINTF(WM_DEBUG_TX,
   7096 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7097 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7098 
   7099 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7100 
   7101 		/*
   7102 		 * Store a pointer to the packet so that we can free it
   7103 		 * later.
   7104 		 *
    7105 		 * Initially, we consider the number of descriptors the
    7106 		 * packet uses to be the number of DMA segments.  This may
    7107 		 * be incremented by 1 if we do checksum offload (a descriptor
   7108 		 * is used to set the checksum context).
   7109 		 */
   7110 		txs->txs_mbuf = m0;
   7111 		txs->txs_firstdesc = txq->txq_next;
   7112 		txs->txs_ndesc = segs_needed;
   7113 
   7114 		/* Set up offload parameters for this packet. */
   7115 		if (m0->m_pkthdr.csum_flags &
   7116 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7117 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7118 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7119 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7120 					  &cksumfields) != 0) {
   7121 				/* Error message already displayed. */
   7122 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7123 				continue;
   7124 			}
   7125 		} else {
   7126 			cksumcmd = 0;
   7127 			cksumfields = 0;
   7128 		}
   7129 
   7130 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7131 
   7132 		/* Sync the DMA map. */
   7133 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7134 		    BUS_DMASYNC_PREWRITE);
   7135 
   7136 		/* Initialize the transmit descriptor. */
   7137 		for (nexttx = txq->txq_next, seg = 0;
   7138 		     seg < dmamap->dm_nsegs; seg++) {
   7139 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7140 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7141 			     seglen != 0;
   7142 			     curaddr += curlen, seglen -= curlen,
   7143 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7144 				curlen = seglen;
   7145 
   7146 				/*
   7147 				 * So says the Linux driver:
   7148 				 * Work around for premature descriptor
   7149 				 * write-backs in TSO mode.  Append a
   7150 				 * 4-byte sentinel descriptor.
   7151 				 */
   7152 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7153 				    curlen > 8)
   7154 					curlen -= 4;
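         				/*
         				 * The 4 bytes shaved off here come
         				 * back on the next pass through this
         				 * loop as the short sentinel
         				 * descriptor that segs_needed was
         				 * bumped for above.
         				 */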
   7155 
   7156 				wm_set_dma_addr(
   7157 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7158 				txq->txq_descs[nexttx].wtx_cmdlen
   7159 				    = htole32(cksumcmd | curlen);
   7160 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7161 				    = 0;
   7162 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7163 				    = cksumfields;
    7164 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7165 				lasttx = nexttx;
   7166 
   7167 				DPRINTF(WM_DEBUG_TX,
   7168 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7169 				     "len %#04zx\n",
   7170 				    device_xname(sc->sc_dev), nexttx,
   7171 				    (uint64_t)curaddr, curlen));
   7172 			}
   7173 		}
   7174 
   7175 		KASSERT(lasttx != -1);
   7176 
   7177 		/*
   7178 		 * Set up the command byte on the last descriptor of
   7179 		 * the packet.  If we're in the interrupt delay window,
   7180 		 * delay the interrupt.
   7181 		 */
   7182 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7183 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7184 
   7185 		/*
   7186 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7187 		 * up the descriptor to encapsulate the packet for us.
   7188 		 *
   7189 		 * This is only valid on the last descriptor of the packet.
   7190 		 */
   7191 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7192 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7193 			    htole32(WTX_CMD_VLE);
   7194 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7195 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7196 		}
   7197 
   7198 		txs->txs_lastdesc = lasttx;
   7199 
   7200 		DPRINTF(WM_DEBUG_TX,
   7201 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7202 		    device_xname(sc->sc_dev),
   7203 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7204 
   7205 		/* Sync the descriptors we're using. */
   7206 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7207 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7208 
   7209 		/* Give the packet to the chip. */
   7210 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7211 
   7212 		DPRINTF(WM_DEBUG_TX,
   7213 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7214 
   7215 		DPRINTF(WM_DEBUG_TX,
   7216 		    ("%s: TX: finished transmitting packet, job %d\n",
   7217 		    device_xname(sc->sc_dev), txq->txq_snext));
   7218 
   7219 		/* Advance the tx pointer. */
   7220 		txq->txq_free -= txs->txs_ndesc;
   7221 		txq->txq_next = nexttx;
   7222 
   7223 		txq->txq_sfree--;
   7224 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7225 
   7226 		/* Pass the packet to any BPF listeners. */
   7227 		bpf_mtap(ifp, m0);
   7228 	}
   7229 
   7230 	if (m0 != NULL) {
   7231 		if (!is_transmit)
   7232 			ifp->if_flags |= IFF_OACTIVE;
   7233 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7234 		WM_Q_EVCNT_INCR(txq, txdrop);
   7235 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7236 			__func__));
   7237 		m_freem(m0);
   7238 	}
   7239 
   7240 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7241 		/* No more slots; notify upper layer. */
   7242 		if (!is_transmit)
   7243 			ifp->if_flags |= IFF_OACTIVE;
   7244 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7245 	}
   7246 
   7247 	if (txq->txq_free != ofree) {
   7248 		/* Set a watchdog timer in case the chip flakes out. */
   7249 		ifp->if_timer = 5;
   7250 	}
   7251 }
   7252 
   7253 /*
   7254  * wm_nq_tx_offload:
   7255  *
   7256  *	Set up TCP/IP checksumming parameters for the
   7257  *	specified packet, for NEWQUEUE devices
   7258  */
   7259 static int
   7260 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7261     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7262 {
   7263 	struct mbuf *m0 = txs->txs_mbuf;
   7264 	struct m_tag *mtag;
   7265 	uint32_t vl_len, mssidx, cmdc;
   7266 	struct ether_header *eh;
   7267 	int offset, iphl;
   7268 
   7269 	/*
   7270 	 * XXX It would be nice if the mbuf pkthdr had offset
   7271 	 * fields for the protocol headers.
   7272 	 */
   7273 	*cmdlenp = 0;
   7274 	*fieldsp = 0;
   7275 
   7276 	eh = mtod(m0, struct ether_header *);
   7277 	switch (htons(eh->ether_type)) {
   7278 	case ETHERTYPE_IP:
   7279 	case ETHERTYPE_IPV6:
   7280 		offset = ETHER_HDR_LEN;
   7281 		break;
   7282 
   7283 	case ETHERTYPE_VLAN:
   7284 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7285 		break;
   7286 
   7287 	default:
   7288 		/* Don't support this protocol or encapsulation. */
   7289 		*do_csum = false;
   7290 		return 0;
   7291 	}
   7292 	*do_csum = true;
   7293 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7294 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7295 
   7296 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7297 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7298 
   7299 	if ((m0->m_pkthdr.csum_flags &
   7300 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7301 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7302 	} else {
   7303 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7304 	}
   7305 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7306 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7307 
   7308 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7309 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7310 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7311 		*cmdlenp |= NQTX_CMD_VLE;
   7312 	}
   7313 
   7314 	mssidx = 0;
   7315 
   7316 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7317 		int hlen = offset + iphl;
   7318 		int tcp_hlen;
   7319 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7320 
   7321 		if (__predict_false(m0->m_len <
   7322 				    (hlen + sizeof(struct tcphdr)))) {
   7323 			/*
   7324 			 * TCP/IP headers are not in the first mbuf; we need
   7325 			 * to do this the slow and painful way.  Let's just
   7326 			 * hope this doesn't happen very often.
   7327 			 */
   7328 			struct tcphdr th;
   7329 
   7330 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7331 
   7332 			m_copydata(m0, hlen, sizeof(th), &th);
   7333 			if (v4) {
   7334 				struct ip ip;
   7335 
   7336 				m_copydata(m0, offset, sizeof(ip), &ip);
   7337 				ip.ip_len = 0;
   7338 				m_copyback(m0,
   7339 				    offset + offsetof(struct ip, ip_len),
   7340 				    sizeof(ip.ip_len), &ip.ip_len);
   7341 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7342 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7343 			} else {
   7344 				struct ip6_hdr ip6;
   7345 
   7346 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7347 				ip6.ip6_plen = 0;
   7348 				m_copyback(m0,
   7349 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7350 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7351 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7352 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7353 			}
   7354 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7355 			    sizeof(th.th_sum), &th.th_sum);
   7356 
   7357 			tcp_hlen = th.th_off << 2;
   7358 		} else {
   7359 			/*
   7360 			 * TCP/IP headers are in the first mbuf; we can do
   7361 			 * this the easy way.
   7362 			 */
   7363 			struct tcphdr *th;
   7364 
   7365 			if (v4) {
   7366 				struct ip *ip =
   7367 				    (void *)(mtod(m0, char *) + offset);
   7368 				th = (void *)(mtod(m0, char *) + hlen);
   7369 
   7370 				ip->ip_len = 0;
   7371 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7372 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7373 			} else {
   7374 				struct ip6_hdr *ip6 =
   7375 				    (void *)(mtod(m0, char *) + offset);
   7376 				th = (void *)(mtod(m0, char *) + hlen);
   7377 
   7378 				ip6->ip6_plen = 0;
   7379 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7380 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7381 			}
   7382 			tcp_hlen = th->th_off << 2;
   7383 		}
   7384 		hlen += tcp_hlen;
   7385 		*cmdlenp |= NQTX_CMD_TSE;
   7386 
   7387 		if (v4) {
   7388 			WM_Q_EVCNT_INCR(txq, txtso);
   7389 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7390 		} else {
   7391 			WM_Q_EVCNT_INCR(txq, txtso6);
   7392 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7393 		}
   7394 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7395 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7396 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7397 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7398 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7399 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7400 	} else {
   7401 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7402 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7403 	}
   7404 
   7405 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7406 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7407 		cmdc |= NQTXC_CMD_IP4;
   7408 	}
   7409 
   7410 	if (m0->m_pkthdr.csum_flags &
   7411 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7412 		WM_Q_EVCNT_INCR(txq, txtusum);
   7413 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7414 			cmdc |= NQTXC_CMD_TCP;
   7415 		} else {
   7416 			cmdc |= NQTXC_CMD_UDP;
   7417 		}
   7418 		cmdc |= NQTXC_CMD_IP4;
   7419 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7420 	}
   7421 	if (m0->m_pkthdr.csum_flags &
   7422 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7423 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7424 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7425 			cmdc |= NQTXC_CMD_TCP;
   7426 		} else {
   7427 			cmdc |= NQTXC_CMD_UDP;
   7428 		}
   7429 		cmdc |= NQTXC_CMD_IP6;
   7430 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7431 	}
   7432 
   7433 	/*
    7434 	 * We don't have to write a context descriptor for every packet on
    7435 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7436 	 * I354, I210 and I211; for these controllers it is enough to write
    7437 	 * one per Tx queue.
    7438 	 * Writing a context descriptor for every packet adds some overhead,
    7439 	 * but it does not cause problems.
   7440 	 */
   7441 	/* Fill in the context descriptor. */
   7442 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7443 	    htole32(vl_len);
   7444 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7445 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7446 	    htole32(cmdc);
   7447 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7448 	    htole32(mssidx);
   7449 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7450 	DPRINTF(WM_DEBUG_TX,
   7451 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7452 	    txq->txq_next, 0, vl_len));
   7453 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7454 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7455 	txs->txs_ndesc++;
   7456 	return 0;
   7457 }
   7458 
   7459 /*
   7460  * wm_nq_start:		[ifnet interface function]
   7461  *
   7462  *	Start packet transmission on the interface for NEWQUEUE devices
   7463  */
   7464 static void
   7465 wm_nq_start(struct ifnet *ifp)
   7466 {
   7467 	struct wm_softc *sc = ifp->if_softc;
   7468 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7469 
   7470 #ifdef WM_MPSAFE
   7471 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7472 #endif
   7473 	/*
   7474 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7475 	 */
   7476 
   7477 	mutex_enter(txq->txq_lock);
   7478 	if (!txq->txq_stopping)
   7479 		wm_nq_start_locked(ifp);
   7480 	mutex_exit(txq->txq_lock);
   7481 }
   7482 
   7483 static void
   7484 wm_nq_start_locked(struct ifnet *ifp)
   7485 {
   7486 	struct wm_softc *sc = ifp->if_softc;
   7487 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7488 
   7489 	wm_nq_send_common_locked(ifp, txq, false);
   7490 }
   7491 
   7492 static int
   7493 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7494 {
   7495 	int qid;
   7496 	struct wm_softc *sc = ifp->if_softc;
   7497 	struct wm_txqueue *txq;
   7498 
   7499 	qid = wm_select_txqueue(ifp, m);
   7500 	txq = &sc->sc_queue[qid].wmq_txq;
   7501 
   7502 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7503 		m_freem(m);
   7504 		WM_Q_EVCNT_INCR(txq, txdrop);
   7505 		return ENOBUFS;
   7506 	}
   7507 
   7508 	/*
   7509 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7510 	 */
   7511 	ifp->if_obytes += m->m_pkthdr.len;
   7512 	if (m->m_flags & M_MCAST)
   7513 		ifp->if_omcasts++;
   7514 
   7515 	/*
    7516 	 * There are two situations in which this mutex_tryenter() can fail
    7517 	 * at run time:
    7518 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7519 	 *     (2) contention with the deferred if_start softint
    7520 	 *         (wm_handle_queue())
    7521 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7522 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7523 	 * The same holds in case (2).
   7524 	 */
   7525 	if (mutex_tryenter(txq->txq_lock)) {
   7526 		if (!txq->txq_stopping)
   7527 			wm_nq_transmit_locked(ifp, txq);
   7528 		mutex_exit(txq->txq_lock);
   7529 	}
   7530 
   7531 	return 0;
   7532 }
   7533 
   7534 static void
   7535 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7536 {
   7537 
   7538 	wm_nq_send_common_locked(ifp, txq, true);
   7539 }
   7540 
   7541 static void
   7542 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7543     bool is_transmit)
   7544 {
   7545 	struct wm_softc *sc = ifp->if_softc;
   7546 	struct mbuf *m0;
   7547 	struct m_tag *mtag;
   7548 	struct wm_txsoft *txs;
   7549 	bus_dmamap_t dmamap;
   7550 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7551 	bool do_csum, sent;
   7552 
   7553 	KASSERT(mutex_owned(txq->txq_lock));
   7554 
   7555 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7556 		return;
   7557 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7558 		return;
   7559 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7560 		return;
   7561 
   7562 	sent = false;
   7563 
   7564 	/*
   7565 	 * Loop through the send queue, setting up transmit descriptors
   7566 	 * until we drain the queue, or use up all available transmit
   7567 	 * descriptors.
   7568 	 */
   7569 	for (;;) {
   7570 		m0 = NULL;
   7571 
   7572 		/* Get a work queue entry. */
   7573 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7574 			wm_txeof(sc, txq);
   7575 			if (txq->txq_sfree == 0) {
   7576 				DPRINTF(WM_DEBUG_TX,
   7577 				    ("%s: TX: no free job descriptors\n",
   7578 					device_xname(sc->sc_dev)));
   7579 				WM_Q_EVCNT_INCR(txq, txsstall);
   7580 				break;
   7581 			}
   7582 		}
   7583 
   7584 		/* Grab a packet off the queue. */
   7585 		if (is_transmit)
   7586 			m0 = pcq_get(txq->txq_interq);
   7587 		else
   7588 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7589 		if (m0 == NULL)
   7590 			break;
   7591 
   7592 		DPRINTF(WM_DEBUG_TX,
   7593 		    ("%s: TX: have packet to transmit: %p\n",
   7594 		    device_xname(sc->sc_dev), m0));
   7595 
   7596 		txs = &txq->txq_soft[txq->txq_snext];
   7597 		dmamap = txs->txs_dmamap;
   7598 
   7599 		/*
   7600 		 * Load the DMA map.  If this fails, the packet either
   7601 		 * didn't fit in the allotted number of segments, or we
   7602 		 * were short on resources.  For the too-many-segments
   7603 		 * case, we simply report an error and drop the packet,
   7604 		 * since we can't sanely copy a jumbo packet to a single
   7605 		 * buffer.
   7606 		 */
   7607 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7608 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7609 		if (error) {
   7610 			if (error == EFBIG) {
   7611 				WM_Q_EVCNT_INCR(txq, txdrop);
   7612 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7613 				    "DMA segments, dropping...\n",
   7614 				    device_xname(sc->sc_dev));
   7615 				wm_dump_mbuf_chain(sc, m0);
   7616 				m_freem(m0);
   7617 				continue;
   7618 			}
   7619 			/* Short on resources, just stop for now. */
   7620 			DPRINTF(WM_DEBUG_TX,
   7621 			    ("%s: TX: dmamap load failed: %d\n",
   7622 			    device_xname(sc->sc_dev), error));
   7623 			break;
   7624 		}
   7625 
   7626 		segs_needed = dmamap->dm_nsegs;
   7627 
   7628 		/*
   7629 		 * Ensure we have enough descriptors free to describe
   7630 		 * the packet.  Note, we always reserve one descriptor
   7631 		 * at the end of the ring due to the semantics of the
   7632 		 * TDT register, plus one more in the event we need
   7633 		 * to load offload context.
   7634 		 */
   7635 		if (segs_needed > txq->txq_free - 2) {
   7636 			/*
   7637 			 * Not enough free descriptors to transmit this
   7638 			 * packet.  We haven't committed anything yet,
   7639 			 * so just unload the DMA map, put the packet
    7640 			 * back on the queue, and punt.  Notify the upper
   7641 			 * layer that there are no more slots left.
   7642 			 */
   7643 			DPRINTF(WM_DEBUG_TX,
   7644 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7645 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7646 			    segs_needed, txq->txq_free - 1));
   7647 			if (!is_transmit)
   7648 				ifp->if_flags |= IFF_OACTIVE;
   7649 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7650 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7651 			WM_Q_EVCNT_INCR(txq, txdstall);
   7652 			break;
   7653 		}
   7654 
   7655 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7656 
   7657 		DPRINTF(WM_DEBUG_TX,
   7658 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7659 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7660 
   7661 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7662 
   7663 		/*
   7664 		 * Store a pointer to the packet so that we can free it
   7665 		 * later.
   7666 		 *
    7667 		 * Initially, we consider the number of descriptors the
    7668 		 * packet uses to be the number of DMA segments.  This may
    7669 		 * be incremented by 1 if we do checksum offload (a descriptor
   7670 		 * is used to set the checksum context).
   7671 		 */
   7672 		txs->txs_mbuf = m0;
   7673 		txs->txs_firstdesc = txq->txq_next;
   7674 		txs->txs_ndesc = segs_needed;
   7675 
   7676 		/* Set up offload parameters for this packet. */
   7677 		uint32_t cmdlen, fields, dcmdlen;
   7678 		if (m0->m_pkthdr.csum_flags &
   7679 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7680 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7681 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7682 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7683 			    &do_csum) != 0) {
   7684 				/* Error message already displayed. */
   7685 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7686 				continue;
   7687 			}
   7688 		} else {
   7689 			do_csum = false;
   7690 			cmdlen = 0;
   7691 			fields = 0;
   7692 		}
   7693 
   7694 		/* Sync the DMA map. */
   7695 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7696 		    BUS_DMASYNC_PREWRITE);
   7697 
   7698 		/* Initialize the first transmit descriptor. */
   7699 		nexttx = txq->txq_next;
   7700 		if (!do_csum) {
   7701 			/* setup a legacy descriptor */
   7702 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7703 			    dmamap->dm_segs[0].ds_addr);
   7704 			txq->txq_descs[nexttx].wtx_cmdlen =
   7705 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7706 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7707 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7708 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7709 			    NULL) {
   7710 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7711 				    htole32(WTX_CMD_VLE);
   7712 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7713 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7714 			} else {
    7715 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7716 			}
   7717 			dcmdlen = 0;
   7718 		} else {
   7719 			/* setup an advanced data descriptor */
   7720 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7721 			    htole64(dmamap->dm_segs[0].ds_addr);
   7722 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7723 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7724 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7725 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7726 			    htole32(fields);
   7727 			DPRINTF(WM_DEBUG_TX,
   7728 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7729 			    device_xname(sc->sc_dev), nexttx,
   7730 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7731 			DPRINTF(WM_DEBUG_TX,
   7732 			    ("\t 0x%08x%08x\n", fields,
   7733 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7734 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7735 		}
   7736 
   7737 		lasttx = nexttx;
   7738 		nexttx = WM_NEXTTX(txq, nexttx);
   7739 		/*
    7740 		 * Fill in the next descriptors.  The legacy and advanced
    7741 		 * formats are the same here.
   7742 		 */
   7743 		for (seg = 1; seg < dmamap->dm_nsegs;
   7744 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7745 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7746 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7747 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7748 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7749 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7751 			lasttx = nexttx;
   7752 
   7753 			DPRINTF(WM_DEBUG_TX,
   7754 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7755 			     "len %#04zx\n",
   7756 			    device_xname(sc->sc_dev), nexttx,
   7757 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7758 			    dmamap->dm_segs[seg].ds_len));
   7759 		}
   7760 
   7761 		KASSERT(lasttx != -1);
   7762 
   7763 		/*
   7764 		 * Set up the command byte on the last descriptor of
   7765 		 * the packet.  If we're in the interrupt delay window,
   7766 		 * delay the interrupt.
   7767 		 */
   7768 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7769 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7770 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7771 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7772 
   7773 		txs->txs_lastdesc = lasttx;
   7774 
   7775 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7776 		    device_xname(sc->sc_dev),
   7777 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7778 
   7779 		/* Sync the descriptors we're using. */
   7780 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7781 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7782 
   7783 		/* Give the packet to the chip. */
   7784 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7785 		sent = true;
   7786 
   7787 		DPRINTF(WM_DEBUG_TX,
   7788 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7789 
   7790 		DPRINTF(WM_DEBUG_TX,
   7791 		    ("%s: TX: finished transmitting packet, job %d\n",
   7792 		    device_xname(sc->sc_dev), txq->txq_snext));
   7793 
   7794 		/* Advance the tx pointer. */
   7795 		txq->txq_free -= txs->txs_ndesc;
   7796 		txq->txq_next = nexttx;
   7797 
   7798 		txq->txq_sfree--;
   7799 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7800 
   7801 		/* Pass the packet to any BPF listeners. */
   7802 		bpf_mtap(ifp, m0);
   7803 	}
   7804 
   7805 	if (m0 != NULL) {
   7806 		if (!is_transmit)
   7807 			ifp->if_flags |= IFF_OACTIVE;
   7808 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7809 		WM_Q_EVCNT_INCR(txq, txdrop);
   7810 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7811 			__func__));
   7812 		m_freem(m0);
   7813 	}
   7814 
   7815 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7816 		/* No more slots; notify upper layer. */
   7817 		if (!is_transmit)
   7818 			ifp->if_flags |= IFF_OACTIVE;
   7819 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7820 	}
   7821 
   7822 	if (sent) {
   7823 		/* Set a watchdog timer in case the chip flakes out. */
   7824 		ifp->if_timer = 5;
   7825 	}
   7826 }
   7827 
   7828 static void
   7829 wm_deferred_start_locked(struct wm_txqueue *txq)
   7830 {
   7831 	struct wm_softc *sc = txq->txq_sc;
   7832 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7833 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7834 	int qid = wmq->wmq_id;
   7835 
   7836 	KASSERT(mutex_owned(txq->txq_lock));
   7837 
   7838 	if (txq->txq_stopping) {
   7839 		mutex_exit(txq->txq_lock);
   7840 		return;
   7841 	}
   7842 
   7843 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7844 		/* XXX Needed for ALTQ or single-CPU systems */
   7845 		if (qid == 0)
   7846 			wm_nq_start_locked(ifp);
   7847 		wm_nq_transmit_locked(ifp, txq);
   7848 	} else {
    7849 		/* XXX Needed for ALTQ or single-CPU systems */
   7850 		if (qid == 0)
   7851 			wm_start_locked(ifp);
   7852 		wm_transmit_locked(ifp, txq);
   7853 	}
   7854 }
   7855 
   7856 /* Interrupt */
   7857 
   7858 /*
   7859  * wm_txeof:
   7860  *
   7861  *	Helper; handle transmit interrupts.
   7862  */
   7863 static int
   7864 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7865 {
   7866 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7867 	struct wm_txsoft *txs;
   7868 	bool processed = false;
   7869 	int count = 0;
   7870 	int i;
   7871 	uint8_t status;
   7872 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7873 
   7874 	KASSERT(mutex_owned(txq->txq_lock));
   7875 
   7876 	if (txq->txq_stopping)
   7877 		return 0;
   7878 
   7879 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7880 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7881 	if (wmq->wmq_id == 0)
   7882 		ifp->if_flags &= ~IFF_OACTIVE;
   7883 
   7884 	/*
   7885 	 * Go through the Tx list and free mbufs for those
   7886 	 * frames which have been transmitted.
   7887 	 */
   7888 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7889 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7890 		txs = &txq->txq_soft[i];
   7891 
   7892 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7893 			device_xname(sc->sc_dev), i));
   7894 
   7895 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7896 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7897 
   7898 		status =
   7899 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7900 		if ((status & WTX_ST_DD) == 0) {
   7901 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7902 			    BUS_DMASYNC_PREREAD);
   7903 			break;
   7904 		}
   7905 
   7906 		processed = true;
   7907 		count++;
   7908 		DPRINTF(WM_DEBUG_TX,
   7909 		    ("%s: TX: job %d done: descs %d..%d\n",
   7910 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7911 		    txs->txs_lastdesc));
   7912 
   7913 		/*
   7914 		 * XXX We should probably be using the statistics
   7915 		 * XXX registers, but I don't know if they exist
   7916 		 * XXX on chips before the i82544.
   7917 		 */
   7918 
   7919 #ifdef WM_EVENT_COUNTERS
   7920 		if (status & WTX_ST_TU)
   7921 			WM_Q_EVCNT_INCR(txq, tu);
   7922 #endif /* WM_EVENT_COUNTERS */
   7923 
   7924 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7925 			ifp->if_oerrors++;
   7926 			if (status & WTX_ST_LC)
   7927 				log(LOG_WARNING, "%s: late collision\n",
   7928 				    device_xname(sc->sc_dev));
   7929 			else if (status & WTX_ST_EC) {
   7930 				ifp->if_collisions += 16;
   7931 				log(LOG_WARNING, "%s: excessive collisions\n",
   7932 				    device_xname(sc->sc_dev));
   7933 			}
   7934 		} else
   7935 			ifp->if_opackets++;
   7936 
   7937 		txq->txq_packets++;
   7938 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7939 
   7940 		txq->txq_free += txs->txs_ndesc;
   7941 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7942 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7943 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7944 		m_freem(txs->txs_mbuf);
   7945 		txs->txs_mbuf = NULL;
   7946 	}
   7947 
   7948 	/* Update the dirty transmit buffer pointer. */
   7949 	txq->txq_sdirty = i;
   7950 	DPRINTF(WM_DEBUG_TX,
   7951 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7952 
   7953 	if (count != 0)
   7954 		rnd_add_uint32(&sc->rnd_source, count);
   7955 
   7956 	/*
   7957 	 * If there are no more pending transmissions, cancel the watchdog
   7958 	 * timer.
   7959 	 */
   7960 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7961 		ifp->if_timer = 0;
   7962 
   7963 	return processed;
   7964 }
   7965 
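         /*
          * Rx descriptor accessors.
          *
          *	Three receive descriptor layouts are handled below: the
          *	82574's extended descriptors, the layout used when
          *	WM_F_NEWQUEUE is set, and the legacy layout used by all
          *	other chips.  These inline helpers hide the differences
          *	so that wm_rxeof() can stay format-agnostic.
          */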
   7966 static inline uint32_t
   7967 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7968 {
   7969 	struct wm_softc *sc = rxq->rxq_sc;
   7970 
   7971 	if (sc->sc_type == WM_T_82574)
   7972 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7973 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7974 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7975 	else
   7976 		return rxq->rxq_descs[idx].wrx_status;
   7977 }
   7978 
   7979 static inline uint32_t
   7980 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7981 {
   7982 	struct wm_softc *sc = rxq->rxq_sc;
   7983 
   7984 	if (sc->sc_type == WM_T_82574)
   7985 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7986 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7987 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7988 	else
   7989 		return rxq->rxq_descs[idx].wrx_errors;
   7990 }
   7991 
   7992 static inline uint16_t
   7993 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7994 {
   7995 	struct wm_softc *sc = rxq->rxq_sc;
   7996 
   7997 	if (sc->sc_type == WM_T_82574)
   7998 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7999 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8000 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8001 	else
   8002 		return rxq->rxq_descs[idx].wrx_special;
   8003 }
   8004 
   8005 static inline int
   8006 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8007 {
   8008 	struct wm_softc *sc = rxq->rxq_sc;
   8009 
   8010 	if (sc->sc_type == WM_T_82574)
   8011 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8012 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8013 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8014 	else
   8015 		return rxq->rxq_descs[idx].wrx_len;
   8016 }
   8017 
   8018 #ifdef WM_DEBUG
   8019 static inline uint32_t
   8020 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8021 {
   8022 	struct wm_softc *sc = rxq->rxq_sc;
   8023 
   8024 	if (sc->sc_type == WM_T_82574)
   8025 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8026 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8027 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8028 	else
   8029 		return 0;
   8030 }
   8031 
   8032 static inline uint8_t
   8033 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8034 {
   8035 	struct wm_softc *sc = rxq->rxq_sc;
   8036 
   8037 	if (sc->sc_type == WM_T_82574)
   8038 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8039 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8040 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8041 	else
   8042 		return 0;
   8043 }
   8044 #endif /* WM_DEBUG */
   8045 
   8046 static inline bool
   8047 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8048     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8049 {
   8050 
   8051 	if (sc->sc_type == WM_T_82574)
   8052 		return (status & ext_bit) != 0;
   8053 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8054 		return (status & nq_bit) != 0;
   8055 	else
   8056 		return (status & legacy_bit) != 0;
   8057 }
   8058 
   8059 static inline bool
   8060 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8061     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8062 {
   8063 
   8064 	if (sc->sc_type == WM_T_82574)
   8065 		return (error & ext_bit) != 0;
   8066 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8067 		return (error & nq_bit) != 0;
   8068 	else
   8069 		return (error & legacy_bit) != 0;
   8070 }
   8071 
   8072 static inline bool
   8073 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8074 {
   8075 
   8076 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8077 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8078 		return true;
   8079 	else
   8080 		return false;
   8081 }
   8082 
   8083 static inline bool
   8084 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8085 {
   8086 	struct wm_softc *sc = rxq->rxq_sc;
   8087 
   8088 	/* XXXX missing error bit for newqueue? */
   8089 	if (wm_rxdesc_is_set_error(sc, errors,
   8090 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8091 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8092 		NQRXC_ERROR_RXE)) {
   8093 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8094 			log(LOG_WARNING, "%s: symbol error\n",
   8095 			    device_xname(sc->sc_dev));
   8096 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8097 			log(LOG_WARNING, "%s: receive sequence error\n",
   8098 			    device_xname(sc->sc_dev));
   8099 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8100 			log(LOG_WARNING, "%s: CRC error\n",
   8101 			    device_xname(sc->sc_dev));
   8102 		return true;
   8103 	}
   8104 
   8105 	return false;
   8106 }
   8107 
   8108 static inline bool
   8109 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8110 {
   8111 	struct wm_softc *sc = rxq->rxq_sc;
   8112 
   8113 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8114 		NQRXC_STATUS_DD)) {
   8115 		/* We have processed all of the receive descriptors. */
   8116 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8117 		return false;
   8118 	}
   8119 
   8120 	return true;
   8121 }
   8122 
   8123 static inline bool
   8124 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8125     struct mbuf *m)
   8126 {
   8127 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8128 
   8129 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8130 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8131 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8132 	}
   8133 
   8134 	return true;
   8135 }
   8136 
   8137 static inline void
   8138 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8139     uint32_t errors, struct mbuf *m)
   8140 {
   8141 	struct wm_softc *sc = rxq->rxq_sc;
   8142 
   8143 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8144 		if (wm_rxdesc_is_set_status(sc, status,
   8145 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8146 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8147 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8148 			if (wm_rxdesc_is_set_error(sc, errors,
   8149 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8150 				m->m_pkthdr.csum_flags |=
   8151 					M_CSUM_IPv4_BAD;
   8152 		}
   8153 		if (wm_rxdesc_is_set_status(sc, status,
   8154 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8155 			/*
   8156 			 * Note: we don't know if this was TCP or UDP,
   8157 			 * so we just set both bits, and expect the
   8158 			 * upper layers to deal.
   8159 			 */
   8160 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8161 			m->m_pkthdr.csum_flags |=
   8162 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8163 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8164 			if (wm_rxdesc_is_set_error(sc, errors,
   8165 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8166 				m->m_pkthdr.csum_flags |=
   8167 					M_CSUM_TCP_UDP_BAD;
   8168 		}
   8169 	}
   8170 }
   8171 
   8172 /*
   8173  * wm_rxeof:
   8174  *
    8175  *	Helper; handle receive interrupts, processing at most limit descriptors.
   8176  */
   8177 static void
   8178 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8179 {
   8180 	struct wm_softc *sc = rxq->rxq_sc;
   8181 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8182 	struct wm_rxsoft *rxs;
   8183 	struct mbuf *m;
   8184 	int i, len;
   8185 	int count = 0;
   8186 	uint32_t status, errors;
   8187 	uint16_t vlantag;
   8188 
   8189 	KASSERT(mutex_owned(rxq->rxq_lock));
   8190 
   8191 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8192 		if (limit-- == 0) {
   8193 			rxq->rxq_ptr = i;
   8194 			break;
   8195 		}
   8196 
   8197 		rxs = &rxq->rxq_soft[i];
   8198 
   8199 		DPRINTF(WM_DEBUG_RX,
   8200 		    ("%s: RX: checking descriptor %d\n",
   8201 		    device_xname(sc->sc_dev), i));
    8202 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8203 
   8204 		status = wm_rxdesc_get_status(rxq, i);
   8205 		errors = wm_rxdesc_get_errors(rxq, i);
   8206 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8207 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8208 #ifdef WM_DEBUG
   8209 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8210 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8211 #endif
   8212 
   8213 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8214 			/*
    8215 			 * Update the receive pointer while still holding
    8216 			 * rxq_lock so it stays consistent with the counters.
   8217 			 */
   8218 			rxq->rxq_ptr = i;
   8219 			break;
   8220 		}
   8221 
   8222 		count++;
   8223 		if (__predict_false(rxq->rxq_discard)) {
   8224 			DPRINTF(WM_DEBUG_RX,
   8225 			    ("%s: RX: discarding contents of descriptor %d\n",
   8226 			    device_xname(sc->sc_dev), i));
   8227 			wm_init_rxdesc(rxq, i);
   8228 			if (wm_rxdesc_is_eop(rxq, status)) {
   8229 				/* Reset our state. */
   8230 				DPRINTF(WM_DEBUG_RX,
   8231 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8232 				    device_xname(sc->sc_dev)));
   8233 				rxq->rxq_discard = 0;
   8234 			}
   8235 			continue;
   8236 		}
   8237 
   8238 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8239 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8240 
   8241 		m = rxs->rxs_mbuf;
   8242 
   8243 		/*
   8244 		 * Add a new receive buffer to the ring, unless of
   8245 		 * course the length is zero. Treat the latter as a
   8246 		 * failed mapping.
   8247 		 */
   8248 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8249 			/*
   8250 			 * Failed, throw away what we've done so
   8251 			 * far, and discard the rest of the packet.
   8252 			 */
   8253 			ifp->if_ierrors++;
   8254 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8255 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8256 			wm_init_rxdesc(rxq, i);
   8257 			if (!wm_rxdesc_is_eop(rxq, status))
   8258 				rxq->rxq_discard = 1;
   8259 			if (rxq->rxq_head != NULL)
   8260 				m_freem(rxq->rxq_head);
   8261 			WM_RXCHAIN_RESET(rxq);
   8262 			DPRINTF(WM_DEBUG_RX,
   8263 			    ("%s: RX: Rx buffer allocation failed, "
   8264 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8265 			    rxq->rxq_discard ? " (discard)" : ""));
   8266 			continue;
   8267 		}
   8268 
   8269 		m->m_len = len;
   8270 		rxq->rxq_len += len;
   8271 		DPRINTF(WM_DEBUG_RX,
   8272 		    ("%s: RX: buffer at %p len %d\n",
   8273 		    device_xname(sc->sc_dev), m->m_data, len));
   8274 
   8275 		/* If this is not the end of the packet, keep looking. */
   8276 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8277 			WM_RXCHAIN_LINK(rxq, m);
   8278 			DPRINTF(WM_DEBUG_RX,
   8279 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8280 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8281 			continue;
   8282 		}
   8283 
   8284 		/*
    8285 		 * Okay, we have the entire packet now.  The chip is
    8286 		 * configured to include the FCS except on the I350,
    8287 		 * I354 and I21[01] (not all chips can be configured
    8288 		 * to strip it), so we need to trim it here.  We may
    8289 		 * also need to shorten the previous mbuf in the
    8290 		 * chain if the current mbuf is too short.
    8291 		 * Due to an erratum, the RCTL_SECRC bit is always
    8292 		 * set on the I350, so we don't trim the FCS there.
   8293 		 */
   8294 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8295 		    && (sc->sc_type != WM_T_I210)
   8296 		    && (sc->sc_type != WM_T_I211)) {
   8297 			if (m->m_len < ETHER_CRC_LEN) {
   8298 				rxq->rxq_tail->m_len
   8299 				    -= (ETHER_CRC_LEN - m->m_len);
   8300 				m->m_len = 0;
   8301 			} else
   8302 				m->m_len -= ETHER_CRC_LEN;
   8303 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8304 		} else
   8305 			len = rxq->rxq_len;
   8306 
   8307 		WM_RXCHAIN_LINK(rxq, m);
   8308 
   8309 		*rxq->rxq_tailp = NULL;
   8310 		m = rxq->rxq_head;
   8311 
   8312 		WM_RXCHAIN_RESET(rxq);
   8313 
   8314 		DPRINTF(WM_DEBUG_RX,
   8315 		    ("%s: RX: have entire packet, len -> %d\n",
   8316 		    device_xname(sc->sc_dev), len));
   8317 
   8318 		/* If an error occurred, update stats and drop the packet. */
   8319 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8320 			m_freem(m);
   8321 			continue;
   8322 		}
   8323 
   8324 		/* No errors.  Receive the packet. */
   8325 		m_set_rcvif(m, ifp);
   8326 		m->m_pkthdr.len = len;
   8327 		/*
   8328 		 * TODO
    8329 		 * We should save the rsshash and rsstype in this mbuf.
   8330 		 */
   8331 		DPRINTF(WM_DEBUG_RX,
   8332 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8333 			device_xname(sc->sc_dev), rsstype, rsshash));
   8334 
   8335 		/*
   8336 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8337 		 * for us.  Associate the tag with the packet.
   8338 		 */
   8339 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8340 			continue;
   8341 
   8342 		/* Set up checksum info for this packet. */
   8343 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8344 		/*
    8345 		 * Update the receive pointer while still holding rxq_lock
    8346 		 * so it stays consistent with the counters.
   8347 		 */
   8348 		rxq->rxq_ptr = i;
   8349 		rxq->rxq_packets++;
   8350 		rxq->rxq_bytes += len;
   8351 		mutex_exit(rxq->rxq_lock);
   8352 
   8353 		/* Pass it on. */
   8354 		if_percpuq_enqueue(sc->sc_ipq, m);
   8355 
   8356 		mutex_enter(rxq->rxq_lock);
   8357 
   8358 		if (rxq->rxq_stopping)
   8359 			break;
   8360 	}
   8361 
   8362 	if (count != 0)
   8363 		rnd_add_uint32(&sc->rnd_source, count);
   8364 
   8365 	DPRINTF(WM_DEBUG_RX,
   8366 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8367 }
   8368 
   8369 /*
   8370  * wm_linkintr_gmii:
   8371  *
   8372  *	Helper; handle link interrupts for GMII.
   8373  */
   8374 static void
   8375 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8376 {
   8377 
   8378 	KASSERT(WM_CORE_LOCKED(sc));
   8379 
   8380 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8381 		__func__));
   8382 
   8383 	if (icr & ICR_LSC) {
   8384 		uint32_t reg;
   8385 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8386 
   8387 		if ((status & STATUS_LU) != 0) {
   8388 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8389 				device_xname(sc->sc_dev),
   8390 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8391 		} else {
   8392 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8393 				device_xname(sc->sc_dev)));
   8394 		}
   8395 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8396 			wm_gig_downshift_workaround_ich8lan(sc);
   8397 
   8398 		if ((sc->sc_type == WM_T_ICH8)
   8399 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8400 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8401 		}
   8402 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8403 			device_xname(sc->sc_dev)));
   8404 		mii_pollstat(&sc->sc_mii);
   8405 		if (sc->sc_type == WM_T_82543) {
   8406 			int miistatus, active;
   8407 
   8408 			/*
   8409 			 * With 82543, we need to force speed and
   8410 			 * duplex on the MAC equal to what the PHY
   8411 			 * speed and duplex configuration is.
   8412 			 */
   8413 			miistatus = sc->sc_mii.mii_media_status;
   8414 
   8415 			if (miistatus & IFM_ACTIVE) {
   8416 				active = sc->sc_mii.mii_media_active;
   8417 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8418 				switch (IFM_SUBTYPE(active)) {
   8419 				case IFM_10_T:
   8420 					sc->sc_ctrl |= CTRL_SPEED_10;
   8421 					break;
   8422 				case IFM_100_TX:
   8423 					sc->sc_ctrl |= CTRL_SPEED_100;
   8424 					break;
   8425 				case IFM_1000_T:
   8426 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8427 					break;
   8428 				default:
   8429 					/*
   8430 					 * fiber?
    8431 					 * Should not enter here.
   8432 					 */
   8433 					printf("unknown media (%x)\n", active);
   8434 					break;
   8435 				}
   8436 				if (active & IFM_FDX)
   8437 					sc->sc_ctrl |= CTRL_FD;
   8438 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8439 			}
   8440 		} else if (sc->sc_type == WM_T_PCH) {
   8441 			wm_k1_gig_workaround_hv(sc,
   8442 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8443 		}
   8444 
   8445 		if ((sc->sc_phytype == WMPHY_82578)
   8446 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8447 			== IFM_1000_T)) {
   8448 
   8449 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8450 				delay(200*1000); /* XXX too big */
   8451 
   8452 				/* Link stall fix for link up */
   8453 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8454 				    HV_MUX_DATA_CTRL,
   8455 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8456 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8457 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8458 				    HV_MUX_DATA_CTRL,
   8459 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8460 			}
   8461 		}
   8462 		/*
   8463 		 * I217 Packet Loss issue:
   8464 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8465 		 * on power up.
   8466 		 * Set the Beacon Duration for I217 to 8 usec
   8467 		 */
   8468 		if ((sc->sc_type == WM_T_PCH_LPT)
   8469 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8470 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8471 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8472 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8473 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8474 		}
   8475 
   8476 		/* XXX Work-around I218 hang issue */
   8477 		/* e1000_k1_workaround_lpt_lp() */
   8478 
   8479 		if ((sc->sc_type == WM_T_PCH_LPT)
   8480 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8481 			/*
   8482 			 * Set platform power management values for Latency
   8483 			 * Tolerance Reporting (LTR)
   8484 			 */
   8485 			wm_platform_pm_pch_lpt(sc,
   8486 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8487 				    != 0));
   8488 		}
   8489 
   8490 		/* FEXTNVM6 K1-off workaround */
   8491 		if (sc->sc_type == WM_T_PCH_SPT) {
   8492 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8493 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8494 			    & FEXTNVM6_K1_OFF_ENABLE)
   8495 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8496 			else
   8497 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8498 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8499 		}
   8500 	} else if (icr & ICR_RXSEQ) {
    8501 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8502 			device_xname(sc->sc_dev)));
   8503 	}
   8504 }
   8505 
   8506 /*
   8507  * wm_linkintr_tbi:
   8508  *
   8509  *	Helper; handle link interrupts for TBI mode.
   8510  */
   8511 static void
   8512 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8513 {
   8514 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8515 	uint32_t status;
   8516 
   8517 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8518 		__func__));
   8519 
   8520 	status = CSR_READ(sc, WMREG_STATUS);
   8521 	if (icr & ICR_LSC) {
   8522 		if (status & STATUS_LU) {
   8523 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8524 			    device_xname(sc->sc_dev),
   8525 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8526 			/*
   8527 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8528 			 * so we should update sc->sc_ctrl
   8529 			 */
   8530 
   8531 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8532 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8533 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8534 			if (status & STATUS_FD)
   8535 				sc->sc_tctl |=
   8536 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8537 			else
   8538 				sc->sc_tctl |=
   8539 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8540 			if (sc->sc_ctrl & CTRL_TFCE)
   8541 				sc->sc_fcrtl |= FCRTL_XONE;
   8542 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8543 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8544 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8545 				      sc->sc_fcrtl);
   8546 			sc->sc_tbi_linkup = 1;
   8547 			if_link_state_change(ifp, LINK_STATE_UP);
   8548 		} else {
   8549 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8550 			    device_xname(sc->sc_dev)));
   8551 			sc->sc_tbi_linkup = 0;
   8552 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8553 		}
   8554 		/* Update LED */
   8555 		wm_tbi_serdes_set_linkled(sc);
   8556 	} else if (icr & ICR_RXSEQ) {
   8557 		DPRINTF(WM_DEBUG_LINK,
   8558 		    ("%s: LINK: Receive sequence error\n",
   8559 		    device_xname(sc->sc_dev)));
   8560 	}
   8561 }
   8562 
   8563 /*
   8564  * wm_linkintr_serdes:
   8565  *
    8566  *	Helper; handle link interrupts for SERDES mode.
   8567  */
   8568 static void
   8569 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8570 {
   8571 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8572 	struct mii_data *mii = &sc->sc_mii;
   8573 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8574 	uint32_t pcs_adv, pcs_lpab, reg;
   8575 
   8576 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8577 		__func__));
   8578 
   8579 	if (icr & ICR_LSC) {
   8580 		/* Check PCS */
   8581 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8582 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8583 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8584 				device_xname(sc->sc_dev)));
   8585 			mii->mii_media_status |= IFM_ACTIVE;
   8586 			sc->sc_tbi_linkup = 1;
   8587 			if_link_state_change(ifp, LINK_STATE_UP);
   8588 		} else {
   8589 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8590 				device_xname(sc->sc_dev)));
   8591 			mii->mii_media_status |= IFM_NONE;
   8592 			sc->sc_tbi_linkup = 0;
   8593 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8594 			wm_tbi_serdes_set_linkled(sc);
   8595 			return;
   8596 		}
   8597 		mii->mii_media_active |= IFM_1000_SX;
   8598 		if ((reg & PCS_LSTS_FDX) != 0)
   8599 			mii->mii_media_active |= IFM_FDX;
   8600 		else
   8601 			mii->mii_media_active |= IFM_HDX;
   8602 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8603 			/* Check flow */
   8604 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8605 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8606 				DPRINTF(WM_DEBUG_LINK,
   8607 				    ("XXX LINKOK but not ACOMP\n"));
   8608 				return;
   8609 			}
   8610 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8611 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8612 			DPRINTF(WM_DEBUG_LINK,
   8613 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
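         			/*
         			 * Resolve flow control from the local and
         			 * link-partner pause bits: symmetric pause
         			 * when both advertise SYM_PAUSE, otherwise
         			 * Tx-only or Rx-only pause as permitted by
         			 * the ASYM_PAUSE bits (the standard pause
         			 * resolution rules).
         			 */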
   8614 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8615 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8616 				mii->mii_media_active |= IFM_FLOW
   8617 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8618 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8619 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8620 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8621 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8622 				mii->mii_media_active |= IFM_FLOW
   8623 				    | IFM_ETH_TXPAUSE;
   8624 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8625 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8626 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8627 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8628 				mii->mii_media_active |= IFM_FLOW
   8629 				    | IFM_ETH_RXPAUSE;
   8630 		}
   8631 		/* Update LED */
   8632 		wm_tbi_serdes_set_linkled(sc);
   8633 	} else {
   8634 		DPRINTF(WM_DEBUG_LINK,
   8635 		    ("%s: LINK: Receive sequence error\n",
   8636 		    device_xname(sc->sc_dev)));
   8637 	}
   8638 }
   8639 
   8640 /*
   8641  * wm_linkintr:
   8642  *
   8643  *	Helper; handle link interrupts.
   8644  */
   8645 static void
   8646 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8647 {
   8648 
   8649 	KASSERT(WM_CORE_LOCKED(sc));
   8650 
   8651 	if (sc->sc_flags & WM_F_HAS_MII)
   8652 		wm_linkintr_gmii(sc, icr);
   8653 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8654 	    && (sc->sc_type >= WM_T_82575))
   8655 		wm_linkintr_serdes(sc, icr);
   8656 	else
   8657 		wm_linkintr_tbi(sc, icr);
   8658 }
   8659 
   8660 /*
   8661  * wm_intr_legacy:
   8662  *
   8663  *	Interrupt service routine for INTx and MSI.
   8664  */
   8665 static int
   8666 wm_intr_legacy(void *arg)
   8667 {
   8668 	struct wm_softc *sc = arg;
   8669 	struct wm_queue *wmq = &sc->sc_queue[0];
   8670 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8671 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8672 	uint32_t icr, rndval = 0;
   8673 	int handled = 0;
   8674 
   8675 	while (1 /* CONSTCOND */) {
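         		/*
         		 * Note that ICR is read-to-clear, so this read also
         		 * acknowledges the interrupt causes it returns.
         		 */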
   8676 		icr = CSR_READ(sc, WMREG_ICR);
   8677 		if ((icr & sc->sc_icr) == 0)
   8678 			break;
   8679 		if (handled == 0) {
   8680 			DPRINTF(WM_DEBUG_TX,
    8681 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8682 		}
   8683 		if (rndval == 0)
   8684 			rndval = icr;
   8685 
   8686 		mutex_enter(rxq->rxq_lock);
   8687 
   8688 		if (rxq->rxq_stopping) {
   8689 			mutex_exit(rxq->rxq_lock);
   8690 			break;
   8691 		}
   8692 
   8693 		handled = 1;
   8694 
   8695 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8696 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8697 			DPRINTF(WM_DEBUG_RX,
   8698 			    ("%s: RX: got Rx intr 0x%08x\n",
   8699 			    device_xname(sc->sc_dev),
   8700 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8701 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8702 		}
   8703 #endif
   8704 		wm_rxeof(rxq, UINT_MAX);
   8705 
   8706 		mutex_exit(rxq->rxq_lock);
   8707 		mutex_enter(txq->txq_lock);
   8708 
   8709 		if (txq->txq_stopping) {
   8710 			mutex_exit(txq->txq_lock);
   8711 			break;
   8712 		}
   8713 
   8714 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8715 		if (icr & ICR_TXDW) {
   8716 			DPRINTF(WM_DEBUG_TX,
   8717 			    ("%s: TX: got TXDW interrupt\n",
   8718 			    device_xname(sc->sc_dev)));
   8719 			WM_Q_EVCNT_INCR(txq, txdw);
   8720 		}
   8721 #endif
   8722 		wm_txeof(sc, txq);
   8723 
   8724 		mutex_exit(txq->txq_lock);
   8725 		WM_CORE_LOCK(sc);
   8726 
   8727 		if (sc->sc_core_stopping) {
   8728 			WM_CORE_UNLOCK(sc);
   8729 			break;
   8730 		}
   8731 
   8732 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8733 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8734 			wm_linkintr(sc, icr);
   8735 		}
   8736 
   8737 		WM_CORE_UNLOCK(sc);
   8738 
   8739 		if (icr & ICR_RXO) {
   8740 #if defined(WM_DEBUG)
   8741 			log(LOG_WARNING, "%s: Receive overrun\n",
   8742 			    device_xname(sc->sc_dev));
   8743 #endif /* defined(WM_DEBUG) */
   8744 		}
   8745 	}
   8746 
   8747 	rnd_add_uint32(&sc->rnd_source, rndval);
   8748 
   8749 	if (handled) {
   8750 		/* Try to get more packets going. */
   8751 		softint_schedule(wmq->wmq_si);
   8752 	}
   8753 
   8754 	return handled;
   8755 }
   8756 
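         /*
          * wm_txrxintr_disable, wm_txrxintr_enable:
          *
          *	Mask/unmask a queue's Tx/Rx interrupt while it is serviced.
          *	The 82574 uses per-queue bits in IMC/IMS, the 82575 uses the
          *	EITR_{TX,RX}_QUEUE bits in EIMC/EIMS, and later devices use
          *	one EIMC/EIMS bit per MSI-X vector.
          */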
   8757 static inline void
   8758 wm_txrxintr_disable(struct wm_queue *wmq)
   8759 {
   8760 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8761 
   8762 	if (sc->sc_type == WM_T_82574)
   8763 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8764 	else if (sc->sc_type == WM_T_82575)
   8765 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8766 	else
   8767 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8768 }
   8769 
   8770 static inline void
   8771 wm_txrxintr_enable(struct wm_queue *wmq)
   8772 {
   8773 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8774 
   8775 	wm_itrs_calculate(sc, wmq);
   8776 
   8777 	if (sc->sc_type == WM_T_82574)
   8778 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8779 	else if (sc->sc_type == WM_T_82575)
   8780 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8781 	else
   8782 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8783 }
   8784 
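         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for a queue pair's Tx and Rx
          *	interrupts when using MSI-X.
          */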
   8785 static int
   8786 wm_txrxintr_msix(void *arg)
   8787 {
   8788 	struct wm_queue *wmq = arg;
   8789 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8790 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8791 	struct wm_softc *sc = txq->txq_sc;
   8792 	u_int limit = sc->sc_rx_intr_process_limit;
   8793 
   8794 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8795 
   8796 	DPRINTF(WM_DEBUG_TX,
   8797 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8798 
   8799 	wm_txrxintr_disable(wmq);
   8800 
   8801 	mutex_enter(txq->txq_lock);
   8802 
   8803 	if (txq->txq_stopping) {
   8804 		mutex_exit(txq->txq_lock);
   8805 		return 0;
   8806 	}
   8807 
   8808 	WM_Q_EVCNT_INCR(txq, txdw);
   8809 	wm_txeof(sc, txq);
    8810 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8811 	mutex_exit(txq->txq_lock);
   8812 
   8813 	DPRINTF(WM_DEBUG_RX,
   8814 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8815 	mutex_enter(rxq->rxq_lock);
   8816 
   8817 	if (rxq->rxq_stopping) {
   8818 		mutex_exit(rxq->rxq_lock);
   8819 		return 0;
   8820 	}
   8821 
   8822 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8823 	wm_rxeof(rxq, limit);
   8824 	mutex_exit(rxq->rxq_lock);
   8825 
   8826 	wm_itrs_writereg(sc, wmq);
   8827 
   8828 	softint_schedule(wmq->wmq_si);
   8829 
   8830 	return 1;
   8831 }
   8832 
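         /*
          * wm_handle_queue:
          *
          *	Softint handler; finish the Tx/Rx processing deferred from
          *	the interrupt handlers and re-enable the queue's interrupt.
          */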
   8833 static void
   8834 wm_handle_queue(void *arg)
   8835 {
   8836 	struct wm_queue *wmq = arg;
   8837 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8838 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8839 	struct wm_softc *sc = txq->txq_sc;
   8840 	u_int limit = sc->sc_rx_process_limit;
   8841 
   8842 	mutex_enter(txq->txq_lock);
   8843 	if (txq->txq_stopping) {
   8844 		mutex_exit(txq->txq_lock);
   8845 		return;
   8846 	}
   8847 	wm_txeof(sc, txq);
   8848 	wm_deferred_start_locked(txq);
   8849 	mutex_exit(txq->txq_lock);
   8850 
   8851 	mutex_enter(rxq->rxq_lock);
   8852 	if (rxq->rxq_stopping) {
   8853 		mutex_exit(rxq->rxq_lock);
   8854 		return;
   8855 	}
   8856 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8857 	wm_rxeof(rxq, limit);
   8858 	mutex_exit(rxq->rxq_lock);
   8859 
   8860 	wm_txrxintr_enable(wmq);
   8861 }
   8862 
   8863 /*
   8864  * wm_linkintr_msix:
   8865  *
   8866  *	Interrupt service routine for link status change for MSI-X.
   8867  */
   8868 static int
   8869 wm_linkintr_msix(void *arg)
   8870 {
   8871 	struct wm_softc *sc = arg;
   8872 	uint32_t reg;
   8873 
   8874 	DPRINTF(WM_DEBUG_LINK,
   8875 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8876 
   8877 	reg = CSR_READ(sc, WMREG_ICR);
   8878 	WM_CORE_LOCK(sc);
   8879 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8880 		goto out;
   8881 
   8882 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8883 	wm_linkintr(sc, ICR_LSC);
   8884 
   8885 out:
   8886 	WM_CORE_UNLOCK(sc);
   8887 
   8888 	if (sc->sc_type == WM_T_82574)
   8889 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8890 	else if (sc->sc_type == WM_T_82575)
   8891 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8892 	else
   8893 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8894 
   8895 	return 1;
   8896 }
   8897 
   8898 /*
   8899  * Media related.
   8900  * GMII, SGMII, TBI (and SERDES)
   8901  */
   8902 
   8903 /* Common */
   8904 
   8905 /*
   8906  * wm_tbi_serdes_set_linkled:
   8907  *
   8908  *	Update the link LED on TBI and SERDES devices.
   8909  */
   8910 static void
   8911 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8912 {
   8913 
   8914 	if (sc->sc_tbi_linkup)
   8915 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8916 	else
   8917 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8918 
   8919 	/* 82540 or newer devices are active low */
   8920 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8921 
   8922 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8923 }
   8924 
   8925 /* GMII related */
   8926 
   8927 /*
   8928  * wm_gmii_reset:
   8929  *
   8930  *	Reset the PHY.
   8931  */
   8932 static void
   8933 wm_gmii_reset(struct wm_softc *sc)
   8934 {
   8935 	uint32_t reg;
   8936 	int rv;
   8937 
   8938 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8939 		device_xname(sc->sc_dev), __func__));
   8940 
   8941 	rv = sc->phy.acquire(sc);
   8942 	if (rv != 0) {
   8943 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8944 		    __func__);
   8945 		return;
   8946 	}
   8947 
   8948 	switch (sc->sc_type) {
   8949 	case WM_T_82542_2_0:
   8950 	case WM_T_82542_2_1:
   8951 		/* null */
   8952 		break;
   8953 	case WM_T_82543:
   8954 		/*
   8955 		 * With 82543, we need to force speed and duplex on the MAC
   8956 		 * equal to what the PHY speed and duplex configuration is.
   8957 		 * In addition, we need to perform a hardware reset on the PHY
   8958 		 * to take it out of reset.
   8959 		 */
   8960 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8961 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8962 
   8963 		/* The PHY reset pin is active-low. */
   8964 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8965 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8966 		    CTRL_EXT_SWDPIN(4));
   8967 		reg |= CTRL_EXT_SWDPIO(4);
   8968 
   8969 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8970 		CSR_WRITE_FLUSH(sc);
   8971 		delay(10*1000);
   8972 
   8973 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8974 		CSR_WRITE_FLUSH(sc);
   8975 		delay(150);
   8976 #if 0
   8977 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8978 #endif
   8979 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8980 		break;
   8981 	case WM_T_82544:	/* reset 10000us */
   8982 	case WM_T_82540:
   8983 	case WM_T_82545:
   8984 	case WM_T_82545_3:
   8985 	case WM_T_82546:
   8986 	case WM_T_82546_3:
   8987 	case WM_T_82541:
   8988 	case WM_T_82541_2:
   8989 	case WM_T_82547:
   8990 	case WM_T_82547_2:
   8991 	case WM_T_82571:	/* reset 100us */
   8992 	case WM_T_82572:
   8993 	case WM_T_82573:
   8994 	case WM_T_82574:
   8995 	case WM_T_82575:
   8996 	case WM_T_82576:
   8997 	case WM_T_82580:
   8998 	case WM_T_I350:
   8999 	case WM_T_I354:
   9000 	case WM_T_I210:
   9001 	case WM_T_I211:
   9002 	case WM_T_82583:
   9003 	case WM_T_80003:
   9004 		/* generic reset */
   9005 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9006 		CSR_WRITE_FLUSH(sc);
   9007 		delay(20000);
   9008 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9009 		CSR_WRITE_FLUSH(sc);
   9010 		delay(20000);
   9011 
   9012 		if ((sc->sc_type == WM_T_82541)
   9013 		    || (sc->sc_type == WM_T_82541_2)
   9014 		    || (sc->sc_type == WM_T_82547)
   9015 		    || (sc->sc_type == WM_T_82547_2)) {
    9016 			/* Workarounds for IGP are done in igp_reset() */
   9017 			/* XXX add code to set LED after phy reset */
   9018 		}
   9019 		break;
   9020 	case WM_T_ICH8:
   9021 	case WM_T_ICH9:
   9022 	case WM_T_ICH10:
   9023 	case WM_T_PCH:
   9024 	case WM_T_PCH2:
   9025 	case WM_T_PCH_LPT:
   9026 	case WM_T_PCH_SPT:
   9027 		/* generic reset */
   9028 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9029 		CSR_WRITE_FLUSH(sc);
   9030 		delay(100);
   9031 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9032 		CSR_WRITE_FLUSH(sc);
   9033 		delay(150);
   9034 		break;
   9035 	default:
   9036 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9037 		    __func__);
   9038 		break;
   9039 	}
   9040 
   9041 	sc->phy.release(sc);
   9042 
   9043 	/* get_cfg_done */
   9044 	wm_get_cfg_done(sc);
   9045 
   9046 	/* extra setup */
   9047 	switch (sc->sc_type) {
   9048 	case WM_T_82542_2_0:
   9049 	case WM_T_82542_2_1:
   9050 	case WM_T_82543:
   9051 	case WM_T_82544:
   9052 	case WM_T_82540:
   9053 	case WM_T_82545:
   9054 	case WM_T_82545_3:
   9055 	case WM_T_82546:
   9056 	case WM_T_82546_3:
   9057 	case WM_T_82541_2:
   9058 	case WM_T_82547_2:
   9059 	case WM_T_82571:
   9060 	case WM_T_82572:
   9061 	case WM_T_82573:
   9062 	case WM_T_82574:
   9063 	case WM_T_82583:
   9064 	case WM_T_82575:
   9065 	case WM_T_82576:
   9066 	case WM_T_82580:
   9067 	case WM_T_I350:
   9068 	case WM_T_I354:
   9069 	case WM_T_I210:
   9070 	case WM_T_I211:
   9071 	case WM_T_80003:
   9072 		/* null */
   9073 		break;
   9074 	case WM_T_82541:
   9075 	case WM_T_82547:
    9076 		/* XXX Configure LED after PHY reset */
   9077 		break;
   9078 	case WM_T_ICH8:
   9079 	case WM_T_ICH9:
   9080 	case WM_T_ICH10:
   9081 	case WM_T_PCH:
   9082 	case WM_T_PCH2:
   9083 	case WM_T_PCH_LPT:
   9084 	case WM_T_PCH_SPT:
   9085 		wm_phy_post_reset(sc);
   9086 		break;
   9087 	default:
   9088 		panic("%s: unknown type\n", __func__);
   9089 		break;
   9090 	}
   9091 }
   9092 
   9093 /*
    9094  * Set up sc_phytype and mii_{read|write}reg.
    9095  *
    9096  *  To identify the PHY type, the correct read/write functions must
    9097  * be selected, and selecting them requires the PCI ID or MAC type,
    9098  * since the PHY registers cannot be accessed yet.
    9099  *
    9100  *  On the first call of this function, the PHY ID is not known yet,
    9101  * so check the PCI ID or MAC type. The list of PCI IDs may not be
    9102  * perfect, so the result might be incorrect.
    9103  *
    9104  *  On the second call, the PHY OUI and model are used to identify
    9105  * the PHY type. This might still not be perfect because the table of
    9106  * compared entries is incomplete, but it is better than the first call.
    9107  *
    9108  *  If the newly detected result differs from the previous assumption,
    9109  * a diagnostic message is printed.
   9110  */
   9111 static void
   9112 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9113     uint16_t phy_model)
   9114 {
   9115 	device_t dev = sc->sc_dev;
   9116 	struct mii_data *mii = &sc->sc_mii;
   9117 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9118 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9119 	mii_readreg_t new_readreg;
   9120 	mii_writereg_t new_writereg;
   9121 
   9122 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9123 		device_xname(sc->sc_dev), __func__));
   9124 
   9125 	if (mii->mii_readreg == NULL) {
   9126 		/*
   9127 		 *  This is the first call of this function. For ICH and PCH
   9128 		 * variants, it's difficult to determine the PHY access method
   9129 		 * by sc_type, so use the PCI product ID for some devices.
   9130 		 */
   9131 
   9132 		switch (sc->sc_pcidevid) {
   9133 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9134 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9135 			/* 82577 */
   9136 			new_phytype = WMPHY_82577;
   9137 			break;
   9138 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9139 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9140 			/* 82578 */
   9141 			new_phytype = WMPHY_82578;
   9142 			break;
   9143 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9144 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9145 			/* 82579 */
   9146 			new_phytype = WMPHY_82579;
   9147 			break;
   9148 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9149 		case PCI_PRODUCT_INTEL_82801I_BM:
   9150 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9151 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9152 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9153 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9154 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9155 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9156 			/* ICH8, 9, 10 with 82567 */
   9157 			new_phytype = WMPHY_BM;
   9158 			break;
   9159 		default:
   9160 			break;
   9161 		}
   9162 	} else {
   9163 		/* It's not the first call. Use PHY OUI and model */
   9164 		switch (phy_oui) {
   9165 		case MII_OUI_ATHEROS: /* XXX ??? */
   9166 			switch (phy_model) {
   9167 			case 0x0004: /* XXX */
   9168 				new_phytype = WMPHY_82578;
   9169 				break;
   9170 			default:
   9171 				break;
   9172 			}
   9173 			break;
   9174 		case MII_OUI_xxMARVELL:
   9175 			switch (phy_model) {
   9176 			case MII_MODEL_xxMARVELL_I210:
   9177 				new_phytype = WMPHY_I210;
   9178 				break;
   9179 			case MII_MODEL_xxMARVELL_E1011:
   9180 			case MII_MODEL_xxMARVELL_E1000_3:
   9181 			case MII_MODEL_xxMARVELL_E1000_5:
   9182 			case MII_MODEL_xxMARVELL_E1112:
   9183 				new_phytype = WMPHY_M88;
   9184 				break;
   9185 			case MII_MODEL_xxMARVELL_E1149:
   9186 				new_phytype = WMPHY_BM;
   9187 				break;
   9188 			case MII_MODEL_xxMARVELL_E1111:
   9189 			case MII_MODEL_xxMARVELL_I347:
   9190 			case MII_MODEL_xxMARVELL_E1512:
   9191 			case MII_MODEL_xxMARVELL_E1340M:
   9192 			case MII_MODEL_xxMARVELL_E1543:
   9193 				new_phytype = WMPHY_M88;
   9194 				break;
   9195 			case MII_MODEL_xxMARVELL_I82563:
   9196 				new_phytype = WMPHY_GG82563;
   9197 				break;
   9198 			default:
   9199 				break;
   9200 			}
   9201 			break;
   9202 		case MII_OUI_INTEL:
   9203 			switch (phy_model) {
   9204 			case MII_MODEL_INTEL_I82577:
   9205 				new_phytype = WMPHY_82577;
   9206 				break;
   9207 			case MII_MODEL_INTEL_I82579:
   9208 				new_phytype = WMPHY_82579;
   9209 				break;
   9210 			case MII_MODEL_INTEL_I217:
   9211 				new_phytype = WMPHY_I217;
   9212 				break;
   9213 			case MII_MODEL_INTEL_I82580:
   9214 			case MII_MODEL_INTEL_I350:
   9215 				new_phytype = WMPHY_82580;
   9216 				break;
   9217 			default:
   9218 				break;
   9219 			}
   9220 			break;
   9221 		case MII_OUI_yyINTEL:
   9222 			switch (phy_model) {
   9223 			case MII_MODEL_yyINTEL_I82562G:
   9224 			case MII_MODEL_yyINTEL_I82562EM:
   9225 			case MII_MODEL_yyINTEL_I82562ET:
   9226 				new_phytype = WMPHY_IFE;
   9227 				break;
   9228 			case MII_MODEL_yyINTEL_IGP01E1000:
   9229 				new_phytype = WMPHY_IGP;
   9230 				break;
   9231 			case MII_MODEL_yyINTEL_I82566:
   9232 				new_phytype = WMPHY_IGP_3;
   9233 				break;
   9234 			default:
   9235 				break;
   9236 			}
   9237 			break;
   9238 		default:
   9239 			break;
   9240 		}
   9241 		if (new_phytype == WMPHY_UNKNOWN)
   9242 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9243 			    __func__);
   9244 
   9245 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9246 		    && (sc->sc_phytype != new_phytype)) {
    9247 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9248 			    " was incorrect. PHY type from PHY ID = %u\n",
   9249 			    sc->sc_phytype, new_phytype);
   9250 		}
   9251 	}
   9252 
   9253 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9254 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9255 		/* SGMII */
   9256 		new_readreg = wm_sgmii_readreg;
   9257 		new_writereg = wm_sgmii_writereg;
    9258 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9259 		/* BM2 (phyaddr == 1) */
   9260 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9261 		    && (new_phytype != WMPHY_BM)
   9262 		    && (new_phytype != WMPHY_UNKNOWN))
   9263 			doubt_phytype = new_phytype;
   9264 		new_phytype = WMPHY_BM;
   9265 		new_readreg = wm_gmii_bm_readreg;
   9266 		new_writereg = wm_gmii_bm_writereg;
   9267 	} else if (sc->sc_type >= WM_T_PCH) {
   9268 		/* All PCH* use _hv_ */
   9269 		new_readreg = wm_gmii_hv_readreg;
   9270 		new_writereg = wm_gmii_hv_writereg;
   9271 	} else if (sc->sc_type >= WM_T_ICH8) {
   9272 		/* non-82567 ICH8, 9 and 10 */
   9273 		new_readreg = wm_gmii_i82544_readreg;
   9274 		new_writereg = wm_gmii_i82544_writereg;
   9275 	} else if (sc->sc_type >= WM_T_80003) {
   9276 		/* 80003 */
   9277 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9278 		    && (new_phytype != WMPHY_GG82563)
   9279 		    && (new_phytype != WMPHY_UNKNOWN))
   9280 			doubt_phytype = new_phytype;
   9281 		new_phytype = WMPHY_GG82563;
   9282 		new_readreg = wm_gmii_i80003_readreg;
   9283 		new_writereg = wm_gmii_i80003_writereg;
   9284 	} else if (sc->sc_type >= WM_T_I210) {
   9285 		/* I210 and I211 */
   9286 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9287 		    && (new_phytype != WMPHY_I210)
   9288 		    && (new_phytype != WMPHY_UNKNOWN))
   9289 			doubt_phytype = new_phytype;
   9290 		new_phytype = WMPHY_I210;
   9291 		new_readreg = wm_gmii_gs40g_readreg;
   9292 		new_writereg = wm_gmii_gs40g_writereg;
   9293 	} else if (sc->sc_type >= WM_T_82580) {
   9294 		/* 82580, I350 and I354 */
   9295 		new_readreg = wm_gmii_82580_readreg;
   9296 		new_writereg = wm_gmii_82580_writereg;
   9297 	} else if (sc->sc_type >= WM_T_82544) {
    9298 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9299 		new_readreg = wm_gmii_i82544_readreg;
   9300 		new_writereg = wm_gmii_i82544_writereg;
   9301 	} else {
   9302 		new_readreg = wm_gmii_i82543_readreg;
   9303 		new_writereg = wm_gmii_i82543_writereg;
   9304 	}
   9305 
   9306 	if (new_phytype == WMPHY_BM) {
   9307 		/* All BM use _bm_ */
   9308 		new_readreg = wm_gmii_bm_readreg;
   9309 		new_writereg = wm_gmii_bm_writereg;
   9310 	}
   9311 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9312 		/* All PCH* use _hv_ */
   9313 		new_readreg = wm_gmii_hv_readreg;
   9314 		new_writereg = wm_gmii_hv_writereg;
   9315 	}
   9316 
   9317 	/* Diag output */
   9318 	if (doubt_phytype != WMPHY_UNKNOWN)
   9319 		aprint_error_dev(dev, "Assumed new PHY type was "
   9320 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9321 		    new_phytype);
   9322 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9323 	    && (sc->sc_phytype != new_phytype))
    9324 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9325 		    " was incorrect. New PHY type = %u\n",
   9326 		    sc->sc_phytype, new_phytype);
   9327 
   9328 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9329 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9330 
   9331 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9332 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9333 		    "function was incorrect.\n");
   9334 
   9335 	/* Update now */
   9336 	sc->sc_phytype = new_phytype;
   9337 	mii->mii_readreg = new_readreg;
   9338 	mii->mii_writereg = new_writereg;
   9339 }
   9340 
   9341 /*
   9342  * wm_get_phy_id_82575:
   9343  *
    9344  *	Return the PHY ID, or -1 on failure.
   9345  */
   9346 static int
   9347 wm_get_phy_id_82575(struct wm_softc *sc)
   9348 {
   9349 	uint32_t reg;
   9350 	int phyid = -1;
   9351 
   9352 	/* XXX */
   9353 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9354 		return -1;
   9355 
   9356 	if (wm_sgmii_uses_mdio(sc)) {
   9357 		switch (sc->sc_type) {
   9358 		case WM_T_82575:
   9359 		case WM_T_82576:
   9360 			reg = CSR_READ(sc, WMREG_MDIC);
   9361 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9362 			break;
   9363 		case WM_T_82580:
   9364 		case WM_T_I350:
   9365 		case WM_T_I354:
   9366 		case WM_T_I210:
   9367 		case WM_T_I211:
   9368 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9369 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9370 			break;
   9371 		default:
   9372 			return -1;
   9373 		}
   9374 	}
   9375 
   9376 	return phyid;
   9377 }
   9378 
   9379 
   9380 /*
   9381  * wm_gmii_mediainit:
   9382  *
   9383  *	Initialize media for use on 1000BASE-T devices.
   9384  */
   9385 static void
   9386 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9387 {
   9388 	device_t dev = sc->sc_dev;
   9389 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9390 	struct mii_data *mii = &sc->sc_mii;
   9391 	uint32_t reg;
   9392 
   9393 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9394 		device_xname(sc->sc_dev), __func__));
   9395 
   9396 	/* We have GMII. */
   9397 	sc->sc_flags |= WM_F_HAS_MII;
   9398 
   9399 	if (sc->sc_type == WM_T_80003)
    9400 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9401 	else
   9402 		sc->sc_tipg = TIPG_1000T_DFLT;
   9403 
   9404 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9405 	if ((sc->sc_type == WM_T_82580)
   9406 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9407 	    || (sc->sc_type == WM_T_I211)) {
   9408 		reg = CSR_READ(sc, WMREG_PHPM);
   9409 		reg &= ~PHPM_GO_LINK_D;
   9410 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9411 	}
   9412 
   9413 	/*
   9414 	 * Let the chip set speed/duplex on its own based on
   9415 	 * signals from the PHY.
   9416 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9417 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9418 	 */
   9419 	sc->sc_ctrl |= CTRL_SLU;
   9420 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9421 
   9422 	/* Initialize our media structures and probe the GMII. */
   9423 	mii->mii_ifp = ifp;
   9424 
   9425 	mii->mii_statchg = wm_gmii_statchg;
   9426 
   9427 	/* get PHY control from SMBus to PCIe */
   9428 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9429 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9430 		wm_smbustopci(sc);
   9431 
   9432 	wm_gmii_reset(sc);
   9433 
   9434 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9435 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9436 	    wm_gmii_mediastatus);
   9437 
   9438 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9439 	    || (sc->sc_type == WM_T_82580)
   9440 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9441 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9442 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9443 			/* Attach only one port */
   9444 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9445 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9446 		} else {
   9447 			int i, id;
   9448 			uint32_t ctrl_ext;
   9449 
   9450 			id = wm_get_phy_id_82575(sc);
   9451 			if (id != -1) {
   9452 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9453 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9454 			}
   9455 			if ((id == -1)
   9456 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9457 				/* Power on the SGMII PHY if it is disabled */
   9458 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9459 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9460 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9461 				CSR_WRITE_FLUSH(sc);
   9462 				delay(300*1000); /* XXX too long */
   9463 
    9464 				/* Try PHY addresses 1 through 7 */
   9465 				for (i = 1; i < 8; i++)
   9466 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9467 					    0xffffffff, i, MII_OFFSET_ANY,
   9468 					    MIIF_DOPAUSE);
   9469 
   9470 				/* restore previous sfp cage power state */
   9471 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9472 			}
   9473 		}
   9474 	} else {
   9475 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9476 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9477 	}
   9478 
   9479 	/*
   9480 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9481 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9482 	 */
   9483 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9484 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9485 		wm_set_mdio_slow_mode_hv(sc);
   9486 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9487 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9488 	}
   9489 
   9490 	/*
   9491 	 * (For ICH8 variants)
   9492 	 * If PHY detection failed, use BM's r/w function and retry.
   9493 	 */
   9494 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9495 		/* if failed, retry with *_bm_* */
   9496 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9497 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9498 		    sc->sc_phytype);
   9499 		sc->sc_phytype = WMPHY_BM;
   9500 		mii->mii_readreg = wm_gmii_bm_readreg;
   9501 		mii->mii_writereg = wm_gmii_bm_writereg;
   9502 
   9503 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9504 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9505 	}
   9506 
   9507 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9508 		/* No PHY was found */
   9509 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9510 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9511 		sc->sc_phytype = WMPHY_NONE;
   9512 	} else {
   9513 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9514 
   9515 		/*
   9516 		 * PHY Found! Check PHY type again by the second call of
   9517 		 * wm_gmii_setup_phytype.
   9518 		 */
   9519 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9520 		    child->mii_mpd_model);
   9521 
   9522 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9523 	}
   9524 }
   9525 
   9526 /*
   9527  * wm_gmii_mediachange:	[ifmedia interface function]
   9528  *
   9529  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9530  */
   9531 static int
   9532 wm_gmii_mediachange(struct ifnet *ifp)
   9533 {
   9534 	struct wm_softc *sc = ifp->if_softc;
   9535 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9536 	int rc;
   9537 
   9538 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9539 		device_xname(sc->sc_dev), __func__));
   9540 	if ((ifp->if_flags & IFF_UP) == 0)
   9541 		return 0;
   9542 
   9543 	/* Disable D0 LPLU. */
   9544 	wm_lplu_d0_disable(sc);
   9545 
   9546 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9547 	sc->sc_ctrl |= CTRL_SLU;
   9548 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9549 	    || (sc->sc_type > WM_T_82543)) {
   9550 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9551 	} else {
   9552 		sc->sc_ctrl &= ~CTRL_ASDE;
   9553 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9554 		if (ife->ifm_media & IFM_FDX)
   9555 			sc->sc_ctrl |= CTRL_FD;
   9556 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9557 		case IFM_10_T:
   9558 			sc->sc_ctrl |= CTRL_SPEED_10;
   9559 			break;
   9560 		case IFM_100_TX:
   9561 			sc->sc_ctrl |= CTRL_SPEED_100;
   9562 			break;
   9563 		case IFM_1000_T:
   9564 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9565 			break;
   9566 		default:
   9567 			panic("wm_gmii_mediachange: bad media 0x%x",
   9568 			    ife->ifm_media);
   9569 		}
   9570 	}
   9571 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9572 	CSR_WRITE_FLUSH(sc);
   9573 	if (sc->sc_type <= WM_T_82543)
   9574 		wm_gmii_reset(sc);
   9575 
   9576 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9577 		return 0;
   9578 	return rc;
   9579 }
   9580 
   9581 /*
   9582  * wm_gmii_mediastatus:	[ifmedia interface function]
   9583  *
   9584  *	Get the current interface media status on a 1000BASE-T device.
   9585  */
   9586 static void
   9587 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9588 {
   9589 	struct wm_softc *sc = ifp->if_softc;
   9590 
   9591 	ether_mediastatus(ifp, ifmr);
   9592 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9593 	    | sc->sc_flowflags;
   9594 }
   9595 
   9596 #define	MDI_IO		CTRL_SWDPIN(2)
   9597 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9598 #define	MDI_CLK		CTRL_SWDPIN(3)
   9599 
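/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controllable pins: SWDPIN(2) carries the data (MDIO) and
 * SWDPIN(3) the clock (MDC).  The helpers below shift an IEEE 802.3
 * clause 22 management frame out one bit per clock pulse.  Roughly,
 * a read frame looks like this (after a 32-bit preamble of 1s):
 *
 *	<ST:01> <OP:10> <PHYAD:5> <REGAD:5> <TA> <DATA:16>
 */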
   9600 static void
   9601 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9602 {
   9603 	uint32_t i, v;
   9604 
   9605 	v = CSR_READ(sc, WMREG_CTRL);
   9606 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9607 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9608 
   9609 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9610 		if (data & i)
   9611 			v |= MDI_IO;
   9612 		else
   9613 			v &= ~MDI_IO;
   9614 		CSR_WRITE(sc, WMREG_CTRL, v);
   9615 		CSR_WRITE_FLUSH(sc);
   9616 		delay(10);
   9617 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9618 		CSR_WRITE_FLUSH(sc);
   9619 		delay(10);
   9620 		CSR_WRITE(sc, WMREG_CTRL, v);
   9621 		CSR_WRITE_FLUSH(sc);
   9622 		delay(10);
   9623 	}
   9624 }
   9625 
   9626 static uint32_t
   9627 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9628 {
   9629 	uint32_t v, i, data = 0;
   9630 
   9631 	v = CSR_READ(sc, WMREG_CTRL);
   9632 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9633 	v |= CTRL_SWDPIO(3);
   9634 
   9635 	CSR_WRITE(sc, WMREG_CTRL, v);
   9636 	CSR_WRITE_FLUSH(sc);
   9637 	delay(10);
   9638 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9639 	CSR_WRITE_FLUSH(sc);
   9640 	delay(10);
   9641 	CSR_WRITE(sc, WMREG_CTRL, v);
   9642 	CSR_WRITE_FLUSH(sc);
   9643 	delay(10);
   9644 
   9645 	for (i = 0; i < 16; i++) {
   9646 		data <<= 1;
   9647 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9648 		CSR_WRITE_FLUSH(sc);
   9649 		delay(10);
   9650 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9651 			data |= 1;
   9652 		CSR_WRITE(sc, WMREG_CTRL, v);
   9653 		CSR_WRITE_FLUSH(sc);
   9654 		delay(10);
   9655 	}
   9656 
   9657 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9658 	CSR_WRITE_FLUSH(sc);
   9659 	delay(10);
   9660 	CSR_WRITE(sc, WMREG_CTRL, v);
   9661 	CSR_WRITE_FLUSH(sc);
   9662 	delay(10);
   9663 
   9664 	return data;
   9665 }
   9666 
   9667 #undef MDI_IO
   9668 #undef MDI_DIR
   9669 #undef MDI_CLK
   9670 
   9671 /*
   9672  * wm_gmii_i82543_readreg:	[mii interface function]
   9673  *
   9674  *	Read a PHY register on the GMII (i82543 version).
   9675  */
   9676 static int
   9677 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9678 {
   9679 	struct wm_softc *sc = device_private(dev);
   9680 	int rv;
   9681 
   9682 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9683 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9684 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9685 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9686 
   9687 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9688 	    device_xname(dev), phy, reg, rv));
   9689 
   9690 	return rv;
   9691 }
   9692 
   9693 /*
   9694  * wm_gmii_i82543_writereg:	[mii interface function]
   9695  *
   9696  *	Write a PHY register on the GMII (i82543 version).
   9697  */
   9698 static void
   9699 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9700 {
   9701 	struct wm_softc *sc = device_private(dev);
   9702 
   9703 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9704 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9705 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9706 	    (MII_COMMAND_START << 30), 32);
   9707 }
   9708 
   9709 /*
   9710  * wm_gmii_mdic_readreg:	[mii interface function]
   9711  *
   9712  *	Read a PHY register on the GMII.
   9713  */
   9714 static int
   9715 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9716 {
   9717 	struct wm_softc *sc = device_private(dev);
   9718 	uint32_t mdic = 0;
   9719 	int i, rv;
   9720 
   9721 	if (reg > MII_ADDRMASK) {
   9722 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9723 		    __func__, sc->sc_phytype, reg);
   9724 		reg &= MII_ADDRMASK;
   9725 	}
   9726 
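	/*
	 * A single MDIC write starts the transaction; the hardware shifts
	 * the frame itself and sets MDIC_READY when the 16-bit result is
	 * valid (or MDIC_E on error), so we only poll below.
	 */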
   9727 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9728 	    MDIC_REGADD(reg));
   9729 
   9730 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9731 		mdic = CSR_READ(sc, WMREG_MDIC);
   9732 		if (mdic & MDIC_READY)
   9733 			break;
   9734 		delay(50);
   9735 	}
   9736 
   9737 	if ((mdic & MDIC_READY) == 0) {
   9738 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9739 		    device_xname(dev), phy, reg);
   9740 		rv = 0;
   9741 	} else if (mdic & MDIC_E) {
   9742 #if 0 /* This is normal if no PHY is present. */
   9743 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9744 		    device_xname(dev), phy, reg);
   9745 #endif
   9746 		rv = 0;
   9747 	} else {
   9748 		rv = MDIC_DATA(mdic);
   9749 		if (rv == 0xffff)
   9750 			rv = 0;
   9751 	}
   9752 
   9753 	return rv;
   9754 }
   9755 
   9756 /*
   9757  * wm_gmii_mdic_writereg:	[mii interface function]
   9758  *
   9759  *	Write a PHY register on the GMII.
   9760  */
   9761 static void
   9762 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9763 {
   9764 	struct wm_softc *sc = device_private(dev);
   9765 	uint32_t mdic = 0;
   9766 	int i;
   9767 
   9768 	if (reg > MII_ADDRMASK) {
   9769 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9770 		    __func__, sc->sc_phytype, reg);
   9771 		reg &= MII_ADDRMASK;
   9772 	}
   9773 
   9774 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9775 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9776 
   9777 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9778 		mdic = CSR_READ(sc, WMREG_MDIC);
   9779 		if (mdic & MDIC_READY)
   9780 			break;
   9781 		delay(50);
   9782 	}
   9783 
   9784 	if ((mdic & MDIC_READY) == 0)
   9785 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9786 		    device_xname(dev), phy, reg);
   9787 	else if (mdic & MDIC_E)
   9788 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9789 		    device_xname(dev), phy, reg);
   9790 }
   9791 
   9792 /*
   9793  * wm_gmii_i82544_readreg:	[mii interface function]
   9794  *
   9795  *	Read a PHY register on the GMII.
   9796  */
   9797 static int
   9798 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9799 {
   9800 	struct wm_softc *sc = device_private(dev);
   9801 	int rv;
   9802 
   9803 	if (sc->phy.acquire(sc)) {
   9804 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9805 		return 0;
   9806 	}
   9807 
   9808 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9809 		switch (sc->sc_phytype) {
   9810 		case WMPHY_IGP:
   9811 		case WMPHY_IGP_2:
   9812 		case WMPHY_IGP_3:
   9813 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9814 			break;
   9815 		default:
   9816 #ifdef WM_DEBUG
   9817 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9818 			    __func__, sc->sc_phytype, reg);
   9819 #endif
   9820 			break;
   9821 		}
   9822 	}
   9823 
   9824 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9825 	sc->phy.release(sc);
   9826 
   9827 	return rv;
   9828 }
   9829 
   9830 /*
   9831  * wm_gmii_i82544_writereg:	[mii interface function]
   9832  *
   9833  *	Write a PHY register on the GMII.
   9834  */
   9835 static void
   9836 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9837 {
   9838 	struct wm_softc *sc = device_private(dev);
   9839 
   9840 	if (sc->phy.acquire(sc)) {
   9841 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9842 		return;
   9843 	}
   9844 
   9845 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9846 		switch (sc->sc_phytype) {
   9847 		case WMPHY_IGP:
   9848 		case WMPHY_IGP_2:
   9849 		case WMPHY_IGP_3:
   9850 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9851 			break;
   9852 		default:
   9853 #ifdef WM_DEBUG
    9854 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9855 			    __func__, sc->sc_phytype, reg);
   9856 #endif
   9857 			break;
   9858 		}
   9859 	}
   9860 
   9861 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9862 	sc->phy.release(sc);
   9863 }
   9864 
   9865 /*
   9866  * wm_gmii_i80003_readreg:	[mii interface function]
   9867  *
    9868  *	Read a PHY register on the kumeran bus.
    9869  * This could be handled by the PHY layer if we didn't have to lock the
    9870  * resource ...
   9871  */
   9872 static int
   9873 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9874 {
   9875 	struct wm_softc *sc = device_private(dev);
   9876 	int rv;
   9877 
   9878 	if (phy != 1) /* only one PHY on kumeran bus */
   9879 		return 0;
   9880 
   9881 	if (sc->phy.acquire(sc)) {
   9882 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9883 		return 0;
   9884 	}
   9885 
   9886 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9887 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9888 		    reg >> GG82563_PAGE_SHIFT);
   9889 	} else {
   9890 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9891 		    reg >> GG82563_PAGE_SHIFT);
   9892 	}
    9893 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9894 	delay(200);
   9895 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9896 	delay(200);
   9897 	sc->phy.release(sc);
   9898 
   9899 	return rv;
   9900 }
   9901 
   9902 /*
   9903  * wm_gmii_i80003_writereg:	[mii interface function]
   9904  *
    9905  *	Write a PHY register on the kumeran bus.
    9906  * This could be handled by the PHY layer if we didn't have to lock the
    9907  * resource ...
   9908  */
   9909 static void
   9910 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   9911 {
   9912 	struct wm_softc *sc = device_private(dev);
   9913 
   9914 	if (phy != 1) /* only one PHY on kumeran bus */
   9915 		return;
   9916 
   9917 	if (sc->phy.acquire(sc)) {
   9918 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9919 		return;
   9920 	}
   9921 
   9922 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9923 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9924 		    reg >> GG82563_PAGE_SHIFT);
   9925 	} else {
   9926 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9927 		    reg >> GG82563_PAGE_SHIFT);
   9928 	}
    9929 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9930 	delay(200);
   9931 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9932 	delay(200);
   9933 
   9934 	sc->phy.release(sc);
   9935 }
   9936 
   9937 /*
   9938  * wm_gmii_bm_readreg:	[mii interface function]
   9939  *
    9940  *	Read a BM PHY register.
    9941  * This could be handled by the PHY layer if we didn't have to lock the
    9942  * resource ...
   9943  */
   9944 static int
   9945 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   9946 {
   9947 	struct wm_softc *sc = device_private(dev);
   9948 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9949 	uint16_t val;
   9950 	int rv;
   9951 
   9952 	if (sc->phy.acquire(sc)) {
   9953 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9954 		return 0;
   9955 	}
   9956 
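	/*
	 * XXX Assumption taken from the e1000 code: on BM PHYs (other
	 * than 82574/82583), registers in pages >= 768, page 0
	 * register 25 and register 31 only respond at PHY address 1.
	 */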
   9957 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9958 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9959 		    || (reg == 31)) ? 1 : phy;
   9960 	/* Page 800 works differently than the rest so it has its own func */
   9961 	if (page == BM_WUC_PAGE) {
   9962 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   9963 		rv = val;
   9964 		goto release;
   9965 	}
   9966 
   9967 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9968 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9969 		    && (sc->sc_type != WM_T_82583))
   9970 			wm_gmii_mdic_writereg(dev, phy,
   9971 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9972 		else
   9973 			wm_gmii_mdic_writereg(dev, phy,
   9974 			    BME1000_PHY_PAGE_SELECT, page);
   9975 	}
   9976 
   9977 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9978 
   9979 release:
   9980 	sc->phy.release(sc);
   9981 	return rv;
   9982 }
   9983 
   9984 /*
   9985  * wm_gmii_bm_writereg:	[mii interface function]
   9986  *
    9987  *	Write a BM PHY register.
    9988  * This could be handled by the PHY layer if we didn't have to lock the
    9989  * resource ...
   9990  */
   9991 static void
   9992 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   9993 {
   9994 	struct wm_softc *sc = device_private(dev);
   9995 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9996 
   9997 	if (sc->phy.acquire(sc)) {
   9998 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9999 		return;
   10000 	}
   10001 
   10002 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10003 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10004 		    || (reg == 31)) ? 1 : phy;
   10005 	/* Page 800 works differently than the rest so it has its own func */
   10006 	if (page == BM_WUC_PAGE) {
   10007 		uint16_t tmp;
   10008 
   10009 		tmp = val;
   10010 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10011 		goto release;
   10012 	}
   10013 
   10014 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10015 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10016 		    && (sc->sc_type != WM_T_82583))
   10017 			wm_gmii_mdic_writereg(dev, phy,
   10018 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10019 		else
   10020 			wm_gmii_mdic_writereg(dev, phy,
   10021 			    BME1000_PHY_PAGE_SELECT, page);
   10022 	}
   10023 
   10024 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10025 
   10026 release:
   10027 	sc->phy.release(sc);
   10028 }
   10029 
   10030 static void
    10031 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10032 {
   10033 	struct wm_softc *sc = device_private(dev);
   10034 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10035 	uint16_t wuce, reg;
   10036 
   10037 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10038 		device_xname(dev), __func__));
   10039 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10040 	if (sc->sc_type == WM_T_PCH) {
    10041 		/* XXX The e1000 driver does nothing here... why? */
   10042 	}
   10043 
   10044 	/*
   10045 	 * 1) Enable PHY wakeup register first.
   10046 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10047 	 */
   10048 
   10049 	/* Set page 769 */
   10050 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10051 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10052 
   10053 	/* Read WUCE and save it */
   10054 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10055 
   10056 	reg = wuce | BM_WUC_ENABLE_BIT;
   10057 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10058 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10059 
   10060 	/* Select page 800 */
   10061 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10062 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10063 
   10064 	/*
   10065 	 * 2) Access PHY wakeup register.
   10066 	 * See e1000_access_phy_wakeup_reg_bm.
   10067 	 */
   10068 
   10069 	/* Write page 800 */
   10070 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10071 
   10072 	if (rd)
   10073 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10074 	else
   10075 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10076 
   10077 	/*
   10078 	 * 3) Disable PHY wakeup register.
   10079 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10080 	 */
   10081 	/* Set page 769 */
   10082 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10083 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10084 
   10085 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10086 }
   10087 
   10088 /*
   10089  * wm_gmii_hv_readreg:	[mii interface function]
   10090  *
    10091  *	Read an HV (PCH and newer) PHY register.
    10092  * This could be handled by the PHY layer if we didn't have to lock the
    10093  * resource ...
   10094  */
   10095 static int
   10096 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10097 {
   10098 	struct wm_softc *sc = device_private(dev);
   10099 	int rv;
   10100 
   10101 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10102 		device_xname(dev), __func__));
   10103 	if (sc->phy.acquire(sc)) {
   10104 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10105 		return 0;
   10106 	}
   10107 
   10108 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10109 	sc->phy.release(sc);
   10110 	return rv;
   10111 }
   10112 
   10113 static int
   10114 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10115 {
   10116 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10117 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10118 	uint16_t val;
   10119 	int rv;
   10120 
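	/* Registers in pages >= HV_INTC_FC_PAGE_START live at PHY addr 1 */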
   10121 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10122 
   10123 	/* Page 800 works differently than the rest so it has its own func */
   10124 	if (page == BM_WUC_PAGE) {
   10125 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10126 		return val;
   10127 	}
   10128 
    10129 	/*
    10130 	 * Pages lower than 768 work differently from the rest and would
    10131 	 * need their own function (not implemented here).
    10132 	 */
   10133 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10134 		printf("gmii_hv_readreg!!!\n");
   10135 		return 0;
   10136 	}
   10137 
   10138 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10139 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10140 		    page << BME1000_PAGE_SHIFT);
   10141 	}
   10142 
   10143 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10144 	return rv;
   10145 }
   10146 
   10147 /*
   10148  * wm_gmii_hv_writereg:	[mii interface function]
   10149  *
    10150  *	Write an HV (PCH and newer) PHY register.
    10151  * This could be handled by the PHY layer if we didn't have to lock the
    10152  * resource ...
   10153  */
   10154 static void
   10155 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10156 {
   10157 	struct wm_softc *sc = device_private(dev);
   10158 
   10159 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10160 		device_xname(dev), __func__));
   10161 
   10162 	if (sc->phy.acquire(sc)) {
   10163 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10164 		return;
   10165 	}
   10166 
   10167 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10168 	sc->phy.release(sc);
   10169 }
   10170 
   10171 static void
   10172 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10173 {
   10174 	struct wm_softc *sc = device_private(dev);
   10175 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10176 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10177 
   10178 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10179 
   10180 	/* Page 800 works differently than the rest so it has its own func */
   10181 	if (page == BM_WUC_PAGE) {
   10182 		uint16_t tmp;
   10183 
   10184 		tmp = val;
   10185 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10186 		return;
   10187 	}
   10188 
    10189 	/*
    10190 	 * Pages lower than 768 work differently from the rest and would
    10191 	 * need their own function (not implemented here).
    10192 	 */
   10193 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10194 		printf("gmii_hv_writereg!!!\n");
   10195 		return;
   10196 	}
   10197 
   10198 	{
   10199 		/*
   10200 		 * XXX Workaround MDIO accesses being disabled after entering
   10201 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10202 		 * register is set)
   10203 		 */
   10204 		if (sc->sc_phytype == WMPHY_82578) {
   10205 			struct mii_softc *child;
   10206 
   10207 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10208 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10209 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10210 			    && ((val & (1 << 11)) != 0)) {
   10211 				printf("XXX need workaround\n");
   10212 			}
   10213 		}
   10214 
   10215 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10216 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10217 			    page << BME1000_PAGE_SHIFT);
   10218 		}
   10219 	}
   10220 
   10221 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10222 }
   10223 
   10224 /*
   10225  * wm_gmii_82580_readreg:	[mii interface function]
   10226  *
   10227  *	Read a PHY register on the 82580 and I350.
   10228  * This could be handled by the PHY layer if we didn't have to lock the
    10229  * resource ...
   10230  */
   10231 static int
   10232 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10233 {
   10234 	struct wm_softc *sc = device_private(dev);
   10235 	int rv;
   10236 
   10237 	if (sc->phy.acquire(sc) != 0) {
   10238 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10239 		return 0;
   10240 	}
   10241 
   10242 #ifdef DIAGNOSTIC
   10243 	if (reg > MII_ADDRMASK) {
   10244 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10245 		    __func__, sc->sc_phytype, reg);
   10246 		reg &= MII_ADDRMASK;
   10247 	}
   10248 #endif
   10249 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10250 
   10251 	sc->phy.release(sc);
   10252 	return rv;
   10253 }
   10254 
   10255 /*
   10256  * wm_gmii_82580_writereg:	[mii interface function]
   10257  *
   10258  *	Write a PHY register on the 82580 and I350.
   10259  * This could be handled by the PHY layer if we didn't have to lock the
    10260  * resource ...
   10261  */
   10262 static void
   10263 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10264 {
   10265 	struct wm_softc *sc = device_private(dev);
   10266 
   10267 	if (sc->phy.acquire(sc) != 0) {
   10268 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10269 		return;
   10270 	}
   10271 
   10272 #ifdef DIAGNOSTIC
   10273 	if (reg > MII_ADDRMASK) {
   10274 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10275 		    __func__, sc->sc_phytype, reg);
   10276 		reg &= MII_ADDRMASK;
   10277 	}
   10278 #endif
   10279 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10280 
   10281 	sc->phy.release(sc);
   10282 }
   10283 
   10284 /*
   10285  * wm_gmii_gs40g_readreg:	[mii interface function]
   10286  *
    10287  *	Read a PHY register on the I210 and I211.
    10288  * This could be handled by the PHY layer if we didn't have to lock the
    10289  * resource ...
   10290  */
   10291 static int
   10292 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10293 {
   10294 	struct wm_softc *sc = device_private(dev);
   10295 	int page, offset;
   10296 	int rv;
   10297 
   10298 	/* Acquire semaphore */
   10299 	if (sc->phy.acquire(sc)) {
   10300 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10301 		return 0;
   10302 	}
   10303 
   10304 	/* Page select */
   10305 	page = reg >> GS40G_PAGE_SHIFT;
   10306 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10307 
   10308 	/* Read reg */
   10309 	offset = reg & GS40G_OFFSET_MASK;
   10310 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10311 
   10312 	sc->phy.release(sc);
   10313 	return rv;
   10314 }
   10315 
   10316 /*
   10317  * wm_gmii_gs40g_writereg:	[mii interface function]
   10318  *
   10319  *	Write a PHY register on the I210 and I211.
   10320  * This could be handled by the PHY layer if we didn't have to lock the
    10321  * resource ...
   10322  */
   10323 static void
   10324 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10325 {
   10326 	struct wm_softc *sc = device_private(dev);
   10327 	int page, offset;
   10328 
   10329 	/* Acquire semaphore */
   10330 	if (sc->phy.acquire(sc)) {
   10331 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10332 		return;
   10333 	}
   10334 
   10335 	/* Page select */
   10336 	page = reg >> GS40G_PAGE_SHIFT;
   10337 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10338 
   10339 	/* Write reg */
   10340 	offset = reg & GS40G_OFFSET_MASK;
   10341 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10342 
   10343 	/* Release semaphore */
   10344 	sc->phy.release(sc);
   10345 }
   10346 
   10347 /*
   10348  * wm_gmii_statchg:	[mii interface function]
   10349  *
   10350  *	Callback from MII layer when media changes.
   10351  */
   10352 static void
   10353 wm_gmii_statchg(struct ifnet *ifp)
   10354 {
   10355 	struct wm_softc *sc = ifp->if_softc;
   10356 	struct mii_data *mii = &sc->sc_mii;
   10357 
   10358 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10359 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10360 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10361 
   10362 	/*
   10363 	 * Get flow control negotiation result.
   10364 	 */
   10365 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10366 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10367 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10368 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10369 	}
   10370 
   10371 	if (sc->sc_flowflags & IFM_FLOW) {
   10372 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10373 			sc->sc_ctrl |= CTRL_TFCE;
   10374 			sc->sc_fcrtl |= FCRTL_XONE;
   10375 		}
   10376 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10377 			sc->sc_ctrl |= CTRL_RFCE;
   10378 	}
   10379 
   10380 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10381 		DPRINTF(WM_DEBUG_LINK,
   10382 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10383 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10384 	} else {
   10385 		DPRINTF(WM_DEBUG_LINK,
   10386 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10387 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10388 	}
   10389 
   10390 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10391 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10392 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10393 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10394 	if (sc->sc_type == WM_T_80003) {
   10395 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10396 		case IFM_1000_T:
   10397 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10398 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10399 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10400 			break;
   10401 		default:
   10402 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10403 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10404 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10405 			break;
   10406 		}
   10407 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10408 	}
   10409 }
   10410 
   10411 /* kumeran related (80003, ICH* and PCH*) */
   10412 
   10413 /*
   10414  * wm_kmrn_readreg:
   10415  *
   10416  *	Read a kumeran register
   10417  */
   10418 static int
   10419 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10420 {
   10421 	int rv;
   10422 
   10423 	if (sc->sc_type == WM_T_80003)
   10424 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10425 	else
   10426 		rv = sc->phy.acquire(sc);
   10427 	if (rv != 0) {
   10428 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10429 		    __func__);
   10430 		return 0;
   10431 	}
   10432 
   10433 	rv = wm_kmrn_readreg_locked(sc, reg);
   10434 
   10435 	if (sc->sc_type == WM_T_80003)
   10436 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10437 	else
   10438 		sc->phy.release(sc);
   10439 
   10440 	return rv;
   10441 }
   10442 
   10443 static int
   10444 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10445 {
   10446 	int rv;
   10447 
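	/*
	 * Kumeran registers are reached through KUMCTRLSTA: write the
	 * register offset with the read-enable bit set, wait a moment,
	 * then read the data back from the low 16 bits of the same
	 * register.
	 */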
   10448 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10449 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10450 	    KUMCTRLSTA_REN);
   10451 	CSR_WRITE_FLUSH(sc);
   10452 	delay(2);
   10453 
   10454 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10455 
   10456 	return rv;
   10457 }
   10458 
   10459 /*
   10460  * wm_kmrn_writereg:
   10461  *
   10462  *	Write a kumeran register
   10463  */
   10464 static void
   10465 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10466 {
   10467 	int rv;
   10468 
   10469 	if (sc->sc_type == WM_T_80003)
   10470 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10471 	else
   10472 		rv = sc->phy.acquire(sc);
   10473 	if (rv != 0) {
   10474 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10475 		    __func__);
   10476 		return;
   10477 	}
   10478 
   10479 	wm_kmrn_writereg_locked(sc, reg, val);
   10480 
   10481 	if (sc->sc_type == WM_T_80003)
   10482 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10483 	else
   10484 		sc->phy.release(sc);
   10485 }
   10486 
   10487 static void
   10488 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10489 {
   10490 
   10491 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10492 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10493 	    (val & KUMCTRLSTA_MASK));
   10494 }
   10495 
   10496 /* SGMII related */
   10497 
   10498 /*
   10499  * wm_sgmii_uses_mdio
   10500  *
   10501  * Check whether the transaction is to the internal PHY or the external
   10502  * MDIO interface. Return true if it's MDIO.
   10503  */
   10504 static bool
   10505 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10506 {
   10507 	uint32_t reg;
   10508 	bool ismdio = false;
   10509 
   10510 	switch (sc->sc_type) {
   10511 	case WM_T_82575:
   10512 	case WM_T_82576:
   10513 		reg = CSR_READ(sc, WMREG_MDIC);
   10514 		ismdio = ((reg & MDIC_DEST) != 0);
   10515 		break;
   10516 	case WM_T_82580:
   10517 	case WM_T_I350:
   10518 	case WM_T_I354:
   10519 	case WM_T_I210:
   10520 	case WM_T_I211:
   10521 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10522 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10523 		break;
   10524 	default:
   10525 		break;
   10526 	}
   10527 
   10528 	return ismdio;
   10529 }
   10530 
   10531 /*
   10532  * wm_sgmii_readreg:	[mii interface function]
   10533  *
   10534  *	Read a PHY register on the SGMII
   10535  * This could be handled by the PHY layer if we didn't have to lock the
    10536  * resource ...
   10537  */
   10538 static int
   10539 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10540 {
   10541 	struct wm_softc *sc = device_private(dev);
   10542 	uint32_t i2ccmd;
   10543 	int i, rv;
   10544 
   10545 	if (sc->phy.acquire(sc)) {
   10546 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10547 		return 0;
   10548 	}
   10549 
   10550 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10551 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10552 	    | I2CCMD_OPCODE_READ;
   10553 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10554 
   10555 	/* Poll the ready bit */
   10556 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10557 		delay(50);
   10558 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10559 		if (i2ccmd & I2CCMD_READY)
   10560 			break;
   10561 	}
   10562 	if ((i2ccmd & I2CCMD_READY) == 0)
   10563 		device_printf(dev, "I2CCMD Read did not complete\n");
   10564 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10565 		device_printf(dev, "I2CCMD Error bit set\n");
   10566 
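	/* Swap back the data bytes returned by the I2C interface */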
   10567 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10568 
   10569 	sc->phy.release(sc);
   10570 	return rv;
   10571 }
   10572 
   10573 /*
   10574  * wm_sgmii_writereg:	[mii interface function]
   10575  *
   10576  *	Write a PHY register on the SGMII.
   10577  * This could be handled by the PHY layer if we didn't have to lock the
    10578  * resource ...
   10579  */
   10580 static void
   10581 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10582 {
   10583 	struct wm_softc *sc = device_private(dev);
   10584 	uint32_t i2ccmd;
   10585 	int i;
   10586 	int val_swapped;
   10587 
   10588 	if (sc->phy.acquire(sc) != 0) {
   10589 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10590 		return;
   10591 	}
   10592 	/* Swap the data bytes for the I2C interface */
   10593 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10594 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10595 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10596 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10597 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10598 
   10599 	/* Poll the ready bit */
   10600 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10601 		delay(50);
   10602 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10603 		if (i2ccmd & I2CCMD_READY)
   10604 			break;
   10605 	}
   10606 	if ((i2ccmd & I2CCMD_READY) == 0)
   10607 		device_printf(dev, "I2CCMD Write did not complete\n");
   10608 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10609 		device_printf(dev, "I2CCMD Error bit set\n");
   10610 
   10611 	sc->phy.release(sc);
   10612 }
   10613 
   10614 /* TBI related */
   10615 
   10616 /*
   10617  * wm_tbi_mediainit:
   10618  *
   10619  *	Initialize media for use on 1000BASE-X devices.
   10620  */
   10621 static void
   10622 wm_tbi_mediainit(struct wm_softc *sc)
   10623 {
   10624 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10625 	const char *sep = "";
   10626 
   10627 	if (sc->sc_type < WM_T_82543)
   10628 		sc->sc_tipg = TIPG_WM_DFLT;
   10629 	else
   10630 		sc->sc_tipg = TIPG_LG_DFLT;
   10631 
   10632 	sc->sc_tbi_serdes_anegticks = 5;
   10633 
   10634 	/* Initialize our media structures */
   10635 	sc->sc_mii.mii_ifp = ifp;
   10636 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10637 
   10638 	if ((sc->sc_type >= WM_T_82575)
   10639 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10640 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10641 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10642 	else
   10643 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10644 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10645 
   10646 	/*
   10647 	 * SWD Pins:
   10648 	 *
   10649 	 *	0 = Link LED (output)
   10650 	 *	1 = Loss Of Signal (input)
   10651 	 */
   10652 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10653 
   10654 	/* XXX Perhaps this is only for TBI */
   10655 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10656 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10657 
   10658 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10659 		sc->sc_ctrl &= ~CTRL_LRST;
   10660 
   10661 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10662 
   10663 #define	ADD(ss, mm, dd)							\
   10664 do {									\
   10665 	aprint_normal("%s%s", sep, ss);					\
   10666 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10667 	sep = ", ";							\
   10668 } while (/*CONSTCOND*/0)
   10669 
   10670 	aprint_normal_dev(sc->sc_dev, "");
   10671 
   10672 	if (sc->sc_type == WM_T_I354) {
   10673 		uint32_t status;
   10674 
   10675 		status = CSR_READ(sc, WMREG_STATUS);
   10676 		if (((status & STATUS_2P5_SKU) != 0)
   10677 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10678 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    10679 		} else
    10680 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10681 	} else if (sc->sc_type == WM_T_82545) {
   10682 		/* Only 82545 is LX (XXX except SFP) */
   10683 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10684 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10685 	} else {
   10686 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10687 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10688 	}
   10689 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10690 	aprint_normal("\n");
   10691 
   10692 #undef ADD
   10693 
   10694 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10695 }
   10696 
   10697 /*
   10698  * wm_tbi_mediachange:	[ifmedia interface function]
   10699  *
   10700  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10701  */
   10702 static int
   10703 wm_tbi_mediachange(struct ifnet *ifp)
   10704 {
   10705 	struct wm_softc *sc = ifp->if_softc;
   10706 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10707 	uint32_t status;
   10708 	int i;
   10709 
   10710 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10711 		/* XXX need some work for >= 82571 and < 82575 */
   10712 		if (sc->sc_type < WM_T_82575)
   10713 			return 0;
   10714 	}
   10715 
   10716 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10717 	    || (sc->sc_type >= WM_T_82575))
   10718 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10719 
   10720 	sc->sc_ctrl &= ~CTRL_LRST;
   10721 	sc->sc_txcw = TXCW_ANE;
   10722 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10723 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10724 	else if (ife->ifm_media & IFM_FDX)
   10725 		sc->sc_txcw |= TXCW_FD;
   10726 	else
   10727 		sc->sc_txcw |= TXCW_HD;
   10728 
   10729 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10730 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10731 
   10732 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10733 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10734 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10735 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10736 	CSR_WRITE_FLUSH(sc);
   10737 	delay(1000);
   10738 
   10739 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10740 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10741 
    10742 	/*
    10743 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    10744 	 * if the optics detect a signal; on older chips it always reads 0.
    10745 	 */
   10746 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10747 		/* Have signal; wait for the link to come up. */
   10748 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10749 			delay(10000);
   10750 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10751 				break;
   10752 		}
   10753 
   10754 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10755 			    device_xname(sc->sc_dev),i));
   10756 
   10757 		status = CSR_READ(sc, WMREG_STATUS);
   10758 		DPRINTF(WM_DEBUG_LINK,
   10759 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10760 			device_xname(sc->sc_dev),status, STATUS_LU));
   10761 		if (status & STATUS_LU) {
   10762 			/* Link is up. */
   10763 			DPRINTF(WM_DEBUG_LINK,
   10764 			    ("%s: LINK: set media -> link up %s\n",
   10765 			    device_xname(sc->sc_dev),
   10766 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10767 
   10768 			/*
    10769 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    10770 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   10771 			 */
   10772 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10773 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10774 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10775 			if (status & STATUS_FD)
   10776 				sc->sc_tctl |=
   10777 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10778 			else
   10779 				sc->sc_tctl |=
   10780 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10781 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10782 				sc->sc_fcrtl |= FCRTL_XONE;
   10783 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10784 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10785 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10786 				      sc->sc_fcrtl);
   10787 			sc->sc_tbi_linkup = 1;
   10788 		} else {
   10789 			if (i == WM_LINKUP_TIMEOUT)
   10790 				wm_check_for_link(sc);
   10791 			/* Link is down. */
   10792 			DPRINTF(WM_DEBUG_LINK,
   10793 			    ("%s: LINK: set media -> link down\n",
   10794 			    device_xname(sc->sc_dev)));
   10795 			sc->sc_tbi_linkup = 0;
   10796 		}
   10797 	} else {
   10798 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10799 		    device_xname(sc->sc_dev)));
   10800 		sc->sc_tbi_linkup = 0;
   10801 	}
   10802 
   10803 	wm_tbi_serdes_set_linkled(sc);
   10804 
   10805 	return 0;
   10806 }
   10807 
   10808 /*
   10809  * wm_tbi_mediastatus:	[ifmedia interface function]
   10810  *
   10811  *	Get the current interface media status on a 1000BASE-X device.
   10812  */
   10813 static void
   10814 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10815 {
   10816 	struct wm_softc *sc = ifp->if_softc;
   10817 	uint32_t ctrl, status;
   10818 
   10819 	ifmr->ifm_status = IFM_AVALID;
   10820 	ifmr->ifm_active = IFM_ETHER;
   10821 
   10822 	status = CSR_READ(sc, WMREG_STATUS);
   10823 	if ((status & STATUS_LU) == 0) {
   10824 		ifmr->ifm_active |= IFM_NONE;
   10825 		return;
   10826 	}
   10827 
   10828 	ifmr->ifm_status |= IFM_ACTIVE;
   10829 	/* Only 82545 is LX */
   10830 	if (sc->sc_type == WM_T_82545)
   10831 		ifmr->ifm_active |= IFM_1000_LX;
   10832 	else
   10833 		ifmr->ifm_active |= IFM_1000_SX;
   10834 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10835 		ifmr->ifm_active |= IFM_FDX;
   10836 	else
   10837 		ifmr->ifm_active |= IFM_HDX;
   10838 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10839 	if (ctrl & CTRL_RFCE)
   10840 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10841 	if (ctrl & CTRL_TFCE)
   10842 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10843 }
   10844 
   10845 /* XXX TBI only */
   10846 static int
   10847 wm_check_for_link(struct wm_softc *sc)
   10848 {
   10849 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10850 	uint32_t rxcw;
   10851 	uint32_t ctrl;
   10852 	uint32_t status;
   10853 	uint32_t sig;
   10854 
   10855 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10856 		/* XXX need some work for >= 82571 */
   10857 		if (sc->sc_type >= WM_T_82571) {
   10858 			sc->sc_tbi_linkup = 1;
   10859 			return 0;
   10860 		}
   10861 	}
   10862 
   10863 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10864 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10865 	status = CSR_READ(sc, WMREG_STATUS);
   10866 
   10867 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10868 
   10869 	DPRINTF(WM_DEBUG_LINK,
   10870 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10871 		device_xname(sc->sc_dev), __func__,
   10872 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10873 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10874 
   10875 	/*
   10876 	 * SWDPIN   LU RXCW
   10877 	 *      0    0    0
   10878 	 *      0    0    1	(should not happen)
   10879 	 *      0    1    0	(should not happen)
   10880 	 *      0    1    1	(should not happen)
   10881 	 *      1    0    0	Disable autonego and force linkup
   10882 	 *      1    0    1	got /C/ but not linkup yet
   10883 	 *      1    1    0	(linkup)
   10884 	 *      1    1    1	If IFM_AUTO, back to autonego
   10885 	 *
   10886 	 */
   10887 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10888 	    && ((status & STATUS_LU) == 0)
   10889 	    && ((rxcw & RXCW_C) == 0)) {
   10890 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10891 			__func__));
   10892 		sc->sc_tbi_linkup = 0;
   10893 		/* Disable auto-negotiation in the TXCW register */
   10894 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10895 
   10896 		/*
   10897 		 * Force link-up and also force full-duplex.
   10898 		 *
    10899 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    10900 		 * automatically, so we should update sc->sc_ctrl.
   10901 		 */
   10902 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10903 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10904 	} else if (((status & STATUS_LU) != 0)
   10905 	    && ((rxcw & RXCW_C) != 0)
   10906 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10907 		sc->sc_tbi_linkup = 1;
   10908 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10909 			__func__));
   10910 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10911 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10912 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10913 	    && ((rxcw & RXCW_C) != 0)) {
   10914 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10915 	} else {
   10916 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10917 			status));
   10918 	}
   10919 
   10920 	return 0;
   10921 }
   10922 
   10923 /*
   10924  * wm_tbi_tick:
   10925  *
   10926  *	Check the link on TBI devices.
   10927  *	This function acts as mii_tick().
   10928  */
   10929 static void
   10930 wm_tbi_tick(struct wm_softc *sc)
   10931 {
   10932 	struct mii_data *mii = &sc->sc_mii;
   10933 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10934 	uint32_t status;
   10935 
   10936 	KASSERT(WM_CORE_LOCKED(sc));
   10937 
   10938 	status = CSR_READ(sc, WMREG_STATUS);
   10939 
   10940 	/* XXX is this needed? */
   10941 	(void)CSR_READ(sc, WMREG_RXCW);
   10942 	(void)CSR_READ(sc, WMREG_CTRL);
   10943 
   10944 	/* set link status */
   10945 	if ((status & STATUS_LU) == 0) {
   10946 		DPRINTF(WM_DEBUG_LINK,
   10947 		    ("%s: LINK: checklink -> down\n",
   10948 			device_xname(sc->sc_dev)));
   10949 		sc->sc_tbi_linkup = 0;
   10950 	} else if (sc->sc_tbi_linkup == 0) {
   10951 		DPRINTF(WM_DEBUG_LINK,
   10952 		    ("%s: LINK: checklink -> up %s\n",
   10953 			device_xname(sc->sc_dev),
   10954 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10955 		sc->sc_tbi_linkup = 1;
   10956 		sc->sc_tbi_serdes_ticks = 0;
   10957 	}
   10958 
   10959 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10960 		goto setled;
   10961 
   10962 	if ((status & STATUS_LU) == 0) {
   10963 		sc->sc_tbi_linkup = 0;
   10964 		/* If the timer expired, retry autonegotiation */
   10965 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10966 		    && (++sc->sc_tbi_serdes_ticks
   10967 			>= sc->sc_tbi_serdes_anegticks)) {
   10968 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10969 			sc->sc_tbi_serdes_ticks = 0;
   10970 			/*
   10971 			 * Reset the link, and let autonegotiation do
   10972 			 * its thing
   10973 			 */
   10974 			sc->sc_ctrl |= CTRL_LRST;
   10975 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10976 			CSR_WRITE_FLUSH(sc);
   10977 			delay(1000);
   10978 			sc->sc_ctrl &= ~CTRL_LRST;
   10979 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10980 			CSR_WRITE_FLUSH(sc);
   10981 			delay(1000);
   10982 			CSR_WRITE(sc, WMREG_TXCW,
   10983 			    sc->sc_txcw & ~TXCW_ANE);
   10984 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10985 		}
   10986 	}
   10987 
   10988 setled:
   10989 	wm_tbi_serdes_set_linkled(sc);
   10990 }
   10991 
   10992 /* SERDES related */
   10993 static void
   10994 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10995 {
   10996 	uint32_t reg;
   10997 
   10998 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10999 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11000 		return;
   11001 
   11002 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11003 	reg |= PCS_CFG_PCS_EN;
   11004 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11005 
   11006 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11007 	reg &= ~CTRL_EXT_SWDPIN(3);
   11008 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11009 	CSR_WRITE_FLUSH(sc);
   11010 }
   11011 
   11012 static int
   11013 wm_serdes_mediachange(struct ifnet *ifp)
   11014 {
   11015 	struct wm_softc *sc = ifp->if_softc;
   11016 	bool pcs_autoneg = true; /* XXX */
   11017 	uint32_t ctrl_ext, pcs_lctl, reg;
   11018 
   11019 	/* XXX Currently, this function is not called on 8257[12] */
   11020 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11021 	    || (sc->sc_type >= WM_T_82575))
   11022 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11023 
   11024 	wm_serdes_power_up_link_82575(sc);
   11025 
   11026 	sc->sc_ctrl |= CTRL_SLU;
   11027 
   11028 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11029 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11030 
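	/*
	 * The link mode strapped in CTRL_EXT decides whether the PCS
	 * autonegotiates (SGMII) or is forced to 1000/full duplex
	 * (1000BASE-KX, and the default case on 82575/82576 when PCS
	 * autonegotiation is disabled).
	 */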
   11031 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11032 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11033 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11034 	case CTRL_EXT_LINK_MODE_SGMII:
   11035 		pcs_autoneg = true;
   11036 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11037 		break;
   11038 	case CTRL_EXT_LINK_MODE_1000KX:
   11039 		pcs_autoneg = false;
   11040 		/* FALLTHROUGH */
   11041 	default:
   11042 		if ((sc->sc_type == WM_T_82575)
   11043 		    || (sc->sc_type == WM_T_82576)) {
   11044 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11045 				pcs_autoneg = false;
   11046 		}
   11047 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11048 		    | CTRL_FRCFDX;
   11049 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11050 	}
   11051 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11052 
   11053 	if (pcs_autoneg) {
   11054 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11055 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11056 
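		/* Advertise both symmetric and asymmetric PAUSE */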
   11057 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11058 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11059 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11060 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11061 	} else
   11062 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11063 
   11064 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11065 
   11066 
   11067 	return 0;
   11068 }
   11069 
   11070 static void
   11071 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11072 {
   11073 	struct wm_softc *sc = ifp->if_softc;
   11074 	struct mii_data *mii = &sc->sc_mii;
   11075 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11076 	uint32_t pcs_adv, pcs_lpab, reg;
   11077 
   11078 	ifmr->ifm_status = IFM_AVALID;
   11079 	ifmr->ifm_active = IFM_ETHER;
   11080 
   11081 	/* Check PCS */
   11082 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11083 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11084 		ifmr->ifm_active |= IFM_NONE;
   11085 		sc->sc_tbi_linkup = 0;
   11086 		goto setled;
   11087 	}
   11088 
   11089 	sc->sc_tbi_linkup = 1;
   11090 	ifmr->ifm_status |= IFM_ACTIVE;
   11091 	if (sc->sc_type == WM_T_I354) {
   11092 		uint32_t status;
   11093 
   11094 		status = CSR_READ(sc, WMREG_STATUS);
   11095 		if (((status & STATUS_2P5_SKU) != 0)
   11096 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11097 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11098 		} else
   11099 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11100 	} else {
   11101 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11102 		case PCS_LSTS_SPEED_10:
   11103 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11104 			break;
   11105 		case PCS_LSTS_SPEED_100:
   11106 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11107 			break;
   11108 		case PCS_LSTS_SPEED_1000:
   11109 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11110 			break;
   11111 		default:
   11112 			device_printf(sc->sc_dev, "Unknown speed\n");
   11113 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11114 			break;
   11115 		}
   11116 	}
   11117 	if ((reg & PCS_LSTS_FDX) != 0)
   11118 		ifmr->ifm_active |= IFM_FDX;
   11119 	else
   11120 		ifmr->ifm_active |= IFM_HDX;
   11121 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11122 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11123 		/* Check flow */
   11124 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11125 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11126 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11127 			goto setled;
   11128 		}
   11129 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11130 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11131 		DPRINTF(WM_DEBUG_LINK,
   11132 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
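		/*
		 * Resolve pause per 802.3 annex 28B: symmetric PAUSE on
		 * both sides enables flow control in both directions; an
		 * asymmetric match enables TX-only or RX-only pause.
		 */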
   11133 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11134 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11135 			mii->mii_media_active |= IFM_FLOW
   11136 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11137 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11138 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11139 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11140 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11141 			mii->mii_media_active |= IFM_FLOW
   11142 			    | IFM_ETH_TXPAUSE;
   11143 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11144 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11145 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11146 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11147 			mii->mii_media_active |= IFM_FLOW
   11148 			    | IFM_ETH_RXPAUSE;
   11149 		}
   11150 	}
   11151 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11152 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11153 setled:
   11154 	wm_tbi_serdes_set_linkled(sc);
   11155 }
   11156 
   11157 /*
   11158  * wm_serdes_tick:
   11159  *
   11160  *	Check the link on serdes devices.
   11161  */
   11162 static void
   11163 wm_serdes_tick(struct wm_softc *sc)
   11164 {
   11165 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11166 	struct mii_data *mii = &sc->sc_mii;
   11167 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11168 	uint32_t reg;
   11169 
   11170 	KASSERT(WM_CORE_LOCKED(sc));
   11171 
   11172 	mii->mii_media_status = IFM_AVALID;
   11173 	mii->mii_media_active = IFM_ETHER;
   11174 
   11175 	/* Check PCS */
   11176 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11177 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11178 		mii->mii_media_status |= IFM_ACTIVE;
   11179 		sc->sc_tbi_linkup = 1;
   11180 		sc->sc_tbi_serdes_ticks = 0;
   11181 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11182 		if ((reg & PCS_LSTS_FDX) != 0)
   11183 			mii->mii_media_active |= IFM_FDX;
   11184 		else
   11185 			mii->mii_media_active |= IFM_HDX;
   11186 	} else {
   11187 		mii->mii_media_status |= IFM_NONE;
   11188 		sc->sc_tbi_linkup = 0;
   11189 		/* If the timer expired, retry autonegotiation */
   11190 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11191 		    && (++sc->sc_tbi_serdes_ticks
   11192 			>= sc->sc_tbi_serdes_anegticks)) {
   11193 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11194 			sc->sc_tbi_serdes_ticks = 0;
   11195 			/* XXX */
   11196 			wm_serdes_mediachange(ifp);
   11197 		}
   11198 	}
   11199 
   11200 	wm_tbi_serdes_set_linkled(sc);
   11201 }
   11202 
   11203 /* SFP related */
   11204 
   11205 static int
   11206 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11207 {
   11208 	uint32_t i2ccmd;
   11209 	int i;
   11210 
   11211 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11212 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11213 
   11214 	/* Poll the ready bit */
   11215 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11216 		delay(50);
   11217 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11218 		if (i2ccmd & I2CCMD_READY)
   11219 			break;
   11220 	}
   11221 	if ((i2ccmd & I2CCMD_READY) == 0)
   11222 		return -1;
   11223 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11224 		return -1;
   11225 
   11226 	*data = i2ccmd & 0x00ff;
   11227 
   11228 	return 0;
   11229 }
   11230 
   11231 static uint32_t
   11232 wm_sfp_get_media_type(struct wm_softc *sc)
   11233 {
   11234 	uint32_t ctrl_ext;
   11235 	uint8_t val = 0;
   11236 	int timeout = 3;
   11237 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11238 	int rv = -1;
   11239 
   11240 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11241 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11242 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11243 	CSR_WRITE_FLUSH(sc);
   11244 
   11245 	/* Read SFP module data */
   11246 	while (timeout) {
   11247 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11248 		if (rv == 0)
   11249 			break;
   11250 		delay(100*1000); /* XXX too big */
   11251 		timeout--;
   11252 	}
   11253 	if (rv != 0)
   11254 		goto out;
   11255 	switch (val) {
   11256 	case SFF_SFP_ID_SFF:
   11257 		aprint_normal_dev(sc->sc_dev,
   11258 		    "Module/Connector soldered to board\n");
   11259 		break;
   11260 	case SFF_SFP_ID_SFP:
   11261 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11262 		break;
   11263 	case SFF_SFP_ID_UNKNOWN:
   11264 		goto out;
   11265 	default:
   11266 		break;
   11267 	}
   11268 
   11269 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11270 	if (rv != 0) {
   11271 		goto out;
   11272 	}
   11273 
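	/*
	 * Map the module's Ethernet compliance codes to a media type:
	 * 1000BASE-SX/LX modules are driven as SerDes; 1000BASE-T and
	 * 100BASE-FX modules are reached over SGMII.
	 */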
   11274 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11275 		mediatype = WM_MEDIATYPE_SERDES;
    11276 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11277 		sc->sc_flags |= WM_F_SGMII;
   11278 		mediatype = WM_MEDIATYPE_COPPER;
    11279 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11280 		sc->sc_flags |= WM_F_SGMII;
   11281 		mediatype = WM_MEDIATYPE_SERDES;
   11282 	}
   11283 
   11284 out:
   11285 	/* Restore I2C interface setting */
   11286 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11287 
   11288 	return mediatype;
   11289 }
   11290 
   11291 /*
   11292  * NVM related.
    11293  * Microwire, SPI (with or without EERD) and Flash.
   11294  */
   11295 
   11296 /* Both spi and uwire */
   11297 
   11298 /*
   11299  * wm_eeprom_sendbits:
   11300  *
   11301  *	Send a series of bits to the EEPROM.
   11302  */
   11303 static void
   11304 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11305 {
   11306 	uint32_t reg;
   11307 	int x;
   11308 
   11309 	reg = CSR_READ(sc, WMREG_EECD);
   11310 
   11311 	for (x = nbits; x > 0; x--) {
   11312 		if (bits & (1U << (x - 1)))
   11313 			reg |= EECD_DI;
   11314 		else
   11315 			reg &= ~EECD_DI;
   11316 		CSR_WRITE(sc, WMREG_EECD, reg);
   11317 		CSR_WRITE_FLUSH(sc);
   11318 		delay(2);
   11319 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11320 		CSR_WRITE_FLUSH(sc);
   11321 		delay(2);
   11322 		CSR_WRITE(sc, WMREG_EECD, reg);
   11323 		CSR_WRITE_FLUSH(sc);
   11324 		delay(2);
   11325 	}
   11326 }
   11327 
   11328 /*
   11329  * wm_eeprom_recvbits:
   11330  *
   11331  *	Receive a series of bits from the EEPROM.
   11332  */
   11333 static void
   11334 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11335 {
   11336 	uint32_t reg, val;
   11337 	int x;
   11338 
   11339 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11340 
   11341 	val = 0;
   11342 	for (x = nbits; x > 0; x--) {
   11343 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11344 		CSR_WRITE_FLUSH(sc);
   11345 		delay(2);
   11346 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11347 			val |= (1U << (x - 1));
   11348 		CSR_WRITE(sc, WMREG_EECD, reg);
   11349 		CSR_WRITE_FLUSH(sc);
   11350 		delay(2);
   11351 	}
   11352 	*valp = val;
   11353 }
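
/*
 * Both helpers above bit-bang the EEPROM serial bus through the EECD
 * register: data is driven on DI (or sampled from DO) and each bit is
 * clocked by toggling EECD_SK with ~2us settling delays.  One clock
 * cycle looks roughly like this (illustrative sketch only):
 *
 *	CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);	// raise clock
 *	CSR_WRITE_FLUSH(sc);
 *	delay(2);
 *	CSR_WRITE(sc, WMREG_EECD, reg);			// lower clock
 *	CSR_WRITE_FLUSH(sc);
 *	delay(2);
 */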
   11354 
   11355 /* Microwire */
   11356 
   11357 /*
   11358  * wm_nvm_read_uwire:
   11359  *
   11360  *	Read a word from the EEPROM using the MicroWire protocol.
   11361  */
   11362 static int
   11363 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11364 {
   11365 	uint32_t reg, val;
   11366 	int i;
   11367 
   11368 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11369 		device_xname(sc->sc_dev), __func__));
   11370 
   11371 	for (i = 0; i < wordcnt; i++) {
   11372 		/* Clear SK and DI. */
   11373 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11374 		CSR_WRITE(sc, WMREG_EECD, reg);
   11375 
   11376 		/*
    11377 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
    11378 		 * and in Xen.
    11379 		 *
    11380 		 * We use this workaround only for the 82540 because
    11381 		 * qemu's e1000 acts as an 82540.
   11382 		 */
   11383 		if (sc->sc_type == WM_T_82540) {
   11384 			reg |= EECD_SK;
   11385 			CSR_WRITE(sc, WMREG_EECD, reg);
   11386 			reg &= ~EECD_SK;
   11387 			CSR_WRITE(sc, WMREG_EECD, reg);
   11388 			CSR_WRITE_FLUSH(sc);
   11389 			delay(2);
   11390 		}
   11391 		/* XXX: end of workaround */
   11392 
   11393 		/* Set CHIP SELECT. */
   11394 		reg |= EECD_CS;
   11395 		CSR_WRITE(sc, WMREG_EECD, reg);
   11396 		CSR_WRITE_FLUSH(sc);
   11397 		delay(2);
   11398 
   11399 		/* Shift in the READ command. */
   11400 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11401 
   11402 		/* Shift in address. */
   11403 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11404 
   11405 		/* Shift out the data. */
   11406 		wm_eeprom_recvbits(sc, &val, 16);
   11407 		data[i] = val & 0xffff;
   11408 
   11409 		/* Clear CHIP SELECT. */
   11410 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11411 		CSR_WRITE(sc, WMREG_EECD, reg);
   11412 		CSR_WRITE_FLUSH(sc);
   11413 		delay(2);
   11414 	}
   11415 
   11416 	return 0;
   11417 }
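
/*
 * Usage sketch (illustrative, not part of the driver): reading the
 * three station address words over Microwire would look like:
 *
 *	uint16_t myea[3];
 *	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0)
 *		...			// myea[0..2] hold the MAC address
 *
 * In practice callers go through wm_nvm_read(), which acquires the
 * NVM lock and dispatches to the right access method for the chip.
 */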
   11418 
   11419 /* SPI */
   11420 
   11421 /*
   11422  * Set SPI and FLASH related information from the EECD register.
   11423  * For 82541 and 82547, the word size is taken from EEPROM.
   11424  */
   11425 static int
   11426 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11427 {
   11428 	int size;
   11429 	uint32_t reg;
   11430 	uint16_t data;
   11431 
   11432 	reg = CSR_READ(sc, WMREG_EECD);
   11433 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11434 
   11435 	/* Read the size of NVM from EECD by default */
   11436 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11437 	switch (sc->sc_type) {
   11438 	case WM_T_82541:
   11439 	case WM_T_82541_2:
   11440 	case WM_T_82547:
   11441 	case WM_T_82547_2:
    11442 		/* Set a dummy wordsize so the EEPROM read below works */
   11443 		sc->sc_nvm_wordsize = 64;
   11444 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11445 		reg = data;
   11446 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11447 		if (size == 0)
   11448 			size = 6; /* 64 word size */
   11449 		else
   11450 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11451 		break;
   11452 	case WM_T_80003:
   11453 	case WM_T_82571:
   11454 	case WM_T_82572:
   11455 	case WM_T_82573: /* SPI case */
   11456 	case WM_T_82574: /* SPI case */
   11457 	case WM_T_82583: /* SPI case */
   11458 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11459 		if (size > 14)
   11460 			size = 14;
   11461 		break;
   11462 	case WM_T_82575:
   11463 	case WM_T_82576:
   11464 	case WM_T_82580:
   11465 	case WM_T_I350:
   11466 	case WM_T_I354:
   11467 	case WM_T_I210:
   11468 	case WM_T_I211:
   11469 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11470 		if (size > 15)
   11471 			size = 15;
   11472 		break;
   11473 	default:
   11474 		aprint_error_dev(sc->sc_dev,
   11475 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11476 		return -1;
   11478 	}
   11479 
   11480 	sc->sc_nvm_wordsize = 1 << size;
   11481 
   11482 	return 0;
   11483 }
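
/*
 * Worked example (illustrative): sc_nvm_wordsize = 1 << size, so the
 * 82541 fallback of size 6 yields the 64-word default noted above,
 * and e.g. an 82571 whose EECD size field reads 2 ends up with
 * size 2 + NVM_WORD_SIZE_BASE_SHIFT (8 with the usual shift of 6),
 * i.e. a 256-word NVM.
 */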
   11484 
   11485 /*
   11486  * wm_nvm_ready_spi:
   11487  *
   11488  *	Wait for a SPI EEPROM to be ready for commands.
   11489  */
   11490 static int
   11491 wm_nvm_ready_spi(struct wm_softc *sc)
   11492 {
   11493 	uint32_t val;
   11494 	int usec;
   11495 
   11496 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11497 		device_xname(sc->sc_dev), __func__));
   11498 
   11499 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11500 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11501 		wm_eeprom_recvbits(sc, &val, 8);
   11502 		if ((val & SPI_SR_RDY) == 0)
   11503 			break;
   11504 	}
   11505 	if (usec >= SPI_MAX_RETRIES) {
    11506 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11507 		return 1;
   11508 	}
   11509 	return 0;
   11510 }
   11511 
   11512 /*
   11513  * wm_nvm_read_spi:
   11514  *
    11515  *	Read a word from the EEPROM using the SPI protocol.
   11516  */
   11517 static int
   11518 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11519 {
   11520 	uint32_t reg, val;
   11521 	int i;
   11522 	uint8_t opc;
   11523 
   11524 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11525 		device_xname(sc->sc_dev), __func__));
   11526 
   11527 	/* Clear SK and CS. */
   11528 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11529 	CSR_WRITE(sc, WMREG_EECD, reg);
   11530 	CSR_WRITE_FLUSH(sc);
   11531 	delay(2);
   11532 
   11533 	if (wm_nvm_ready_spi(sc))
   11534 		return 1;
   11535 
   11536 	/* Toggle CS to flush commands. */
   11537 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11538 	CSR_WRITE_FLUSH(sc);
   11539 	delay(2);
   11540 	CSR_WRITE(sc, WMREG_EECD, reg);
   11541 	CSR_WRITE_FLUSH(sc);
   11542 	delay(2);
   11543 
   11544 	opc = SPI_OPC_READ;
   11545 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11546 		opc |= SPI_OPC_A8;
   11547 
   11548 	wm_eeprom_sendbits(sc, opc, 8);
   11549 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11550 
   11551 	for (i = 0; i < wordcnt; i++) {
   11552 		wm_eeprom_recvbits(sc, &val, 16);
   11553 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11554 	}
   11555 
   11556 	/* Raise CS and clear SK. */
   11557 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11558 	CSR_WRITE(sc, WMREG_EECD, reg);
   11559 	CSR_WRITE_FLUSH(sc);
   11560 	delay(2);
   11561 
   11562 	return 0;
   11563 }
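
/*
 * Note on byte order: the SPI part shifts each word out MSB first, so
 * the 16 bits arrive byte-swapped; e.g. a received value of 0xab12 is
 * stored as 0x12ab by the swap above.  Usage sketch (illustrative):
 *
 *	uint16_t cfg;
 *	if (wm_nvm_read_spi(sc, NVM_OFF_CFG2, 1, &cfg) == 0)
 *		...		// cfg is already in host word order
 */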
   11564 
    11565 /* Reading with EERD */
   11566 
   11567 static int
   11568 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11569 {
   11570 	uint32_t attempts = 100000;
   11571 	uint32_t i, reg = 0;
   11572 	int32_t done = -1;
   11573 
   11574 	for (i = 0; i < attempts; i++) {
   11575 		reg = CSR_READ(sc, rw);
   11576 
   11577 		if (reg & EERD_DONE) {
   11578 			done = 0;
   11579 			break;
   11580 		}
   11581 		delay(5);
   11582 	}
   11583 
   11584 	return done;
   11585 }
   11586 
   11587 static int
   11588 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11589     uint16_t *data)
   11590 {
   11591 	int i, eerd = 0;
   11592 	int error = 0;
   11593 
   11594 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11595 		device_xname(sc->sc_dev), __func__));
   11596 
   11597 	for (i = 0; i < wordcnt; i++) {
   11598 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11599 
   11600 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11601 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11602 		if (error != 0)
   11603 			break;
   11604 
   11605 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11606 	}
   11607 
   11608 	return error;
   11609 }
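
/*
 * Usage sketch (illustrative, not part of the driver): EERD replaces
 * the bit-banging above with one register handshake per word:
 *
 *	uint16_t ver;
 *	if (wm_nvm_read_eerd(sc, NVM_OFF_VERSION, 1, &ver) == 0)
 *		...		// ver holds the NVM version word
 *
 * wm_poll_eerd_eewr_done() bounds each wait at 100000 polls of 5us.
 */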
   11610 
   11611 /* Flash */
   11612 
   11613 static int
   11614 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11615 {
   11616 	uint32_t eecd;
   11617 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11618 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11619 	uint8_t sig_byte = 0;
   11620 
   11621 	switch (sc->sc_type) {
   11622 	case WM_T_PCH_SPT:
   11623 		/*
   11624 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11625 		 * sector valid bits from the NVM.
   11626 		 */
   11627 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11628 		if ((*bank == 0) || (*bank == 1)) {
   11629 			aprint_error_dev(sc->sc_dev,
   11630 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11631 				*bank);
   11632 			return -1;
   11633 		} else {
   11634 			*bank = *bank - 2;
   11635 			return 0;
   11636 		}
   11637 	case WM_T_ICH8:
   11638 	case WM_T_ICH9:
   11639 		eecd = CSR_READ(sc, WMREG_EECD);
   11640 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11641 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11642 			return 0;
   11643 		}
   11644 		/* FALLTHROUGH */
   11645 	default:
   11646 		/* Default to 0 */
   11647 		*bank = 0;
   11648 
   11649 		/* Check bank 0 */
   11650 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11651 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11652 			*bank = 0;
   11653 			return 0;
   11654 		}
   11655 
   11656 		/* Check bank 1 */
   11657 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11658 		    &sig_byte);
   11659 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11660 			*bank = 1;
   11661 			return 0;
   11662 		}
   11663 	}
   11664 
   11665 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11666 		device_xname(sc->sc_dev)));
   11667 	return -1;
   11668 }
   11669 
   11670 /******************************************************************************
   11671  * This function does initial flash setup so that a new read/write/erase cycle
   11672  * can be started.
   11673  *
   11674  * sc - The pointer to the hw structure
   11675  ****************************************************************************/
   11676 static int32_t
   11677 wm_ich8_cycle_init(struct wm_softc *sc)
   11678 {
   11679 	uint16_t hsfsts;
   11680 	int32_t error = 1;
   11681 	int32_t i     = 0;
   11682 
   11683 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11684 
    11685 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
   11686 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11687 		return error;
   11688 	}
   11689 
    11690 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   11692 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11693 
   11694 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11695 
    11696 	/*
    11697 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11698 	 * check against in order to start a new cycle, or the FDONE bit
    11699 	 * should be changed in the hardware so that it is 1 after a hardware
    11700 	 * reset, which could then indicate whether a cycle is in progress or
    11701 	 * has been completed.  We should also have some software semaphore
    11702 	 * mechanism to guard FDONE or the cycle-in-progress bit so that two
    11703 	 * threads' accesses to those bits are serialized, or a way so that
    11704 	 * two threads don't start a cycle at the same time.
    11705 	 */
   11706 
   11707 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11708 		/*
   11709 		 * There is no cycle running at present, so we can start a
   11710 		 * cycle
   11711 		 */
   11712 
   11713 		/* Begin by setting Flash Cycle Done. */
   11714 		hsfsts |= HSFSTS_DONE;
   11715 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11716 		error = 0;
   11717 	} else {
   11718 		/*
    11719 		 * Otherwise, poll for some time so the current cycle has a
    11720 		 * chance to end before giving up.
   11721 		 */
   11722 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11723 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11724 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11725 				error = 0;
   11726 				break;
   11727 			}
   11728 			delay(1);
   11729 		}
   11730 		if (error == 0) {
   11731 			/*
    11732 			 * The previous cycle ended within the timeout, so
    11733 			 * now set the Flash Cycle Done bit.
   11734 			 */
   11735 			hsfsts |= HSFSTS_DONE;
   11736 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11737 		}
   11738 	}
   11739 	return error;
   11740 }
   11741 
   11742 /******************************************************************************
   11743  * This function starts a flash cycle and waits for its completion
   11744  *
   11745  * sc - The pointer to the hw structure
   11746  ****************************************************************************/
   11747 static int32_t
   11748 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11749 {
   11750 	uint16_t hsflctl;
   11751 	uint16_t hsfsts;
   11752 	int32_t error = 1;
   11753 	uint32_t i = 0;
   11754 
   11755 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11756 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11757 	hsflctl |= HSFCTL_GO;
   11758 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11759 
   11760 	/* Wait till FDONE bit is set to 1 */
   11761 	do {
   11762 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11763 		if (hsfsts & HSFSTS_DONE)
   11764 			break;
   11765 		delay(1);
   11766 		i++;
   11767 	} while (i < timeout);
    11768 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11769 		error = 0;
   11770 
   11771 	return error;
   11772 }
   11773 
   11774 /******************************************************************************
   11775  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11776  *
   11777  * sc - The pointer to the hw structure
   11778  * index - The index of the byte or word to read.
   11779  * size - Size of data to read, 1=byte 2=word, 4=dword
   11780  * data - Pointer to the word to store the value read.
   11781  *****************************************************************************/
   11782 static int32_t
   11783 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11784     uint32_t size, uint32_t *data)
   11785 {
   11786 	uint16_t hsfsts;
   11787 	uint16_t hsflctl;
   11788 	uint32_t flash_linear_address;
   11789 	uint32_t flash_data = 0;
   11790 	int32_t error = 1;
   11791 	int32_t count = 0;
   11792 
    11793 	if (size < 1 || size > 4 || data == NULL ||
   11794 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11795 		return error;
   11796 
   11797 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11798 	    sc->sc_ich8_flash_base;
   11799 
   11800 	do {
   11801 		delay(1);
   11802 		/* Steps */
   11803 		error = wm_ich8_cycle_init(sc);
   11804 		if (error)
   11805 			break;
   11806 
   11807 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11808 		/* BCOUNT is size - 1: 0/1/3 select 1-, 2- and 4-byte access. */
   11809 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11810 		    & HSFCTL_BCOUNT_MASK;
   11811 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11812 		if (sc->sc_type == WM_T_PCH_SPT) {
   11813 			/*
    11814 			 * In SPT, this register is in LAN memory space, not
   11815 			 * flash. Therefore, only 32 bit access is supported.
   11816 			 */
   11817 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11818 			    (uint32_t)hsflctl);
   11819 		} else
   11820 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11821 
   11822 		/*
   11823 		 * Write the last 24 bits of index into Flash Linear address
   11824 		 * field in Flash Address
   11825 		 */
   11826 		/* TODO: TBD maybe check the index against the size of flash */
   11827 
   11828 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11829 
   11830 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11831 
   11832 		/*
    11833 		 * If FCERR is set, clear it and retry the whole sequence up
    11834 		 * to ICH_FLASH_CYCLE_REPEAT_COUNT more times; otherwise read
    11835 		 * the result out of Flash Data0, least significant byte
    11836 		 * first.
   11837 		 */
   11838 		if (error == 0) {
   11839 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11840 			if (size == 1)
   11841 				*data = (uint8_t)(flash_data & 0x000000FF);
   11842 			else if (size == 2)
   11843 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11844 			else if (size == 4)
   11845 				*data = (uint32_t)flash_data;
   11846 			break;
   11847 		} else {
   11848 			/*
   11849 			 * If we've gotten here, then things are probably
   11850 			 * completely hosed, but if the error condition is
   11851 			 * detected, it won't hurt to give it another try...
   11852 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11853 			 */
   11854 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11855 			if (hsfsts & HSFSTS_ERR) {
   11856 				/* Repeat for some time before giving up. */
   11857 				continue;
   11858 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11859 				break;
   11860 		}
   11861 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11862 
   11863 	return error;
   11864 }
   11865 
   11866 /******************************************************************************
   11867  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11868  *
   11869  * sc - pointer to wm_hw structure
   11870  * index - The index of the byte to read.
   11871  * data - Pointer to a byte to store the value read.
   11872  *****************************************************************************/
   11873 static int32_t
   11874 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11875 {
   11876 	int32_t status;
   11877 	uint32_t word = 0;
   11878 
   11879 	status = wm_read_ich8_data(sc, index, 1, &word);
   11880 	if (status == 0)
   11881 		*data = (uint8_t)word;
   11882 	else
   11883 		*data = 0;
   11884 
   11885 	return status;
   11886 }
   11887 
   11888 /******************************************************************************
   11889  * Reads a word from the NVM using the ICH8 flash access registers.
   11890  *
   11891  * sc - pointer to wm_hw structure
   11892  * index - The starting byte index of the word to read.
   11893  * data - Pointer to a word to store the value read.
   11894  *****************************************************************************/
   11895 static int32_t
   11896 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11897 {
   11898 	int32_t status;
   11899 	uint32_t word = 0;
   11900 
   11901 	status = wm_read_ich8_data(sc, index, 2, &word);
   11902 	if (status == 0)
   11903 		*data = (uint16_t)word;
   11904 	else
   11905 		*data = 0;
   11906 
   11907 	return status;
   11908 }
   11909 
   11910 /******************************************************************************
   11911  * Reads a dword from the NVM using the ICH8 flash access registers.
   11912  *
   11913  * sc - pointer to wm_hw structure
    11914  * index - The starting byte index of the dword to read.
    11915  * data - Pointer to a dword to store the value read.
   11916  *****************************************************************************/
   11917 static int32_t
   11918 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11919 {
   11920 	int32_t status;
   11921 
   11922 	status = wm_read_ich8_data(sc, index, 4, data);
   11923 	return status;
   11924 }
   11925 
   11926 /******************************************************************************
   11927  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11928  * register.
   11929  *
   11930  * sc - Struct containing variables accessed by shared code
   11931  * offset - offset of word in the EEPROM to read
   11932  * data - word read from the EEPROM
   11933  * words - number of words to read
   11934  *****************************************************************************/
   11935 static int
   11936 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11937 {
   11938 	int32_t  error = 0;
   11939 	uint32_t flash_bank = 0;
   11940 	uint32_t act_offset = 0;
   11941 	uint32_t bank_offset = 0;
   11942 	uint16_t word = 0;
   11943 	uint16_t i = 0;
   11944 
   11945 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11946 		device_xname(sc->sc_dev), __func__));
   11947 
   11948 	/*
   11949 	 * We need to know which is the valid flash bank.  In the event
   11950 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11951 	 * managing flash_bank.  So it cannot be trusted and needs
   11952 	 * to be updated with each read.
   11953 	 */
   11954 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11955 	if (error) {
   11956 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11957 			device_xname(sc->sc_dev)));
   11958 		flash_bank = 0;
   11959 	}
   11960 
   11961 	/*
   11962 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11963 	 * size
   11964 	 */
   11965 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11966 
   11967 	for (i = 0; i < words; i++) {
   11968 		/* The NVM part needs a byte offset, hence * 2 */
   11969 		act_offset = bank_offset + ((offset + i) * 2);
   11970 		error = wm_read_ich8_word(sc, act_offset, &word);
   11971 		if (error) {
   11972 			aprint_error_dev(sc->sc_dev,
   11973 			    "%s: failed to read NVM\n", __func__);
   11974 			break;
   11975 		}
   11976 		data[i] = word;
   11977 	}
   11978 
   11979 	return error;
   11980 }
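
/*
 * Worked example (illustrative): with sc_ich8_flash_bank_size of
 * 0x1000 words, bank 1 starts at byte offset 0x1000 * 2 = 0x2000, so
 * reading NVM word 3 from bank 1 fetches the 16-bit value at flash
 * byte offset 0x2000 + 3 * 2 = 0x2006.
 */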
   11981 
   11982 /******************************************************************************
   11983  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11984  * register.
   11985  *
   11986  * sc - Struct containing variables accessed by shared code
   11987  * offset - offset of word in the EEPROM to read
   11988  * data - word read from the EEPROM
   11989  * words - number of words to read
   11990  *****************************************************************************/
   11991 static int
   11992 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11993 {
   11994 	int32_t  error = 0;
   11995 	uint32_t flash_bank = 0;
   11996 	uint32_t act_offset = 0;
   11997 	uint32_t bank_offset = 0;
   11998 	uint32_t dword = 0;
   11999 	uint16_t i = 0;
   12000 
   12001 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12002 		device_xname(sc->sc_dev), __func__));
   12003 
   12004 	/*
   12005 	 * We need to know which is the valid flash bank.  In the event
   12006 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12007 	 * managing flash_bank.  So it cannot be trusted and needs
   12008 	 * to be updated with each read.
   12009 	 */
   12010 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12011 	if (error) {
   12012 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12013 			device_xname(sc->sc_dev)));
   12014 		flash_bank = 0;
   12015 	}
   12016 
   12017 	/*
   12018 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12019 	 * size
   12020 	 */
   12021 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12022 
   12023 	for (i = 0; i < words; i++) {
   12024 		/* The NVM part needs a byte offset, hence * 2 */
   12025 		act_offset = bank_offset + ((offset + i) * 2);
   12026 		/* but we must read dword aligned, so mask ... */
   12027 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12028 		if (error) {
   12029 			aprint_error_dev(sc->sc_dev,
   12030 			    "%s: failed to read NVM\n", __func__);
   12031 			break;
   12032 		}
   12033 		/* ... and pick out low or high word */
   12034 		if ((act_offset & 0x2) == 0)
   12035 			data[i] = (uint16_t)(dword & 0xFFFF);
   12036 		else
   12037 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12038 	}
   12039 
   12040 	return error;
   12041 }
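
/*
 * Worked example (illustrative): SPT flash is dword addressable only,
 * so to fetch NVM word 3 (byte offset 6) the loop reads the dword at
 * offset 6 & ~3 = 4 and returns its high 16 bits; word 2 (byte offset
 * 4) would come from the low 16 bits of the same dword.
 */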
   12042 
   12043 /* iNVM */
   12044 
   12045 static int
   12046 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12047 {
    12048 	int32_t  rv = -1;	/* default to "not found" */
   12049 	uint32_t invm_dword;
   12050 	uint16_t i;
   12051 	uint8_t record_type, word_address;
   12052 
   12053 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12054 		device_xname(sc->sc_dev), __func__));
   12055 
   12056 	for (i = 0; i < INVM_SIZE; i++) {
   12057 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12058 		/* Get record type */
   12059 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12060 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12061 			break;
   12062 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12063 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12064 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12065 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12066 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12067 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12068 			if (word_address == address) {
   12069 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12070 				rv = 0;
   12071 				break;
   12072 			}
   12073 		}
   12074 	}
   12075 
   12076 	return rv;
   12077 }
   12078 
   12079 static int
   12080 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12081 {
   12082 	int rv = 0;
   12083 	int i;
   12084 
   12085 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12086 		device_xname(sc->sc_dev), __func__));
   12087 
   12088 	for (i = 0; i < words; i++) {
   12089 		switch (offset + i) {
   12090 		case NVM_OFF_MACADDR:
   12091 		case NVM_OFF_MACADDR1:
   12092 		case NVM_OFF_MACADDR2:
   12093 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12094 			if (rv != 0) {
   12095 				data[i] = 0xffff;
   12096 				rv = -1;
   12097 			}
   12098 			break;
   12099 		case NVM_OFF_CFG2:
   12100 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12101 			if (rv != 0) {
   12102 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12103 				rv = 0;
   12104 			}
   12105 			break;
   12106 		case NVM_OFF_CFG4:
   12107 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12108 			if (rv != 0) {
   12109 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12110 				rv = 0;
   12111 			}
   12112 			break;
   12113 		case NVM_OFF_LED_1_CFG:
   12114 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12115 			if (rv != 0) {
   12116 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12117 				rv = 0;
   12118 			}
   12119 			break;
   12120 		case NVM_OFF_LED_0_2_CFG:
   12121 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12122 			if (rv != 0) {
   12123 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12124 				rv = 0;
   12125 			}
   12126 			break;
   12127 		case NVM_OFF_ID_LED_SETTINGS:
   12128 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12129 			if (rv != 0) {
   12130 				*data = ID_LED_RESERVED_FFFF;
   12131 				rv = 0;
   12132 			}
   12133 			break;
   12134 		default:
   12135 			DPRINTF(WM_DEBUG_NVM,
   12136 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12137 			*data = NVM_RESERVED_WORD;
   12138 			break;
   12139 		}
   12140 	}
   12141 
   12142 	return rv;
   12143 }
   12144 
    12145 /* Locking, NVM type detection, checksum validation, version and read */
   12146 
   12147 /*
   12148  * wm_nvm_acquire:
   12149  *
   12150  *	Perform the EEPROM handshake required on some chips.
   12151  */
   12152 static int
   12153 wm_nvm_acquire(struct wm_softc *sc)
   12154 {
   12155 	uint32_t reg;
   12156 	int x;
   12157 	int ret = 0;
   12158 
   12159 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12160 		device_xname(sc->sc_dev), __func__));
   12161 
   12162 	if (sc->sc_type >= WM_T_ICH8) {
   12163 		ret = wm_get_nvm_ich8lan(sc);
   12164 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12165 		ret = wm_get_swfwhw_semaphore(sc);
   12166 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12167 		/* This will also do wm_get_swsm_semaphore() if needed */
   12168 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12169 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12170 		ret = wm_get_swsm_semaphore(sc);
   12171 	}
   12172 
   12173 	if (ret) {
   12174 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12175 			__func__);
   12176 		return 1;
   12177 	}
   12178 
   12179 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12180 		reg = CSR_READ(sc, WMREG_EECD);
   12181 
   12182 		/* Request EEPROM access. */
   12183 		reg |= EECD_EE_REQ;
   12184 		CSR_WRITE(sc, WMREG_EECD, reg);
   12185 
   12186 		/* ..and wait for it to be granted. */
   12187 		for (x = 0; x < 1000; x++) {
   12188 			reg = CSR_READ(sc, WMREG_EECD);
   12189 			if (reg & EECD_EE_GNT)
   12190 				break;
   12191 			delay(5);
   12192 		}
   12193 		if ((reg & EECD_EE_GNT) == 0) {
   12194 			aprint_error_dev(sc->sc_dev,
   12195 			    "could not acquire EEPROM GNT\n");
   12196 			reg &= ~EECD_EE_REQ;
   12197 			CSR_WRITE(sc, WMREG_EECD, reg);
   12198 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12199 				wm_put_swfwhw_semaphore(sc);
   12200 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12201 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12202 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12203 				wm_put_swsm_semaphore(sc);
   12204 			return 1;
   12205 		}
   12206 	}
   12207 
   12208 	return 0;
   12209 }
   12210 
   12211 /*
   12212  * wm_nvm_release:
   12213  *
   12214  *	Release the EEPROM mutex.
   12215  */
   12216 static void
   12217 wm_nvm_release(struct wm_softc *sc)
   12218 {
   12219 	uint32_t reg;
   12220 
   12221 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12222 		device_xname(sc->sc_dev), __func__));
   12223 
   12224 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12225 		reg = CSR_READ(sc, WMREG_EECD);
   12226 		reg &= ~EECD_EE_REQ;
   12227 		CSR_WRITE(sc, WMREG_EECD, reg);
   12228 	}
   12229 
   12230 	if (sc->sc_type >= WM_T_ICH8) {
   12231 		wm_put_nvm_ich8lan(sc);
   12232 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12233 		wm_put_swfwhw_semaphore(sc);
   12234 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12235 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12236 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12237 		wm_put_swsm_semaphore(sc);
   12238 }
   12239 
   12240 static int
   12241 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12242 {
   12243 	uint32_t eecd = 0;
   12244 
   12245 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12246 	    || sc->sc_type == WM_T_82583) {
   12247 		eecd = CSR_READ(sc, WMREG_EECD);
   12248 
   12249 		/* Isolate bits 15 & 16 */
   12250 		eecd = ((eecd >> 15) & 0x03);
   12251 
   12252 		/* If both bits are set, device is Flash type */
   12253 		if (eecd == 0x03)
   12254 			return 0;
   12255 	}
   12256 	return 1;
   12257 }
   12258 
   12259 static int
   12260 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12261 {
   12262 	uint32_t eec;
   12263 
   12264 	eec = CSR_READ(sc, WMREG_EEC);
   12265 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12266 		return 1;
   12267 
   12268 	return 0;
   12269 }
   12270 
   12271 /*
    12272  * wm_nvm_validate_checksum:
   12273  *
   12274  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12275  */
   12276 static int
   12277 wm_nvm_validate_checksum(struct wm_softc *sc)
   12278 {
   12279 	uint16_t checksum;
   12280 	uint16_t eeprom_data;
   12281 #ifdef WM_DEBUG
   12282 	uint16_t csum_wordaddr, valid_checksum;
   12283 #endif
   12284 	int i;
   12285 
   12286 	checksum = 0;
   12287 
   12288 	/* Don't check for I211 */
   12289 	if (sc->sc_type == WM_T_I211)
   12290 		return 0;
   12291 
   12292 #ifdef WM_DEBUG
   12293 	if (sc->sc_type == WM_T_PCH_LPT) {
   12294 		csum_wordaddr = NVM_OFF_COMPAT;
   12295 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12296 	} else {
   12297 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12298 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12299 	}
   12300 
   12301 	/* Dump EEPROM image for debug */
   12302 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12303 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12304 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12305 		/* XXX PCH_SPT? */
   12306 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12307 		if ((eeprom_data & valid_checksum) == 0) {
   12308 			DPRINTF(WM_DEBUG_NVM,
   12309 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12310 				device_xname(sc->sc_dev), eeprom_data,
   12311 				    valid_checksum));
   12312 		}
   12313 	}
   12314 
   12315 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12316 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12317 		for (i = 0; i < NVM_SIZE; i++) {
   12318 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12319 				printf("XXXX ");
   12320 			else
   12321 				printf("%04hx ", eeprom_data);
   12322 			if (i % 8 == 7)
   12323 				printf("\n");
   12324 		}
   12325 	}
   12326 
   12327 #endif /* WM_DEBUG */
   12328 
   12329 	for (i = 0; i < NVM_SIZE; i++) {
   12330 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12331 			return 1;
   12332 		checksum += eeprom_data;
   12333 	}
   12334 
   12335 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12336 #ifdef WM_DEBUG
   12337 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12338 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12339 #endif
   12340 	}
   12341 
   12342 	return 0;
   12343 }
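
/*
 * Worked example (assuming NVM_CHECKSUM is 0xbaba, as in the e1000
 * sources): the image is considered valid when the 16-bit sum of
 * words 0..NVM_SIZE-1 wraps to exactly that constant, i.e. the vendor
 * stores NVM_CHECKSUM minus the sum of all other words in the
 * checksum word.  Note that on mismatch this function only logs under
 * WM_DEBUG and still returns 0, so a bad checksum is not fatal here.
 */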
   12344 
   12345 static void
   12346 wm_nvm_version_invm(struct wm_softc *sc)
   12347 {
   12348 	uint32_t dword;
   12349 
   12350 	/*
    12351 	 * Linux's code to decode the version is very strange, so we
    12352 	 * don't follow that algorithm and just use word 61 as the
    12353 	 * document describes.  It may not be perfect, though...
   12354 	 *
   12355 	 * Example:
   12356 	 *
   12357 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12358 	 */
   12359 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12360 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12361 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12362 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12363 }
   12364 
   12365 static void
   12366 wm_nvm_version(struct wm_softc *sc)
   12367 {
   12368 	uint16_t major, minor, build, patch;
   12369 	uint16_t uid0, uid1;
   12370 	uint16_t nvm_data;
   12371 	uint16_t off;
   12372 	bool check_version = false;
   12373 	bool check_optionrom = false;
   12374 	bool have_build = false;
   12375 	bool have_uid = true;
   12376 
   12377 	/*
   12378 	 * Version format:
   12379 	 *
   12380 	 * XYYZ
   12381 	 * X0YZ
   12382 	 * X0YY
   12383 	 *
   12384 	 * Example:
   12385 	 *
   12386 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12387 	 *	82571	0x50a6	5.10.6?
   12388 	 *	82572	0x506a	5.6.10?
   12389 	 *	82572EI	0x5069	5.6.9?
   12390 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12391 	 *		0x2013	2.1.3?
    12392 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   12393 	 */
   12394 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12395 	switch (sc->sc_type) {
   12396 	case WM_T_82571:
   12397 	case WM_T_82572:
   12398 	case WM_T_82574:
   12399 	case WM_T_82583:
   12400 		check_version = true;
   12401 		check_optionrom = true;
   12402 		have_build = true;
   12403 		break;
   12404 	case WM_T_82575:
   12405 	case WM_T_82576:
   12406 	case WM_T_82580:
   12407 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12408 			check_version = true;
   12409 		break;
   12410 	case WM_T_I211:
   12411 		wm_nvm_version_invm(sc);
   12412 		have_uid = false;
   12413 		goto printver;
   12414 	case WM_T_I210:
   12415 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12416 			wm_nvm_version_invm(sc);
   12417 			have_uid = false;
   12418 			goto printver;
   12419 		}
   12420 		/* FALLTHROUGH */
   12421 	case WM_T_I350:
   12422 	case WM_T_I354:
   12423 		check_version = true;
   12424 		check_optionrom = true;
   12425 		break;
   12426 	default:
   12427 		return;
   12428 	}
   12429 	if (check_version) {
   12430 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12431 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12432 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12433 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12434 			build = nvm_data & NVM_BUILD_MASK;
   12435 			have_build = true;
   12436 		} else
   12437 			minor = nvm_data & 0x00ff;
   12438 
    12439 		/* Convert the hex-encoded minor value to decimal */
   12440 		minor = (minor / 16) * 10 + (minor % 16);
   12441 		sc->sc_nvm_ver_major = major;
   12442 		sc->sc_nvm_ver_minor = minor;
   12443 
   12444 printver:
   12445 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12446 		    sc->sc_nvm_ver_minor);
   12447 		if (have_build) {
   12448 			sc->sc_nvm_ver_build = build;
   12449 			aprint_verbose(".%d", build);
   12450 		}
   12451 	}
   12452 	if (check_optionrom) {
   12453 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12454 		/* Option ROM Version */
   12455 		if ((off != 0x0000) && (off != 0xffff)) {
   12456 			off += NVM_COMBO_VER_OFF;
   12457 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12458 			wm_nvm_read(sc, off, 1, &uid0);
   12459 			if ((uid0 != 0) && (uid0 != 0xffff)
   12460 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12461 				/* 16bits */
   12462 				major = uid0 >> 8;
   12463 				build = (uid0 << 8) | (uid1 >> 8);
   12464 				patch = uid1 & 0x00ff;
   12465 				aprint_verbose(", option ROM Version %d.%d.%d",
   12466 				    major, build, patch);
   12467 			}
   12468 		}
   12469 	}
   12470 
   12471 	if (have_uid) {
   12472 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12473 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12474 	}
   12475 }
   12476 
   12477 /*
   12478  * wm_nvm_read:
   12479  *
   12480  *	Read data from the serial EEPROM.
   12481  */
   12482 static int
   12483 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12484 {
   12485 	int rv;
   12486 
   12487 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12488 		device_xname(sc->sc_dev), __func__));
   12489 
   12490 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12491 		return 1;
   12492 
   12493 	if (wm_nvm_acquire(sc))
   12494 		return 1;
   12495 
   12496 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12497 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12498 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12499 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12500 	else if (sc->sc_type == WM_T_PCH_SPT)
   12501 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12502 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12503 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12504 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12505 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12506 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12507 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12508 	else
   12509 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12510 
   12511 	wm_nvm_release(sc);
   12512 	return rv;
   12513 }
   12514 
   12515 /*
   12516  * Hardware semaphores.
    12517  * Very complex...
   12518  */
   12519 
   12520 static int
   12521 wm_get_null(struct wm_softc *sc)
   12522 {
   12523 
   12524 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12525 		device_xname(sc->sc_dev), __func__));
   12526 	return 0;
   12527 }
   12528 
   12529 static void
   12530 wm_put_null(struct wm_softc *sc)
   12531 {
   12532 
   12533 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12534 		device_xname(sc->sc_dev), __func__));
   12535 	return;
   12536 }
   12537 
   12538 /*
   12539  * Get hardware semaphore.
   12540  * Same as e1000_get_hw_semaphore_generic()
   12541  */
   12542 static int
   12543 wm_get_swsm_semaphore(struct wm_softc *sc)
   12544 {
   12545 	int32_t timeout;
   12546 	uint32_t swsm;
   12547 
   12548 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12549 		device_xname(sc->sc_dev), __func__));
   12550 	KASSERT(sc->sc_nvm_wordsize > 0);
   12551 
   12552 	/* Get the SW semaphore. */
   12553 	timeout = sc->sc_nvm_wordsize + 1;
   12554 	while (timeout) {
   12555 		swsm = CSR_READ(sc, WMREG_SWSM);
   12556 
   12557 		if ((swsm & SWSM_SMBI) == 0)
   12558 			break;
   12559 
   12560 		delay(50);
   12561 		timeout--;
   12562 	}
   12563 
   12564 	if (timeout == 0) {
   12565 		aprint_error_dev(sc->sc_dev,
   12566 		    "could not acquire SWSM SMBI\n");
   12567 		return 1;
   12568 	}
   12569 
   12570 	/* Get the FW semaphore. */
   12571 	timeout = sc->sc_nvm_wordsize + 1;
   12572 	while (timeout) {
   12573 		swsm = CSR_READ(sc, WMREG_SWSM);
   12574 		swsm |= SWSM_SWESMBI;
   12575 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12576 		/* If we managed to set the bit we got the semaphore. */
   12577 		swsm = CSR_READ(sc, WMREG_SWSM);
   12578 		if (swsm & SWSM_SWESMBI)
   12579 			break;
   12580 
   12581 		delay(50);
   12582 		timeout--;
   12583 	}
   12584 
   12585 	if (timeout == 0) {
   12586 		aprint_error_dev(sc->sc_dev,
   12587 		    "could not acquire SWSM SWESMBI\n");
   12588 		/* Release semaphores */
   12589 		wm_put_swsm_semaphore(sc);
   12590 		return 1;
   12591 	}
   12592 	return 0;
   12593 }
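
/*
 * Usage sketch (illustrative): SWSM is a two-stage handshake; SMBI
 * serializes software agents and SWESMBI arbitrates with firmware.
 * Callers pair this with wm_put_swsm_semaphore() below:
 *
 *	if (wm_get_swsm_semaphore(sc) == 0) {
 *		...			// touch the shared resource
 *		wm_put_swsm_semaphore(sc);
 *	}
 */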
   12594 
   12595 /*
   12596  * Put hardware semaphore.
   12597  * Same as e1000_put_hw_semaphore_generic()
   12598  */
   12599 static void
   12600 wm_put_swsm_semaphore(struct wm_softc *sc)
   12601 {
   12602 	uint32_t swsm;
   12603 
   12604 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12605 		device_xname(sc->sc_dev), __func__));
   12606 
   12607 	swsm = CSR_READ(sc, WMREG_SWSM);
   12608 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12609 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12610 }
   12611 
   12612 /*
   12613  * Get SW/FW semaphore.
   12614  * Same as e1000_acquire_swfw_sync_82575().
   12615  */
   12616 static int
   12617 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12618 {
   12619 	uint32_t swfw_sync;
   12620 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12621 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12622 	int timeout = 200;
   12623 
   12624 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12625 		device_xname(sc->sc_dev), __func__));
   12626 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12627 
   12628 	for (timeout = 0; timeout < 200; timeout++) {
   12629 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12630 			if (wm_get_swsm_semaphore(sc)) {
   12631 				aprint_error_dev(sc->sc_dev,
   12632 				    "%s: failed to get semaphore\n",
   12633 				    __func__);
   12634 				return 1;
   12635 			}
   12636 		}
   12637 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12638 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12639 			swfw_sync |= swmask;
   12640 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12641 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12642 				wm_put_swsm_semaphore(sc);
   12643 			return 0;
   12644 		}
   12645 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12646 			wm_put_swsm_semaphore(sc);
   12647 		delay(5000);
   12648 	}
   12649 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12650 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12651 	return 1;
   12652 }
   12653 
   12654 static void
   12655 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12656 {
   12657 	uint32_t swfw_sync;
   12658 
   12659 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12660 		device_xname(sc->sc_dev), __func__));
   12661 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12662 
   12663 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12664 		while (wm_get_swsm_semaphore(sc) != 0)
   12665 			continue;
   12666 	}
   12667 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12668 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12669 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12670 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12671 		wm_put_swsm_semaphore(sc);
   12672 }
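
/*
 * Usage sketch (illustrative): the SW_FW_SYNC bits protect
 * per-function resources, so e.g. a PHY access would bracket its MDIC
 * traffic like this:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
 *		...			// MDIC reads/writes
 *		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 *	}
 *
 * wm_get_phy_82575()/wm_put_phy_82575() below wrap exactly this
 * pattern, picking the mask from the per-function swfwphysem[] table.
 */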
   12673 
   12674 static int
   12675 wm_get_phy_82575(struct wm_softc *sc)
   12676 {
   12677 
   12678 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12679 		device_xname(sc->sc_dev), __func__));
   12680 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12681 }
   12682 
   12683 static void
   12684 wm_put_phy_82575(struct wm_softc *sc)
   12685 {
   12686 
   12687 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12688 		device_xname(sc->sc_dev), __func__));
   12689 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12690 }
   12691 
   12692 static int
   12693 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12694 {
   12695 	uint32_t ext_ctrl;
   12696 	int timeout = 200;
   12697 
   12698 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12699 		device_xname(sc->sc_dev), __func__));
   12700 
   12701 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12702 	for (timeout = 0; timeout < 200; timeout++) {
   12703 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12704 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12705 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12706 
   12707 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12708 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12709 			return 0;
   12710 		delay(5000);
   12711 	}
   12712 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12713 	    device_xname(sc->sc_dev), ext_ctrl);
   12714 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12715 	return 1;
   12716 }
   12717 
   12718 static void
   12719 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12720 {
   12721 	uint32_t ext_ctrl;
   12722 
   12723 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12724 		device_xname(sc->sc_dev), __func__));
   12725 
   12726 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12727 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12728 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12729 
   12730 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12731 }
   12732 
   12733 static int
   12734 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12735 {
   12736 	uint32_t ext_ctrl;
   12737 	int timeout;
   12738 
   12739 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12740 		device_xname(sc->sc_dev), __func__));
   12741 	mutex_enter(sc->sc_ich_phymtx);
   12742 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12743 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12744 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12745 			break;
   12746 		delay(1000);
   12747 	}
   12748 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12749 		printf("%s: SW has already locked the resource\n",
   12750 		    device_xname(sc->sc_dev));
   12751 		goto out;
   12752 	}
   12753 
   12754 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12755 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12756 	for (timeout = 0; timeout < 1000; timeout++) {
   12757 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12758 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12759 			break;
   12760 		delay(1000);
   12761 	}
   12762 	if (timeout >= 1000) {
   12763 		printf("%s: failed to acquire semaphore\n",
   12764 		    device_xname(sc->sc_dev));
   12765 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12766 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12767 		goto out;
   12768 	}
   12769 	return 0;
   12770 
   12771 out:
   12772 	mutex_exit(sc->sc_ich_phymtx);
   12773 	return 1;
   12774 }
   12775 
   12776 static void
   12777 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12778 {
   12779 	uint32_t ext_ctrl;
   12780 
   12781 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12782 		device_xname(sc->sc_dev), __func__));
   12783 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12784 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12785 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12786 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12787 	} else {
   12788 		printf("%s: Semaphore unexpectedly released\n",
   12789 		    device_xname(sc->sc_dev));
   12790 	}
   12791 
   12792 	mutex_exit(sc->sc_ich_phymtx);
   12793 }
   12794 
   12795 static int
   12796 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12797 {
   12798 
   12799 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12800 		device_xname(sc->sc_dev), __func__));
   12801 	mutex_enter(sc->sc_ich_nvmmtx);
   12802 
   12803 	return 0;
   12804 }
   12805 
   12806 static void
   12807 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12808 {
   12809 
   12810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12811 		device_xname(sc->sc_dev), __func__));
   12812 	mutex_exit(sc->sc_ich_nvmmtx);
   12813 }
   12814 
   12815 static int
   12816 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12817 {
   12818 	int i = 0;
   12819 	uint32_t reg;
   12820 
   12821 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12822 		device_xname(sc->sc_dev), __func__));
   12823 
   12824 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12825 	do {
   12826 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12827 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12828 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12829 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12830 			break;
   12831 		delay(2*1000);
   12832 		i++;
   12833 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12834 
   12835 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12836 		wm_put_hw_semaphore_82573(sc);
   12837 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12838 		    device_xname(sc->sc_dev));
   12839 		return -1;
   12840 	}
   12841 
   12842 	return 0;
   12843 }
   12844 
   12845 static void
   12846 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12847 {
   12848 	uint32_t reg;
   12849 
   12850 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12851 		device_xname(sc->sc_dev), __func__));
   12852 
   12853 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12854 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12855 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12856 }
   12857 
   12858 /*
   12859  * Management mode and power management related subroutines.
   12860  * BMC, AMT, suspend/resume and EEE.
   12861  */
   12862 
   12863 #ifdef WM_WOL
   12864 static int
   12865 wm_check_mng_mode(struct wm_softc *sc)
   12866 {
   12867 	int rv;
   12868 
   12869 	switch (sc->sc_type) {
   12870 	case WM_T_ICH8:
   12871 	case WM_T_ICH9:
   12872 	case WM_T_ICH10:
   12873 	case WM_T_PCH:
   12874 	case WM_T_PCH2:
   12875 	case WM_T_PCH_LPT:
   12876 	case WM_T_PCH_SPT:
   12877 		rv = wm_check_mng_mode_ich8lan(sc);
   12878 		break;
   12879 	case WM_T_82574:
   12880 	case WM_T_82583:
   12881 		rv = wm_check_mng_mode_82574(sc);
   12882 		break;
   12883 	case WM_T_82571:
   12884 	case WM_T_82572:
   12885 	case WM_T_82573:
   12886 	case WM_T_80003:
   12887 		rv = wm_check_mng_mode_generic(sc);
   12888 		break;
   12889 	default:
    12890 		/* nothing to do */
   12891 		rv = 0;
   12892 		break;
   12893 	}
   12894 
   12895 	return rv;
   12896 }
   12897 
   12898 static int
   12899 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12900 {
   12901 	uint32_t fwsm;
   12902 
   12903 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12904 
   12905 	if (((fwsm & FWSM_FW_VALID) != 0)
   12906 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12907 		return 1;
   12908 
   12909 	return 0;
   12910 }
   12911 
   12912 static int
   12913 wm_check_mng_mode_82574(struct wm_softc *sc)
   12914 {
   12915 	uint16_t data;
   12916 
   12917 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12918 
   12919 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12920 		return 1;
   12921 
   12922 	return 0;
   12923 }
   12924 
   12925 static int
   12926 wm_check_mng_mode_generic(struct wm_softc *sc)
   12927 {
   12928 	uint32_t fwsm;
   12929 
   12930 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12931 
   12932 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12933 		return 1;
   12934 
   12935 	return 0;
   12936 }
   12937 #endif /* WM_WOL */
   12938 
   12939 static int
   12940 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12941 {
   12942 	uint32_t manc, fwsm, factps;
   12943 
   12944 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12945 		return 0;
   12946 
   12947 	manc = CSR_READ(sc, WMREG_MANC);
   12948 
   12949 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12950 		device_xname(sc->sc_dev), manc));
   12951 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12952 		return 0;
   12953 
   12954 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12955 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12956 		factps = CSR_READ(sc, WMREG_FACTPS);
   12957 		if (((factps & FACTPS_MNGCG) == 0)
   12958 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12959 			return 1;
   12960 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12961 		uint16_t data;
   12962 
   12963 		factps = CSR_READ(sc, WMREG_FACTPS);
   12964 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12965 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12966 			device_xname(sc->sc_dev), factps, data));
   12967 		if (((factps & FACTPS_MNGCG) == 0)
   12968 		    && ((data & NVM_CFG2_MNGM_MASK)
   12969 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12970 			return 1;
   12971 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12972 	    && ((manc & MANC_ASF_EN) == 0))
   12973 		return 1;
   12974 
   12975 	return 0;
   12976 }
   12977 
   12978 static bool
   12979 wm_phy_resetisblocked(struct wm_softc *sc)
   12980 {
   12981 	bool blocked = false;
   12982 	uint32_t reg;
   12983 	int i = 0;
   12984 
   12985 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12986 		device_xname(sc->sc_dev), __func__));
   12987 
   12988 	switch (sc->sc_type) {
   12989 	case WM_T_ICH8:
   12990 	case WM_T_ICH9:
   12991 	case WM_T_ICH10:
   12992 	case WM_T_PCH:
   12993 	case WM_T_PCH2:
   12994 	case WM_T_PCH_LPT:
   12995 	case WM_T_PCH_SPT:
   12996 		do {
   12997 			reg = CSR_READ(sc, WMREG_FWSM);
   12998 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12999 				blocked = true;
   13000 				delay(10*1000);
   13001 				continue;
   13002 			}
   13003 			blocked = false;
   13004 		} while (blocked && (i++ < 30));
   13005 		return blocked;
   13007 	case WM_T_82571:
   13008 	case WM_T_82572:
   13009 	case WM_T_82573:
   13010 	case WM_T_82574:
   13011 	case WM_T_82583:
   13012 	case WM_T_80003:
   13013 		reg = CSR_READ(sc, WMREG_MANC);
   13014 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13015 			return true;
   13016 		else
   13017 			return false;
   13019 	default:
   13020 		/* no problem */
   13021 		break;
   13022 	}
   13023 
   13024 	return false;
   13025 }
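
/*
 * A minimal sketch (not compiled in) of how callers are expected to use
 * wm_phy_resetisblocked(): check it before touching the PHY and skip the
 * reset while firmware holds the block, as wm_smbustopci() below does.
 */
#if 0
	if (wm_phy_resetisblocked(sc)) {
		/* Firmware owns the PHY; do not reset it now. */
		return;
	}
	wm_reset_phy(sc);
#endif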

static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
	} else if (sc->sc_type >= WM_T_82571) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
	} else if (sc->sc_type >= WM_T_82571) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}

static void
wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->sc_type < WM_T_PCH2)
		return;

	reg = CSR_READ(sc, WMREG_EXTCNFCTR);

	if (gate)
		reg |= EXTCNFCTR_GATE_PHY_CFG;
	else
		reg &= ~EXTCNFCTR_GATE_PHY_CFG;

	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
}

static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm, reg;
	int rv = 0;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	wm_gate_hw_phy_config_ich8lan(sc, true);

	/* Disable ULP */
	wm_ulp_disable(sc);

	/* Acquire PHY semaphore */
	sc->phy.acquire(sc);

	fwsm = CSR_READ(sc, WMREG_FWSM);
	switch (sc->sc_type) {
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		if (wm_phy_is_accessible_pchlan(sc))
			break;

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_FORCE_SMBUS;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
#if 0
		/* XXX Isn't this required??? */
		CSR_WRITE_FLUSH(sc);
#endif
		delay(50 * 1000);
		/* FALLTHROUGH */
	case WM_T_PCH2:
		if (wm_phy_is_accessible_pchlan(sc) == true)
			break;
		/* FALLTHROUGH */
	case WM_T_PCH:
		if (sc->sc_type == WM_T_PCH)
			if ((fwsm & FWSM_FW_VALID) != 0)
				break;

		if (wm_phy_resetisblocked(sc) == true) {
			printf("XXX reset is blocked(3)\n");
			break;
		}

		wm_toggle_lanphypc_pch_lpt(sc);

		if (sc->sc_type >= WM_T_PCH_LPT) {
			if (wm_phy_is_accessible_pchlan(sc) == true)
				break;

			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

			if (wm_phy_is_accessible_pchlan(sc) == true)
				break;
			rv = -1;
		}
		break;
	default:
		break;
	}

	/* Release semaphore */
	sc->phy.release(sc);

	if (rv == 0) {
		if (wm_phy_resetisblocked(sc)) {
			printf("XXX reset is blocked(4)\n");
			goto out;
		}
		wm_reset_phy(sc);
		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
	}

out:
	/*
	 * Ungate automatic PHY configuration by hardware on non-managed 82579
	 */
	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
		delay(10 * 1000);
		wm_gate_hw_phy_config_ich8lan(sc, false);
	}
}
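
/*
 * Note (added for clarity): after ULP or ME involvement the PHY may still
 * be strapped to the SMBus interface; the function above forces it back
 * to the PCI(e)/MDIO access path before the PHY is reset, hence its name.
 */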

static void
wm_init_manageability(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* Step 0: set HAS_AMT, ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		/* FALLTHROUGH */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* Step 1: set the HAS_MANAGE flag */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

	/*
	 * Note that the WOL flag is set after the EEPROM-related reset
	 * has been done.
	 */
}

/*
 * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT (I217) and newer; some early I217/I218 variants
 * are excluded (see below).
 */
static void
wm_ulp_disable(struct wm_softc *sc)
{
	uint32_t reg;
	int i = 0;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	/* Exclude old devices */
	if ((sc->sc_type < WM_T_PCH_LPT)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
		return;

	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
		/* Request ME to un-configure ULP mode in the PHY */
		reg = CSR_READ(sc, WMREG_H2ME);
		reg &= ~H2ME_ULP;
		reg |= H2ME_ENFORCE_SETTINGS;
		CSR_WRITE(sc, WMREG_H2ME, reg);

		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
			if (i++ == 30) {
				printf("%s timed out\n", __func__);
				return;
			}
			delay(10 * 1000);
		}
		reg = CSR_READ(sc, WMREG_H2ME);
		reg &= ~H2ME_ENFORCE_SETTINGS;
		CSR_WRITE(sc, WMREG_H2ME, reg);

		return;
	}

	/* Acquire semaphore */
	sc->phy.acquire(sc);

	/* Toggle LANPHYPC */
	wm_toggle_lanphypc_pch_lpt(sc);

	/* Unforce SMBus mode in PHY */
	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
	if (reg == 0x0000 || reg == 0xffff) {
		uint32_t reg2;

		printf("%s: Force SMBus first.\n", __func__);
		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
		reg2 |= CTRL_EXT_FORCE_SMBUS;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
		delay(50 * 1000);

		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
	}
	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);

	/* Unforce SMBus mode in MAC */
	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	reg &= ~CTRL_EXT_FORCE_SMBUS;
	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
	reg |= HV_PM_CTRL_K1_ENA;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);

	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
	reg &= ~(I218_ULP_CONFIG1_IND
	    | I218_ULP_CONFIG1_STICKY_ULP
	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
	    | I218_ULP_CONFIG1_WOL_HOST
	    | I218_ULP_CONFIG1_INBAND_EXIT
	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
	reg |= I218_ULP_CONFIG1_START;
	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg &= ~FEXTNVM7_DIS_SMB_PERST;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	/* Release semaphore */
	sc->phy.release(sc);
	wm_gmii_reset(sc);
	delay(50 * 1000);
}

/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}
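
/*
 * XXX Not implemented yet: on PCH and newer, the wakeup configuration
 * (the receive-address and multicast-table copies plus the wakeup
 * control bits) has to be programmed into the PHY's register space, as
 * the step-by-step outline above shows, so WOL on those parts is
 * incomplete.
 */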

/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type >= WM_T_PCH)
		wm_enable_phy_wakeup(sc);
	else {
		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}

/* LPLU (Low Power Link Up) */

static void
wm_lplu_d0_disable(struct wm_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->sc_phytype == WMPHY_IFE)
		return;

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82575:
	case WM_T_82576:
		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
		reg &= ~PMR_D0_LPLU;
		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
		break;
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I210:
	case WM_T_I211:
		reg = CSR_READ(sc, WMREG_PHPM);
		reg &= ~PHPM_D0A_LPLU;
		CSR_WRITE(sc, WMREG_PHPM, reg);
		break;
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		CSR_WRITE_FLUSH(sc);
		break;
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
		if (wm_phy_resetisblocked(sc) == false)
			reg |= HV_OEM_BITS_ANEGNOW;
		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
		break;
	default:
		break;
	}
}

/* EEE (Energy-Efficient Ethernet, IEEE 802.3az) */

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		ipcnfg &= ~IPCNFG_10BASE_TE;
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}
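
/*
 * A minimal sketch (not compiled in) of how EEE could be toggled at
 * runtime; the "enable" variable is hypothetical, only the WM_F_EEE
 * flag and wm_set_eee_i350() are real.
 */
#if 0
	if (enable)
		sc->sc_flags |= WM_F_EEE;
	else
		sc->sc_flags &= ~WM_F_EEE;
	if (sc->sc_type == WM_T_I350)
		wm_set_eee_i350(sc);
#endif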

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY-level workarounds live in the PHY drivers.
 */

/* Workaround for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	struct mii_data *mii = &sc->sc_mii;
	uint32_t status = CSR_READ(sc, WMREG_STATUS);
	int i;
	int reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	/* If the link is not up, do nothing */
	if ((status & STATUS_LU) == 0)
		return;

	/* Nothing to do if the link speed is other than 1Gbps */
	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
		return;

	for (i = 0; i < 10; i++) {
		/* Read twice */
		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_reset_phy(sc);
		delay(5 * 1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

out:
	return;
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for pch's PHYs
 * XXX should be moved to new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH);

	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
			PHY_RESET(child);
			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
			    0x3140);
		}
	}

	/* Select page 0 */
	sc->phy.acquire(sc);
	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled while the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	wm_set_mdio_slow_mode_hv(sc);
}
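
/*
 * Background (added for clarity): K1 is a power-saving state of the
 * Kumeran interface between the MAC and the PHY.  The NVM default is
 * kept in sc_nvm_k1_enabled; the workaround below overrides it and
 * keeps K1 off whenever a gigabit link is up.
 */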

static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/* An ID of all zeros or all ones means no device or a failed read. */
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	int i;
	uint32_t reg;
	uint16_t id1, id2;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	id1 = id2 = 0xffff;
	for (i = 0; i < 2; i++) {
		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
		if (MII_INVALIDID(id1))
			continue;
		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
		if (MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("XXX return with false\n");
		return false;
	}
out:
	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/* Unforce SMBus mode in PHY */
			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL);
			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, reg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

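/*
 * Background (added for clarity): LANPHYPC is the LAN PHY Power Control
 * signal.  Toggling it below power-cycles the PHY, which is how the MAC
 * forces the PHY out of a stuck (e.g. ULP/SMBus) state.
 */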
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int32_t obff_hwm = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
		uint32_t status;
		uint16_t speed;
		pcireg_t preg;

		status = CSR_READ(sc, WMREG_STATUS);
		switch (__SHIFTOUT(status, STATUS_SPEED)) {
		case STATUS_SPEED_10:
			speed = 10;
			break;
		case STATUS_SPEED_100:
			speed = 100;
			break;
		case STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			device_printf(sc->sc_dev, "Unknown speed "
			    "(status = %08x)\n", status);
			return -1;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
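		/*
		 * Worked example (added for illustration, not from the
		 * datasheet): with rxa = 26KB, an MTU of 1500 and a 1Gbps
		 * link, lat_ns = (26 * 1024 - 2 * 1514) * 8 * 1000 / 1000
		 * = 188768ns.  That exceeds the 10-bit value field, so it
		 * is scaled down twice by 32: scale = 2, value = 185, and
		 * the encoded latency decodes to 185 * 2^10ns = ~189us.
		 */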
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
			+ ETHER_HDR_LEN))) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			lat_ns /= speed;
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			printf("%s: Invalid LTR latency scale %d\n",
			    device_xname(sc->sc_dev), scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		/* Determine the maximum latency tolerated by the platform */
		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
			    * PCI_LTR_SCALETONS(
				    __SHIFTOUT(lat_enc,
					PCI_LTR_MAXSNOOPLAT_SCALE));
		}

		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
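		/*
		 * (Added for clarity) The block above converts the
		 * tolerated latency back into the amount of Rx buffer,
		 * in KB, that fills at line rate during that latency;
		 * the OBFF high water mark is whatever remains of rxa.
		 */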
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
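/*
 * (Added for clarity) The workaround below checks whether the internal
 * PHY's PLL came up unconfigured; if so it resets the PHY, rewrites the
 * iNVM autoload word with the PLL workaround value through EEARBC, and
 * bounces the device through D3/D0 before checking again.
 */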
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH_SPT);

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}
