      1 /*	$NetBSD: if_wm.c,v 1.507 2017/04/12 05:08:00 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Split header buffer for newer descriptors
     79  *	- EEE (Energy Efficiency Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.507 2017/04/12 05:08:00 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #include "opt_if_wm.h"
     92 #endif
     93 
     94 #include <sys/param.h>
     95 #include <sys/systm.h>
     96 #include <sys/callout.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/malloc.h>
     99 #include <sys/kmem.h>
    100 #include <sys/kernel.h>
    101 #include <sys/socket.h>
    102 #include <sys/ioctl.h>
    103 #include <sys/errno.h>
    104 #include <sys/device.h>
    105 #include <sys/queue.h>
    106 #include <sys/syslog.h>
    107 #include <sys/interrupt.h>
    108 #include <sys/cpu.h>
    109 #include <sys/pcq.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/miivar.h>
    132 #include <dev/mii/miidevs.h>
    133 #include <dev/mii/mii_bitbang.h>
    134 #include <dev/mii/ikphyreg.h>
    135 #include <dev/mii/igphyreg.h>
    136 #include <dev/mii/igphyvar.h>
    137 #include <dev/mii/inbmphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
     158 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
     170 /*
     171  * The maximum number of interrupts this driver supports.
     172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 /*
    177  * Transmit descriptor list size.  Due to errata, we can only have
    178  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    179  * on >= 82544.  We tell the upper layers that they can queue a lot
    180  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    181  * of them at a time.
    182  *
    183  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    184  * chains containing many small mbufs have been observed in zero-copy
    185  * situations with jumbo frames.
    186  */
    187 #define	WM_NTXSEGS		256
    188 #define	WM_IFQUEUELEN		256
    189 #define	WM_TXQUEUELEN_MAX	64
    190 #define	WM_TXQUEUELEN_MAX_82547	16
    191 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    192 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    193 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    194 #define	WM_NTXDESC_82542	256
    195 #define	WM_NTXDESC_82544	4096
    196 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    197 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    198 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    199 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    200 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
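
#if 0
/*
 * Illustrative only (not compiled): because WM_NTXDESC() and
 * WM_TXQUEUELEN() are powers of two, WM_NEXTTX()/WM_NEXTTXS() wrap
 * the ring index with a mask instead of a modulo.  A hypothetical
 * walk of a 256-entry 82542-style ring:
 */
static void
wm_example_next_tx(struct wm_txqueue *txq)
{
	int idx = WM_NTXDESC(txq) - 1;	/* last slot, e.g. 255 */

	idx = WM_NEXTTX(txq, idx);	/* (255 + 1) & 255 == 0 */
	KASSERT(idx == 0);		/* wrapped back to the start */
}
#endif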
    201 
    202 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    203 
    204 #define	WM_TXINTERQSIZE		256
    205 
    206 /*
     207  * Receive descriptor list size.  We have one Rx buffer for
     208  * normal-sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    209  * packet.  We allocate 256 receive descriptors, each with a 2k
    210  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    211  */
    212 #define	WM_NRXDESC		256
    213 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    214 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    215 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
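
/*
 * Worked arithmetic (illustrative): a ~9k jumbo frame split across
 * 2k (MCLBYTES) clusters needs 5 Rx buffers (9018 / 2048, rounded up),
 * so the 256 descriptors leave room for roughly 50 such packets in
 * flight (256 / 5).
 */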
    216 
    217 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    218 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    219 #endif
    220 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    221 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    222 #endif
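
/*
 * The former bounds how many Rx packets a single softint pass of
 * wm_rxeof() may process; the latter bounds Rx processing directly in
 * hard interrupt context (the default of 0 defers everything to the
 * softint).
 */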
    223 
    224 typedef union txdescs {
    225 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    226 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    227 } txdescs_t;
    228 
    229 typedef union rxdescs {
    230 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    231 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    232 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    233 } rxdescs_t;
    234 
    235 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    236 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    237 
    238 /*
    239  * Software state for transmit jobs.
    240  */
    241 struct wm_txsoft {
    242 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    243 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    244 	int txs_firstdesc;		/* first descriptor in packet */
    245 	int txs_lastdesc;		/* last descriptor in packet */
    246 	int txs_ndesc;			/* # of descriptors used */
    247 };
    248 
    249 /*
    250  * Software state for receive buffers.  Each descriptor gets a
    251  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    252  * more than one buffer, we chain them together.
    253  */
    254 struct wm_rxsoft {
    255 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    256 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    257 };
    258 
    259 #define WM_LINKUP_TIMEOUT	50
    260 
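/* Per-PHY SW/FW semaphore bits, indexed by sc_funcid. */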
    261 static uint16_t swfwphysem[] = {
    262 	SWFW_PHY0_SM,
    263 	SWFW_PHY1_SM,
    264 	SWFW_PHY2_SM,
    265 	SWFW_PHY3_SM
    266 };
    267 
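/* RXPBS translation table for the 82580; see wm_rxpbs_adjust_82580(). */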
    268 static const uint32_t wm_82580_rxpbs_table[] = {
    269 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    270 };
    271 
    272 struct wm_softc;
    273 
    274 #ifdef WM_EVENT_COUNTERS
    275 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    276 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    277 	struct evcnt qname##_ev_##evname;
    278 
    279 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     280 	do {								\
    281 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    282 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    283 		    "%s%02d%s", #qname, (qnum), #evname);		\
    284 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    285 		    (evtype), NULL, (xname),				\
    286 		    (q)->qname##_##evname##_evcnt_name);		\
     287 	} while (0)
    288 
    289 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    290 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    291 
    292 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    293 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    294 
    295 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    296 	evcnt_detach(&(q)->qname##_ev_##evname);
    297 #endif /* WM_EVENT_COUNTERS */
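
/*
 * Illustrative expansion (not part of the driver): WM_Q_EVCNT_DEFINE(txq,
 * txdw) declares roughly
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (token pasting does not happen inside the string literal; it merely
 * sizes the buffer), and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname)
 * then formats the name "txq00txdw" and attaches the counter via
 * evcnt_attach_dynamic(9).
 */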
    298 
    299 struct wm_txqueue {
    300 	kmutex_t *txq_lock;		/* lock for tx operations */
    301 
    302 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    303 
    304 	/* Software state for the transmit descriptors. */
    305 	int txq_num;			/* must be a power of two */
    306 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    307 
    308 	/* TX control data structures. */
    309 	int txq_ndesc;			/* must be a power of two */
     310 	size_t txq_descsize;		/* size of a Tx descriptor */
    311 	txdescs_t *txq_descs_u;
     312 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    313 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     314 	int txq_desc_rseg;		/* real number of control segments */
    315 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    316 #define	txq_descs	txq_descs_u->sctxu_txdescs
    317 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    318 
    319 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    320 
    321 	int txq_free;			/* number of free Tx descriptors */
    322 	int txq_next;			/* next ready Tx descriptor */
    323 
    324 	int txq_sfree;			/* number of free Tx jobs */
    325 	int txq_snext;			/* next free Tx job */
    326 	int txq_sdirty;			/* dirty Tx jobs */
    327 
    328 	/* These 4 variables are used only on the 82547. */
    329 	int txq_fifo_size;		/* Tx FIFO size */
    330 	int txq_fifo_head;		/* current head of FIFO */
    331 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    332 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    333 
     334 	/*
     335 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     336 	 * CPUs. This pcq(9) queue mediates between them without blocking.
     337 	 */
    338 	pcq_t *txq_interq;
    339 
     340 	/*
     341 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     342 	 * to manage the Tx H/W queue's busy flag.
     343 	 */
    344 	int txq_flags;			/* flags for H/W queue, see below */
    345 #define	WM_TXQ_NO_SPACE	0x1
    346 
    347 	bool txq_stopping;
    348 
    349 	uint32_t txq_packets;		/* for AIM */
    350 	uint32_t txq_bytes;		/* for AIM */
    351 #ifdef WM_EVENT_COUNTERS
    352 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    353 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    354 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    355 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    356 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    357 						/* XXX not used? */
    358 
    359 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     360 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    361 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    362 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    363 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    364 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    365 
    366 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    367 
    368 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    369 
    370 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    371 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    372 #endif /* WM_EVENT_COUNTERS */
    373 };
    374 
    375 struct wm_rxqueue {
    376 	kmutex_t *rxq_lock;		/* lock for rx operations */
    377 
    378 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    379 
    380 	/* Software state for the receive descriptors. */
    381 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    382 
    383 	/* RX control data structures. */
    384 	int rxq_ndesc;			/* must be a power of two */
     385 	size_t rxq_descsize;		/* size of an Rx descriptor */
    386 	rxdescs_t *rxq_descs_u;
    387 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    388 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     389 	int rxq_desc_rseg;		/* real number of control segments */
    390 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    391 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    392 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    393 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    394 
    395 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    396 
     397 	int rxq_ptr;			/* next ready Rx descriptor/queue entry */
    398 	int rxq_discard;
    399 	int rxq_len;
    400 	struct mbuf *rxq_head;
    401 	struct mbuf *rxq_tail;
    402 	struct mbuf **rxq_tailp;
    403 
    404 	bool rxq_stopping;
    405 
    406 	uint32_t rxq_packets;		/* for AIM */
    407 	uint32_t rxq_bytes;		/* for AIM */
    408 #ifdef WM_EVENT_COUNTERS
     409 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
     410 
     411 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
     412 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
    413 #endif
    414 };
    415 
    416 struct wm_queue {
     417 	int wmq_id;			/* index of this TX/RX queue pair */
     418 	int wmq_intr_idx;		/* index into the MSI-X table */
    419 
    420 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    421 	bool wmq_set_itr;
    422 
    423 	struct wm_txqueue wmq_txq;
    424 	struct wm_rxqueue wmq_rxq;
    425 
    426 	void *wmq_si;
    427 };
    428 
    429 struct wm_phyop {
    430 	int (*acquire)(struct wm_softc *);
    431 	void (*release)(struct wm_softc *);
    432 	int reset_delay_us;
    433 };
    434 
    435 /*
    436  * Software state per device.
    437  */
    438 struct wm_softc {
    439 	device_t sc_dev;		/* generic device information */
    440 	bus_space_tag_t sc_st;		/* bus space tag */
    441 	bus_space_handle_t sc_sh;	/* bus space handle */
    442 	bus_size_t sc_ss;		/* bus space size */
    443 	bus_space_tag_t sc_iot;		/* I/O space tag */
    444 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    445 	bus_size_t sc_ios;		/* I/O space size */
    446 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    447 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    448 	bus_size_t sc_flashs;		/* flash registers space size */
    449 	off_t sc_flashreg_offset;	/*
    450 					 * offset to flash registers from
    451 					 * start of BAR
    452 					 */
    453 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    454 
    455 	struct ethercom sc_ethercom;	/* ethernet common data */
    456 	struct mii_data sc_mii;		/* MII/media information */
    457 
    458 	pci_chipset_tag_t sc_pc;
    459 	pcitag_t sc_pcitag;
    460 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    461 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    462 
    463 	uint16_t sc_pcidevid;		/* PCI device ID */
    464 	wm_chip_type sc_type;		/* MAC type */
    465 	int sc_rev;			/* MAC revision */
    466 	wm_phy_type sc_phytype;		/* PHY type */
    467 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    468 #define	WM_MEDIATYPE_UNKNOWN		0x00
    469 #define	WM_MEDIATYPE_FIBER		0x01
    470 #define	WM_MEDIATYPE_COPPER		0x02
    471 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    472 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    473 	int sc_flags;			/* flags; see below */
    474 	int sc_if_flags;		/* last if_flags */
    475 	int sc_flowflags;		/* 802.3x flow control flags */
    476 	int sc_align_tweak;
    477 
     478 	void *sc_ihs[WM_MAX_NINTR];	/*
     479 					 * interrupt cookies.
     480 					 * - legacy and MSI use sc_ihs[0] only
     481 					 * - MSI-X uses sc_ihs[0] to sc_ihs[nintrs-1]
     482 					 */
     483 	pci_intr_handle_t *sc_intrs;	/*
     484 					 * legacy and MSI use sc_intrs[0] only
     485 					 * MSI-X uses sc_intrs[0] to sc_intrs[nintrs-1]
     486 					 */
    487 	int sc_nintrs;			/* number of interrupts */
    488 
     489 	int sc_link_intr_idx;		/* index into the MSI-X table */
    490 
    491 	callout_t sc_tick_ch;		/* tick callout */
    492 	bool sc_core_stopping;
    493 
    494 	int sc_nvm_ver_major;
    495 	int sc_nvm_ver_minor;
    496 	int sc_nvm_ver_build;
    497 	int sc_nvm_addrbits;		/* NVM address bits */
    498 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    499 	int sc_ich8_flash_base;
    500 	int sc_ich8_flash_bank_size;
    501 	int sc_nvm_k1_enabled;
    502 
    503 	int sc_nqueues;
    504 	struct wm_queue *sc_queue;
    505 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    506 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    507 
    508 	int sc_affinity_offset;
    509 
    510 #ifdef WM_EVENT_COUNTERS
    511 	/* Event counters. */
    512 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    513 
     514 	/* WM_T_82542_2_1 only */
    515 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    516 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    517 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    518 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    519 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    520 #endif /* WM_EVENT_COUNTERS */
    521 
     522 	/* This variable is used only on the 82547. */
    523 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    524 
    525 	uint32_t sc_ctrl;		/* prototype CTRL register */
    526 #if 0
    527 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    528 #endif
    529 	uint32_t sc_icr;		/* prototype interrupt bits */
    530 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    531 	uint32_t sc_tctl;		/* prototype TCTL register */
    532 	uint32_t sc_rctl;		/* prototype RCTL register */
    533 	uint32_t sc_txcw;		/* prototype TXCW register */
    534 	uint32_t sc_tipg;		/* prototype TIPG register */
    535 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    536 	uint32_t sc_pba;		/* prototype PBA register */
    537 
    538 	int sc_tbi_linkup;		/* TBI link status */
    539 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    540 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    541 
    542 	int sc_mchash_type;		/* multicast filter offset */
    543 
    544 	krndsource_t rnd_source;	/* random source */
    545 
    546 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    547 
    548 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    549 	kmutex_t *sc_ich_phymtx;	/*
    550 					 * 82574/82583/ICH/PCH specific PHY
    551 					 * mutex. For 82574/82583, the mutex
    552 					 * is used for both PHY and NVM.
    553 					 */
    554 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    555 
    556 	struct wm_phyop phy;
    557 };
    558 
     559 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
     560 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    561 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    562 
    563 #define	WM_RXCHAIN_RESET(rxq)						\
    564 do {									\
    565 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    566 	*(rxq)->rxq_tailp = NULL;					\
    567 	(rxq)->rxq_len = 0;						\
    568 } while (/*CONSTCOND*/0)
    569 
    570 #define	WM_RXCHAIN_LINK(rxq, m)						\
    571 do {									\
    572 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    573 	(rxq)->rxq_tailp = &(m)->m_next;				\
    574 } while (/*CONSTCOND*/0)
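
#if 0
/*
 * Illustrative only (not compiled): rxq_tailp always points at the
 * m_next slot to fill next, so appending to the Rx mbuf chain is O(1),
 * with no walk of the chain:
 */
static void
wm_example_rxchain(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(rxq);		/* rxq_head == NULL, rxq_len == 0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
}
#endif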
    575 
    576 #ifdef WM_EVENT_COUNTERS
    577 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    578 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    579 
    580 #define WM_Q_EVCNT_INCR(qname, evname)			\
    581 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    582 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    583 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    584 #else /* !WM_EVENT_COUNTERS */
    585 #define	WM_EVCNT_INCR(ev)	/* nothing */
    586 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    587 
    588 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    589 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    590 #endif /* !WM_EVENT_COUNTERS */
    591 
    592 #define	CSR_READ(sc, reg)						\
    593 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    594 #define	CSR_WRITE(sc, reg, val)						\
    595 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    596 #define	CSR_WRITE_FLUSH(sc)						\
    597 	(void) CSR_READ((sc), WMREG_STATUS)
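
#if 0
/*
 * Illustrative only (not compiled): CSR_WRITE_FLUSH() reads back a
 * harmless register (WMREG_STATUS) to push posted PCI writes out to
 * the chip, the usual ordering idiom around a reset:
 */
static void
wm_example_flush(struct wm_softc *sc)
{
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* make sure the write reached the chip */
	delay(10000);		/* then give the chip time to reset */
}
#endif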
    598 
    599 #define ICH8_FLASH_READ32(sc, reg)					\
    600 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    601 	    (reg) + sc->sc_flashreg_offset)
    602 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    603 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    604 	    (reg) + sc->sc_flashreg_offset, (data))
    605 
    606 #define ICH8_FLASH_READ16(sc, reg)					\
    607 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    608 	    (reg) + sc->sc_flashreg_offset)
    609 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    610 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    611 	    (reg) + sc->sc_flashreg_offset, (data))
    612 
    613 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    614 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    615 
    616 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    617 #define	WM_CDTXADDR_HI(txq, x)						\
    618 	(sizeof(bus_addr_t) == 8 ?					\
    619 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    620 
    621 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    622 #define	WM_CDRXADDR_HI(rxq, x)						\
    623 	(sizeof(bus_addr_t) == 8 ?					\
    624 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
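
/*
 * Worked example (illustrative, hypothetical addresses): with
 * txq_desc_dma == 0x123456000 and 16-byte descriptors,
 *
 *	WM_CDTXADDR(txq, 2)    == 0x123456000 + 2 * 16 == 0x123456020
 *	WM_CDTXADDR_LO(txq, 2) == 0x23456020
 *	WM_CDTXADDR_HI(txq, 2) == 0x1	(0 if bus_addr_t is 32 bits)
 */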
    625 
     626 /*
     627  * Register read/write functions, other than the CSR_{READ|WRITE}()
     628  * macros defined above.
     629  */
    630 #if 0
    631 static inline uint32_t wm_io_read(struct wm_softc *, int);
    632 #endif
    633 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    634 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    635 	uint32_t, uint32_t);
    636 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    637 
    638 /*
    639  * Descriptor sync/init functions.
    640  */
    641 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    642 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    643 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    644 
    645 /*
    646  * Device driver interface functions and commonly used functions.
    647  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    648  */
    649 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    650 static int	wm_match(device_t, cfdata_t, void *);
    651 static void	wm_attach(device_t, device_t, void *);
    652 static int	wm_detach(device_t, int);
    653 static bool	wm_suspend(device_t, const pmf_qual_t *);
    654 static bool	wm_resume(device_t, const pmf_qual_t *);
    655 static void	wm_watchdog(struct ifnet *);
    656 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    657 static void	wm_tick(void *);
    658 static int	wm_ifflags_cb(struct ethercom *);
    659 static int	wm_ioctl(struct ifnet *, u_long, void *);
    660 /* MAC address related */
    661 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    662 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    663 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    664 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    665 static void	wm_set_filter(struct wm_softc *);
    666 /* Reset and init related */
    667 static void	wm_set_vlan(struct wm_softc *);
    668 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    669 static void	wm_get_auto_rd_done(struct wm_softc *);
    670 static void	wm_lan_init_done(struct wm_softc *);
    671 static void	wm_get_cfg_done(struct wm_softc *);
    672 static void	wm_initialize_hardware_bits(struct wm_softc *);
    673 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    674 static void	wm_reset_phy(struct wm_softc *);
    675 static void	wm_flush_desc_rings(struct wm_softc *);
    676 static void	wm_reset(struct wm_softc *);
    677 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    678 static void	wm_rxdrain(struct wm_rxqueue *);
    679 static void	wm_rss_getkey(uint8_t *);
    680 static void	wm_init_rss(struct wm_softc *);
    681 static void	wm_adjust_qnum(struct wm_softc *, int);
    682 static inline bool	wm_is_using_msix(struct wm_softc *);
    683 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    684 static int	wm_softint_establish(struct wm_softc *, int, int);
    685 static int	wm_setup_legacy(struct wm_softc *);
    686 static int	wm_setup_msix(struct wm_softc *);
    687 static int	wm_init(struct ifnet *);
    688 static int	wm_init_locked(struct ifnet *);
    689 static void	wm_turnon(struct wm_softc *);
    690 static void	wm_turnoff(struct wm_softc *);
    691 static void	wm_stop(struct ifnet *, int);
    692 static void	wm_stop_locked(struct ifnet *, int);
    693 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    694 static void	wm_82547_txfifo_stall(void *);
    695 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    696 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    697 /* DMA related */
    698 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    699 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    700 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    701 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    702     struct wm_txqueue *);
    703 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    704 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    705 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    706     struct wm_rxqueue *);
    707 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    708 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    709 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    710 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    711 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    712 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    713 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    714     struct wm_txqueue *);
    715 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    716     struct wm_rxqueue *);
    717 static int	wm_alloc_txrx_queues(struct wm_softc *);
    718 static void	wm_free_txrx_queues(struct wm_softc *);
    719 static int	wm_init_txrx_queues(struct wm_softc *);
    720 /* Start */
    721 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    722     struct wm_txsoft *, uint32_t *, uint8_t *);
    723 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    724 static void	wm_start(struct ifnet *);
    725 static void	wm_start_locked(struct ifnet *);
    726 static int	wm_transmit(struct ifnet *, struct mbuf *);
    727 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    728 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    729 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    730     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    731 static void	wm_nq_start(struct ifnet *);
    732 static void	wm_nq_start_locked(struct ifnet *);
    733 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    734 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    735 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    736 static void	wm_deferred_start_locked(struct wm_txqueue *);
    737 static void	wm_handle_queue(void *);
    738 /* Interrupt */
    739 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    740 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    741 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    742 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    743 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    744 static void	wm_linkintr(struct wm_softc *, uint32_t);
    745 static int	wm_intr_legacy(void *);
    746 static inline void	wm_txrxintr_disable(struct wm_queue *);
    747 static inline void	wm_txrxintr_enable(struct wm_queue *);
    748 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    749 static int	wm_txrxintr_msix(void *);
    750 static int	wm_linkintr_msix(void *);
    751 
    752 /*
    753  * Media related.
    754  * GMII, SGMII, TBI, SERDES and SFP.
    755  */
    756 /* Common */
    757 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    758 /* GMII related */
    759 static void	wm_gmii_reset(struct wm_softc *);
    760 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    761 static int	wm_get_phy_id_82575(struct wm_softc *);
    762 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    763 static int	wm_gmii_mediachange(struct ifnet *);
    764 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    765 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    766 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    767 static int	wm_gmii_i82543_readreg(device_t, int, int);
    768 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    769 static int	wm_gmii_mdic_readreg(device_t, int, int);
    770 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    771 static int	wm_gmii_i82544_readreg(device_t, int, int);
    772 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    773 static int	wm_gmii_i80003_readreg(device_t, int, int);
    774 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    775 static int	wm_gmii_bm_readreg(device_t, int, int);
    776 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    777 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    778 static int	wm_gmii_hv_readreg(device_t, int, int);
    779 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    780 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    781 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    782 static int	wm_gmii_82580_readreg(device_t, int, int);
    783 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    784 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    785 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    786 static void	wm_gmii_statchg(struct ifnet *);
     787 /*
     788  * Kumeran related (80003, ICH* and PCH*).
     789  * These functions access Kumeran-specific registers,
     790  * not MII registers.
     791  */
    792 static int	wm_kmrn_readreg(struct wm_softc *, int);
    793 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    794 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    795 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    796 /* SGMII */
    797 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    798 static int	wm_sgmii_readreg(device_t, int, int);
    799 static void	wm_sgmii_writereg(device_t, int, int, int);
    800 /* TBI related */
    801 static void	wm_tbi_mediainit(struct wm_softc *);
    802 static int	wm_tbi_mediachange(struct ifnet *);
    803 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    804 static int	wm_check_for_link(struct wm_softc *);
    805 static void	wm_tbi_tick(struct wm_softc *);
    806 /* SERDES related */
    807 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    808 static int	wm_serdes_mediachange(struct ifnet *);
    809 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    810 static void	wm_serdes_tick(struct wm_softc *);
    811 /* SFP related */
    812 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    813 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    814 
    815 /*
    816  * NVM related.
    817  * Microwire, SPI (w/wo EERD) and Flash.
    818  */
    819 /* Misc functions */
    820 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    821 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    822 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    823 /* Microwire */
    824 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    825 /* SPI */
    826 static int	wm_nvm_ready_spi(struct wm_softc *);
    827 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     828 /* Reading via EERD */
    829 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    830 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    831 /* Flash */
    832 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    833     unsigned int *);
    834 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    835 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    836 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    837 	uint32_t *);
    838 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    839 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    840 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    841 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    842 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    843 /* iNVM */
    844 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    845 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     846 /* Locking, NVM type detection, checksum validation and read */
    847 static int	wm_nvm_acquire(struct wm_softc *);
    848 static void	wm_nvm_release(struct wm_softc *);
    849 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    850 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    851 static int	wm_nvm_validate_checksum(struct wm_softc *);
    852 static void	wm_nvm_version_invm(struct wm_softc *);
    853 static void	wm_nvm_version(struct wm_softc *);
    854 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    855 
    856 /*
    857  * Hardware semaphores.
     858  * Very complex...
    859  */
    860 static int	wm_get_null(struct wm_softc *);
    861 static void	wm_put_null(struct wm_softc *);
    862 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    863 static void	wm_put_swsm_semaphore(struct wm_softc *);
    864 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    865 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    866 static int	wm_get_phy_82575(struct wm_softc *);
    867 static void	wm_put_phy_82575(struct wm_softc *);
    868 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    869 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    870 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    871 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    872 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    873 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    874 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    875 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
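
/*
 * These pair up as acquire()/release() around PHY, NVM and other
 * shared-resource access; for the PHY, the per-chip pair is wired into
 * sc->phy (struct wm_phyop) during attach, so callers need not know
 * which semaphore scheme their chip uses.
 */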
    876 
    877 /*
    878  * Management mode and power management related subroutines.
    879  * BMC, AMT, suspend/resume and EEE.
    880  */
    881 #if 0
    882 static int	wm_check_mng_mode(struct wm_softc *);
    883 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    884 static int	wm_check_mng_mode_82574(struct wm_softc *);
    885 static int	wm_check_mng_mode_generic(struct wm_softc *);
    886 #endif
    887 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    888 static bool	wm_phy_resetisblocked(struct wm_softc *);
    889 static void	wm_get_hw_control(struct wm_softc *);
    890 static void	wm_release_hw_control(struct wm_softc *);
    891 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    892 static void	wm_smbustopci(struct wm_softc *);
    893 static void	wm_init_manageability(struct wm_softc *);
    894 static void	wm_release_manageability(struct wm_softc *);
    895 static void	wm_get_wakeup(struct wm_softc *);
    896 static void	wm_ulp_disable(struct wm_softc *);
    897 static void	wm_enable_phy_wakeup(struct wm_softc *);
    898 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    899 static void	wm_enable_wakeup(struct wm_softc *);
    900 /* LPLU (Low Power Link Up) */
    901 static void	wm_lplu_d0_disable(struct wm_softc *);
    902 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    903 /* EEE */
    904 static void	wm_set_eee_i350(struct wm_softc *);
    905 
    906 /*
    907  * Workarounds (mainly PHY related).
     908  * Workarounds for the PHY itself normally live in the PHY drivers.
    909  */
    910 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    911 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    912 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    913 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    914 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    915 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    916 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    917 static void	wm_reset_init_script_82575(struct wm_softc *);
    918 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    919 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    920 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    921 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    922 static void	wm_pll_workaround_i210(struct wm_softc *);
    923 
    924 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    925     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    926 
    927 /*
    928  * Devices supported by this driver.
    929  */
    930 static const struct wm_product {
    931 	pci_vendor_id_t		wmp_vendor;
    932 	pci_product_id_t	wmp_product;
    933 	const char		*wmp_name;
    934 	wm_chip_type		wmp_type;
    935 	uint32_t		wmp_flags;
    936 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    937 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    938 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    939 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    940 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    941 } wm_products[] = {
    942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    943 	  "Intel i82542 1000BASE-X Ethernet",
    944 	  WM_T_82542_2_1,	WMP_F_FIBER },
    945 
    946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    947 	  "Intel i82543GC 1000BASE-X Ethernet",
    948 	  WM_T_82543,		WMP_F_FIBER },
    949 
    950 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    951 	  "Intel i82543GC 1000BASE-T Ethernet",
    952 	  WM_T_82543,		WMP_F_COPPER },
    953 
    954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    955 	  "Intel i82544EI 1000BASE-T Ethernet",
    956 	  WM_T_82544,		WMP_F_COPPER },
    957 
    958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    959 	  "Intel i82544EI 1000BASE-X Ethernet",
    960 	  WM_T_82544,		WMP_F_FIBER },
    961 
    962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    963 	  "Intel i82544GC 1000BASE-T Ethernet",
    964 	  WM_T_82544,		WMP_F_COPPER },
    965 
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    967 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    968 	  WM_T_82544,		WMP_F_COPPER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    971 	  "Intel i82540EM 1000BASE-T Ethernet",
    972 	  WM_T_82540,		WMP_F_COPPER },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    975 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    976 	  WM_T_82540,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    979 	  "Intel i82540EP 1000BASE-T Ethernet",
    980 	  WM_T_82540,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    983 	  "Intel i82540EP 1000BASE-T Ethernet",
    984 	  WM_T_82540,		WMP_F_COPPER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    987 	  "Intel i82540EP 1000BASE-T Ethernet",
    988 	  WM_T_82540,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    991 	  "Intel i82545EM 1000BASE-T Ethernet",
    992 	  WM_T_82545,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    995 	  "Intel i82545GM 1000BASE-T Ethernet",
    996 	  WM_T_82545_3,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    999 	  "Intel i82545GM 1000BASE-X Ethernet",
   1000 	  WM_T_82545_3,		WMP_F_FIBER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1003 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1004 	  WM_T_82545_3,		WMP_F_SERDES },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1007 	  "Intel i82546EB 1000BASE-T Ethernet",
   1008 	  WM_T_82546,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1011 	  "Intel i82546EB 1000BASE-T Ethernet",
   1012 	  WM_T_82546,		WMP_F_COPPER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1015 	  "Intel i82545EM 1000BASE-X Ethernet",
   1016 	  WM_T_82545,		WMP_F_FIBER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1019 	  "Intel i82546EB 1000BASE-X Ethernet",
   1020 	  WM_T_82546,		WMP_F_FIBER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1023 	  "Intel i82546GB 1000BASE-T Ethernet",
   1024 	  WM_T_82546_3,		WMP_F_COPPER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1027 	  "Intel i82546GB 1000BASE-X Ethernet",
   1028 	  WM_T_82546_3,		WMP_F_FIBER },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1031 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1032 	  WM_T_82546_3,		WMP_F_SERDES },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1035 	  "i82546GB quad-port Gigabit Ethernet",
   1036 	  WM_T_82546_3,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1039 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1040 	  WM_T_82546_3,		WMP_F_COPPER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1043 	  "Intel PRO/1000MT (82546GB)",
   1044 	  WM_T_82546_3,		WMP_F_COPPER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1047 	  "Intel i82541EI 1000BASE-T Ethernet",
   1048 	  WM_T_82541,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1051 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1052 	  WM_T_82541,		WMP_F_COPPER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1055 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1056 	  WM_T_82541,		WMP_F_COPPER },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1059 	  "Intel i82541ER 1000BASE-T Ethernet",
   1060 	  WM_T_82541_2,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1063 	  "Intel i82541GI 1000BASE-T Ethernet",
   1064 	  WM_T_82541_2,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1067 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1068 	  WM_T_82541_2,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1071 	  "Intel i82541PI 1000BASE-T Ethernet",
   1072 	  WM_T_82541_2,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1075 	  "Intel i82547EI 1000BASE-T Ethernet",
   1076 	  WM_T_82547,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1079 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1080 	  WM_T_82547,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1083 	  "Intel i82547GI 1000BASE-T Ethernet",
   1084 	  WM_T_82547_2,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1087 	  "Intel PRO/1000 PT (82571EB)",
   1088 	  WM_T_82571,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1091 	  "Intel PRO/1000 PF (82571EB)",
   1092 	  WM_T_82571,		WMP_F_FIBER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1095 	  "Intel PRO/1000 PB (82571EB)",
   1096 	  WM_T_82571,		WMP_F_SERDES },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1099 	  "Intel PRO/1000 QT (82571EB)",
   1100 	  WM_T_82571,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1103 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1104 	  WM_T_82571,		WMP_F_COPPER, },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1107 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1108 	  WM_T_82571,		WMP_F_COPPER, },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1111 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1112 	  WM_T_82571,		WMP_F_SERDES, },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1115 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1116 	  WM_T_82571,		WMP_F_SERDES, },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1119 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1120 	  WM_T_82571,		WMP_F_FIBER, },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1123 	  "Intel i82572EI 1000baseT Ethernet",
   1124 	  WM_T_82572,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1127 	  "Intel i82572EI 1000baseX Ethernet",
   1128 	  WM_T_82572,		WMP_F_FIBER },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1131 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1132 	  WM_T_82572,		WMP_F_SERDES },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1135 	  "Intel i82572EI 1000baseT Ethernet",
   1136 	  WM_T_82572,		WMP_F_COPPER },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1139 	  "Intel i82573E",
   1140 	  WM_T_82573,		WMP_F_COPPER },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1143 	  "Intel i82573E IAMT",
   1144 	  WM_T_82573,		WMP_F_COPPER },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1147 	  "Intel i82573L Gigabit Ethernet",
   1148 	  WM_T_82573,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1151 	  "Intel i82574L",
   1152 	  WM_T_82574,		WMP_F_COPPER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1155 	  "Intel i82574L",
   1156 	  WM_T_82574,		WMP_F_COPPER },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1159 	  "Intel i82583V",
   1160 	  WM_T_82583,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1163 	  "i80003 dual 1000baseT Ethernet",
   1164 	  WM_T_80003,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1167 	  "i80003 dual 1000baseX Ethernet",
   1168 	  WM_T_80003,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1171 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1172 	  WM_T_80003,		WMP_F_SERDES },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1175 	  "Intel i80003 1000baseT Ethernet",
   1176 	  WM_T_80003,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1179 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1180 	  WM_T_80003,		WMP_F_SERDES },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1183 	  "Intel i82801H (M_AMT) LAN Controller",
   1184 	  WM_T_ICH8,		WMP_F_COPPER },
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1186 	  "Intel i82801H (AMT) LAN Controller",
   1187 	  WM_T_ICH8,		WMP_F_COPPER },
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1189 	  "Intel i82801H LAN Controller",
   1190 	  WM_T_ICH8,		WMP_F_COPPER },
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1192 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1193 	  WM_T_ICH8,		WMP_F_COPPER },
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1195 	  "Intel i82801H (M) LAN Controller",
   1196 	  WM_T_ICH8,		WMP_F_COPPER },
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1198 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1199 	  WM_T_ICH8,		WMP_F_COPPER },
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1201 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1202 	  WM_T_ICH8,		WMP_F_COPPER },
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1204 	  "82567V-3 LAN Controller",
   1205 	  WM_T_ICH8,		WMP_F_COPPER },
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1207 	  "82801I (AMT) LAN Controller",
   1208 	  WM_T_ICH9,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1210 	  "82801I 10/100 LAN Controller",
   1211 	  WM_T_ICH9,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1213 	  "82801I (G) 10/100 LAN Controller",
   1214 	  WM_T_ICH9,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1216 	  "82801I (GT) 10/100 LAN Controller",
   1217 	  WM_T_ICH9,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1219 	  "82801I (C) LAN Controller",
   1220 	  WM_T_ICH9,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1222 	  "82801I mobile LAN Controller",
   1223 	  WM_T_ICH9,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1225 	  "82801I mobile (V) LAN Controller",
   1226 	  WM_T_ICH9,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1228 	  "82801I mobile (AMT) LAN Controller",
   1229 	  WM_T_ICH9,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1231 	  "82567LM-4 LAN Controller",
   1232 	  WM_T_ICH9,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1234 	  "82567LM-2 LAN Controller",
   1235 	  WM_T_ICH10,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1237 	  "82567LF-2 LAN Controller",
   1238 	  WM_T_ICH10,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1240 	  "82567LM-3 LAN Controller",
   1241 	  WM_T_ICH10,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1243 	  "82567LF-3 LAN Controller",
   1244 	  WM_T_ICH10,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1246 	  "82567V-2 LAN Controller",
   1247 	  WM_T_ICH10,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1249 	  "82567V-3? LAN Controller",
   1250 	  WM_T_ICH10,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1252 	  "HANKSVILLE LAN Controller",
   1253 	  WM_T_ICH10,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1255 	  "PCH LAN (82577LM) Controller",
   1256 	  WM_T_PCH,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1258 	  "PCH LAN (82577LC) Controller",
   1259 	  WM_T_PCH,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1261 	  "PCH LAN (82578DM) Controller",
   1262 	  WM_T_PCH,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1264 	  "PCH LAN (82578DC) Controller",
   1265 	  WM_T_PCH,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1267 	  "PCH2 LAN (82579LM) Controller",
   1268 	  WM_T_PCH2,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1270 	  "PCH2 LAN (82579V) Controller",
   1271 	  WM_T_PCH2,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1273 	  "82575EB dual-1000baseT Ethernet",
   1274 	  WM_T_82575,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1276 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1277 	  WM_T_82575,		WMP_F_SERDES },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1279 	  "82575GB quad-1000baseT Ethernet",
   1280 	  WM_T_82575,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1282 	  "82575GB quad-1000baseT Ethernet (PM)",
   1283 	  WM_T_82575,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1285 	  "82576 1000BaseT Ethernet",
   1286 	  WM_T_82576,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1288 	  "82576 1000BaseX Ethernet",
   1289 	  WM_T_82576,		WMP_F_FIBER },
   1290 
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1292 	  "82576 gigabit Ethernet (SERDES)",
   1293 	  WM_T_82576,		WMP_F_SERDES },
   1294 
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1296 	  "82576 quad-1000BaseT Ethernet",
   1297 	  WM_T_82576,		WMP_F_COPPER },
   1298 
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1300 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1301 	  WM_T_82576,		WMP_F_COPPER },
   1302 
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1304 	  "82576 gigabit Ethernet",
   1305 	  WM_T_82576,		WMP_F_COPPER },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1308 	  "82576 gigabit Ethernet (SERDES)",
   1309 	  WM_T_82576,		WMP_F_SERDES },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1311 	  "82576 quad-gigabit Ethernet (SERDES)",
   1312 	  WM_T_82576,		WMP_F_SERDES },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1315 	  "82580 1000BaseT Ethernet",
   1316 	  WM_T_82580,		WMP_F_COPPER },
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1318 	  "82580 1000BaseX Ethernet",
   1319 	  WM_T_82580,		WMP_F_FIBER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1322 	  "82580 1000BaseT Ethernet (SERDES)",
   1323 	  WM_T_82580,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1326 	  "82580 gigabit Ethernet (SGMII)",
   1327 	  WM_T_82580,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1329 	  "82580 dual-1000BaseT Ethernet",
   1330 	  WM_T_82580,		WMP_F_COPPER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1333 	  "82580 quad-1000BaseX Ethernet",
   1334 	  WM_T_82580,		WMP_F_FIBER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1337 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1338 	  WM_T_82580,		WMP_F_COPPER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1341 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1342 	  WM_T_82580,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1345 	  "DH89XXCC 1000BASE-KX Ethernet",
   1346 	  WM_T_82580,		WMP_F_SERDES },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1349 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1350 	  WM_T_82580,		WMP_F_SERDES },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1353 	  "I350 Gigabit Network Connection",
   1354 	  WM_T_I350,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1357 	  "I350 Gigabit Fiber Network Connection",
   1358 	  WM_T_I350,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1361 	  "I350 Gigabit Backplane Connection",
   1362 	  WM_T_I350,		WMP_F_SERDES },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1365 	  "I350 Quad Port Gigabit Ethernet",
   1366 	  WM_T_I350,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1369 	  "I350 Gigabit Connection",
   1370 	  WM_T_I350,		WMP_F_COPPER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1373 	  "I354 Gigabit Ethernet (KX)",
   1374 	  WM_T_I354,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1377 	  "I354 Gigabit Ethernet (SGMII)",
   1378 	  WM_T_I354,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1381 	  "I354 Gigabit Ethernet (2.5G)",
   1382 	  WM_T_I354,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1385 	  "I210-T1 Ethernet Server Adapter",
   1386 	  WM_T_I210,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1389 	  "I210 Ethernet (Copper OEM)",
   1390 	  WM_T_I210,		WMP_F_COPPER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1393 	  "I210 Ethernet (Copper IT)",
   1394 	  WM_T_I210,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1397 	  "I210 Ethernet (FLASH less)",
   1398 	  WM_T_I210,		WMP_F_COPPER },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1401 	  "I210 Gigabit Ethernet (Fiber)",
   1402 	  WM_T_I210,		WMP_F_FIBER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1405 	  "I210 Gigabit Ethernet (SERDES)",
   1406 	  WM_T_I210,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1409 	  "I210 Gigabit Ethernet (FLASH less)",
   1410 	  WM_T_I210,		WMP_F_SERDES },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1413 	  "I210 Gigabit Ethernet (SGMII)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1417 	  "I211 Ethernet (COPPER)",
   1418 	  WM_T_I211,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1420 	  "I217 V Ethernet Connection",
   1421 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1423 	  "I217 LM Ethernet Connection",
   1424 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1426 	  "I218 V Ethernet Connection",
   1427 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1429 	  "I218 V Ethernet Connection",
   1430 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1432 	  "I218 V Ethernet Connection",
   1433 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1435 	  "I218 LM Ethernet Connection",
   1436 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1438 	  "I218 LM Ethernet Connection",
   1439 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1441 	  "I218 LM Ethernet Connection",
   1442 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1443 #if 0
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1445 	  "I219 V Ethernet Connection",
   1446 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1448 	  "I219 V Ethernet Connection",
   1449 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1451 	  "I219 V Ethernet Connection",
   1452 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1454 	  "I219 V Ethernet Connection",
   1455 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1457 	  "I219 LM Ethernet Connection",
   1458 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1460 	  "I219 LM Ethernet Connection",
   1461 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1463 	  "I219 LM Ethernet Connection",
   1464 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1466 	  "I219 LM Ethernet Connection",
   1467 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1469 	  "I219 LM Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 #endif
   1472 	{ 0,			0,
   1473 	  NULL,
   1474 	  0,			0 },
   1475 };
   1476 
   1477 /*
   1478  * Register read/write functions.
   1479  * Other than CSR_{READ|WRITE}().
   1480  */
   1481 
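         /*
          * On the i82544 and later, mapped I/O space provides indirect
          * register access: the target register offset is written at I/O
          * BAR offset 0 and the data is then read or written at offset 4,
          * as the two helpers below do.
          */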
   1482 #if 0 /* Not currently used */
   1483 static inline uint32_t
   1484 wm_io_read(struct wm_softc *sc, int reg)
   1485 {
   1486 
   1487 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1488 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1489 }
   1490 #endif
   1491 
   1492 static inline void
   1493 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1494 {
   1495 
   1496 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1498 }
   1499 
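         /*
          * Write an 8-bit value into a sub-register of an 82575 SERDES
          * control register: the data and the target offset are packed
          * into a single 32-bit write, then we poll, 5us per iteration,
          * until the chip sets the READY bit or the timeout expires.
          */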
   1500 static inline void
   1501 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1502     uint32_t data)
   1503 {
   1504 	uint32_t regval;
   1505 	int i;
   1506 
   1507 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1508 
   1509 	CSR_WRITE(sc, reg, regval);
   1510 
   1511 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1512 		delay(5);
   1513 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1514 			break;
   1515 	}
   1516 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1517 		aprint_error("%s: WARNING:"
   1518 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1519 		    device_xname(sc->sc_dev), reg);
   1520 	}
   1521 }
   1522 
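         /*
          * Store a DMA address into the two little-endian 32-bit halves
          * of a descriptor address field; the high half is zero unless
          * bus_addr_t is 64 bits wide.
          */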
   1523 static inline void
   1524 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1525 {
   1526 	wa->wa_low = htole32(v & 0xffffffffU);
   1527 	if (sizeof(bus_addr_t) == 8)
   1528 		wa->wa_high = htole32((uint64_t) v >> 32);
   1529 	else
   1530 		wa->wa_high = 0;
   1531 }
   1532 
   1533 /*
   1534  * Descriptor sync/init functions.
   1535  */
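         /*
          * wm_cdtxsync() syncs [start, start + num) of the Tx descriptor
          * ring, splitting the sync in two when the range wraps.  For
          * example, on a 256-descriptor ring, start = 250 and num = 10
          * syncs descriptors 250-255 and then descriptors 0-3.
          */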
   1536 static inline void
   1537 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1538 {
   1539 	struct wm_softc *sc = txq->txq_sc;
   1540 
   1541 	/* If it will wrap around, sync to the end of the ring. */
   1542 	if ((start + num) > WM_NTXDESC(txq)) {
   1543 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1544 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1545 		    (WM_NTXDESC(txq) - start), ops);
   1546 		num -= (WM_NTXDESC(txq) - start);
   1547 		start = 0;
   1548 	}
   1549 
   1550 	/* Now sync whatever is left. */
   1551 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1552 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1553 }
   1554 
   1555 static inline void
   1556 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1557 {
   1558 	struct wm_softc *sc = rxq->rxq_sc;
   1559 
   1560 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1561 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1562 }
   1563 
   1564 static inline void
   1565 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1566 {
   1567 	struct wm_softc *sc = rxq->rxq_sc;
   1568 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1569 	struct mbuf *m = rxs->rxs_mbuf;
   1570 
    1571 	/*
    1572 	 * Note: We scoot the packet forward 2 bytes in the buffer
    1573 	 * so that the payload after the 14-byte Ethernet header is
    1574 	 * aligned to a 4-byte boundary (2 + 14 = 16).
    1575 	 *
   1576 	 * XXX BRAINDAMAGE ALERT!
   1577 	 * The stupid chip uses the same size for every buffer, which
   1578 	 * is set in the Receive Control register.  We are using the 2K
   1579 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1580 	 * reason, we can't "scoot" packets longer than the standard
   1581 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1582 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1583 	 * the upper layer copy the headers.
   1584 	 */
   1585 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1586 
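         	/*
         	 * Three Rx descriptor layouts are in use: the 82574 uses
         	 * extended descriptors, the "new queue" (82575 and newer)
         	 * chips use the nq format, and everything else uses the
         	 * legacy wiseman layout.  In each case, set the buffer's
         	 * DMA address and clear the fields that the hardware will
         	 * write back.
         	 */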
   1587 	if (sc->sc_type == WM_T_82574) {
   1588 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1589 		rxd->erx_data.erxd_addr =
   1590 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1591 		rxd->erx_data.erxd_dd = 0;
   1592 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1593 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1594 
   1595 		rxd->nqrx_data.nrxd_paddr =
   1596 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1597 		/* Currently, split header is not supported. */
   1598 		rxd->nqrx_data.nrxd_haddr = 0;
   1599 	} else {
   1600 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1601 
   1602 		wm_set_dma_addr(&rxd->wrx_addr,
   1603 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1604 		rxd->wrx_len = 0;
   1605 		rxd->wrx_cksum = 0;
   1606 		rxd->wrx_status = 0;
   1607 		rxd->wrx_errors = 0;
   1608 		rxd->wrx_special = 0;
   1609 	}
   1610 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1611 
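         	/* Tell the chip the descriptor is available by advancing the tail. */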
   1612 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1613 }
   1614 
   1615 /*
   1616  * Device driver interface functions and commonly used functions.
   1617  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1618  */
   1619 
   1620 /* Lookup supported device table */
   1621 static const struct wm_product *
   1622 wm_lookup(const struct pci_attach_args *pa)
   1623 {
   1624 	const struct wm_product *wmp;
   1625 
   1626 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1627 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1628 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1629 			return wmp;
   1630 	}
   1631 	return NULL;
   1632 }
   1633 
   1634 /* The match function (ca_match) */
   1635 static int
   1636 wm_match(device_t parent, cfdata_t cf, void *aux)
   1637 {
   1638 	struct pci_attach_args *pa = aux;
   1639 
   1640 	if (wm_lookup(pa) != NULL)
   1641 		return 1;
   1642 
   1643 	return 0;
   1644 }
   1645 
   1646 /* The attach function (ca_attach) */
   1647 static void
   1648 wm_attach(device_t parent, device_t self, void *aux)
   1649 {
   1650 	struct wm_softc *sc = device_private(self);
   1651 	struct pci_attach_args *pa = aux;
   1652 	prop_dictionary_t dict;
   1653 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1654 	pci_chipset_tag_t pc = pa->pa_pc;
   1655 	int counts[PCI_INTR_TYPE_SIZE];
   1656 	pci_intr_type_t max_type;
   1657 	const char *eetype, *xname;
   1658 	bus_space_tag_t memt;
   1659 	bus_space_handle_t memh;
   1660 	bus_size_t memsize;
   1661 	int memh_valid;
   1662 	int i, error;
   1663 	const struct wm_product *wmp;
   1664 	prop_data_t ea;
   1665 	prop_number_t pn;
   1666 	uint8_t enaddr[ETHER_ADDR_LEN];
   1667 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1668 	pcireg_t preg, memtype;
   1669 	uint16_t eeprom_data, apme_mask;
   1670 	bool force_clear_smbi;
   1671 	uint32_t link_mode;
   1672 	uint32_t reg;
   1673 
   1674 	sc->sc_dev = self;
   1675 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1676 	sc->sc_core_stopping = false;
   1677 
   1678 	wmp = wm_lookup(pa);
   1679 #ifdef DIAGNOSTIC
   1680 	if (wmp == NULL) {
   1681 		printf("\n");
   1682 		panic("wm_attach: impossible");
   1683 	}
   1684 #endif
   1685 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1686 
   1687 	sc->sc_pc = pa->pa_pc;
   1688 	sc->sc_pcitag = pa->pa_tag;
   1689 
   1690 	if (pci_dma64_available(pa))
   1691 		sc->sc_dmat = pa->pa_dmat64;
   1692 	else
   1693 		sc->sc_dmat = pa->pa_dmat;
   1694 
   1695 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1696 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1697 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1698 
   1699 	sc->sc_type = wmp->wmp_type;
   1700 
   1701 	/* Set default function pointers */
   1702 	sc->phy.acquire = wm_get_null;
   1703 	sc->phy.release = wm_put_null;
   1704 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1705 
   1706 	if (sc->sc_type < WM_T_82543) {
   1707 		if (sc->sc_rev < 2) {
   1708 			aprint_error_dev(sc->sc_dev,
   1709 			    "i82542 must be at least rev. 2\n");
   1710 			return;
   1711 		}
   1712 		if (sc->sc_rev < 3)
   1713 			sc->sc_type = WM_T_82542_2_0;
   1714 	}
   1715 
   1716 	/*
   1717 	 * Disable MSI for Errata:
   1718 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1719 	 *
   1720 	 *  82544: Errata 25
   1721 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1722 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1723 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1724 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1725 	 *
   1726 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1727 	 *
   1728 	 *  82571 & 82572: Errata 63
   1729 	 */
   1730 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1731 	    || (sc->sc_type == WM_T_82572))
   1732 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1733 
   1734 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1735 	    || (sc->sc_type == WM_T_82580)
   1736 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1737 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1738 		sc->sc_flags |= WM_F_NEWQUEUE;
   1739 
   1740 	/* Set device properties (mactype) */
   1741 	dict = device_properties(sc->sc_dev);
   1742 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1743 
    1744 	/*
    1745 	 * Map the device.  All devices support memory-mapped access,
    1746 	 * and it is really required for normal operation.
    1747 	 */
   1748 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1749 	switch (memtype) {
   1750 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1751 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1752 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1753 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1754 		break;
   1755 	default:
   1756 		memh_valid = 0;
   1757 		break;
   1758 	}
   1759 
   1760 	if (memh_valid) {
   1761 		sc->sc_st = memt;
   1762 		sc->sc_sh = memh;
   1763 		sc->sc_ss = memsize;
   1764 	} else {
   1765 		aprint_error_dev(sc->sc_dev,
   1766 		    "unable to map device registers\n");
   1767 		return;
   1768 	}
   1769 
   1770 	/*
   1771 	 * In addition, i82544 and later support I/O mapped indirect
   1772 	 * register access.  It is not desirable (nor supported in
   1773 	 * this driver) to use it for normal operation, though it is
   1774 	 * required to work around bugs in some chip versions.
   1775 	 */
   1776 	if (sc->sc_type >= WM_T_82544) {
   1777 		/* First we have to find the I/O BAR. */
   1778 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1779 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1780 			if (memtype == PCI_MAPREG_TYPE_IO)
   1781 				break;
   1782 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1783 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1784 				i += 4;	/* skip high bits, too */
   1785 		}
   1786 		if (i < PCI_MAPREG_END) {
    1787 			/*
    1788 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1789 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1790 			 * That's no problem, because newer chips don't have
    1791 			 * this bug.
    1792 			 *
    1793 			 * The i8254x apparently doesn't respond when the
    1794 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1795 			 * been configured.
    1796 			 */
   1797 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1798 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1799 				aprint_error_dev(sc->sc_dev,
   1800 				    "WARNING: I/O BAR at zero.\n");
   1801 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1802 					0, &sc->sc_iot, &sc->sc_ioh,
   1803 					NULL, &sc->sc_ios) == 0) {
   1804 				sc->sc_flags |= WM_F_IOH_VALID;
   1805 			} else {
   1806 				aprint_error_dev(sc->sc_dev,
   1807 				    "WARNING: unable to map I/O space\n");
   1808 			}
   1809 		}
   1810 
   1811 	}
   1812 
   1813 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1814 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1815 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1816 	if (sc->sc_type < WM_T_82542_2_1)
   1817 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1818 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1819 
   1820 	/* power up chip */
   1821 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1822 	    NULL)) && error != EOPNOTSUPP) {
   1823 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1824 		return;
   1825 	}
   1826 
   1827 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1828 
   1829 	/* Allocation settings */
   1830 	max_type = PCI_INTR_TYPE_MSIX;
   1831 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1832 	counts[PCI_INTR_TYPE_MSI] = 1;
   1833 	counts[PCI_INTR_TYPE_INTX] = 1;
   1834 
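         	/*
         	 * Fall back through the interrupt types: try MSI-X first,
         	 * with one vector per queue plus one for link status changes;
         	 * if establishing the MSI-X handlers fails, release the
         	 * vectors and retry with a single MSI vector, and finally
         	 * with INTx.
         	 */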
   1835 alloc_retry:
   1836 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1837 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1838 		return;
   1839 	}
   1840 
   1841 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1842 		error = wm_setup_msix(sc);
   1843 		if (error) {
   1844 			pci_intr_release(pc, sc->sc_intrs,
   1845 			    counts[PCI_INTR_TYPE_MSIX]);
   1846 
   1847 			/* Setup for MSI: Disable MSI-X */
   1848 			max_type = PCI_INTR_TYPE_MSI;
   1849 			counts[PCI_INTR_TYPE_MSI] = 1;
   1850 			counts[PCI_INTR_TYPE_INTX] = 1;
   1851 			goto alloc_retry;
   1852 		}
    1853 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1854 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1855 		error = wm_setup_legacy(sc);
   1856 		if (error) {
   1857 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1858 			    counts[PCI_INTR_TYPE_MSI]);
   1859 
   1860 			/* The next try is for INTx: Disable MSI */
   1861 			max_type = PCI_INTR_TYPE_INTX;
   1862 			counts[PCI_INTR_TYPE_INTX] = 1;
   1863 			goto alloc_retry;
   1864 		}
   1865 	} else {
   1866 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1867 		error = wm_setup_legacy(sc);
   1868 		if (error) {
   1869 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1870 			    counts[PCI_INTR_TYPE_INTX]);
   1871 			return;
   1872 		}
   1873 	}
   1874 
   1875 	/*
   1876 	 * Check the function ID (unit number of the chip).
   1877 	 */
   1878 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1879 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1880 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1881 	    || (sc->sc_type == WM_T_82580)
   1882 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1883 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1884 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1885 	else
   1886 		sc->sc_funcid = 0;
   1887 
   1888 	/*
   1889 	 * Determine a few things about the bus we're connected to.
   1890 	 */
   1891 	if (sc->sc_type < WM_T_82543) {
   1892 		/* We don't really know the bus characteristics here. */
   1893 		sc->sc_bus_speed = 33;
   1894 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
    1895 		/*
    1896 		 * CSA (Communication Streaming Architecture) is about as
    1897 		 * fast as a 32-bit 66MHz PCI bus.
    1898 		 */
   1899 		sc->sc_flags |= WM_F_CSA;
   1900 		sc->sc_bus_speed = 66;
   1901 		aprint_verbose_dev(sc->sc_dev,
   1902 		    "Communication Streaming Architecture\n");
   1903 		if (sc->sc_type == WM_T_82547) {
   1904 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1905 			callout_setfunc(&sc->sc_txfifo_ch,
   1906 					wm_82547_txfifo_stall, sc);
   1907 			aprint_verbose_dev(sc->sc_dev,
   1908 			    "using 82547 Tx FIFO stall work-around\n");
   1909 		}
   1910 	} else if (sc->sc_type >= WM_T_82571) {
   1911 		sc->sc_flags |= WM_F_PCIE;
   1912 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1913 		    && (sc->sc_type != WM_T_ICH10)
   1914 		    && (sc->sc_type != WM_T_PCH)
   1915 		    && (sc->sc_type != WM_T_PCH2)
   1916 		    && (sc->sc_type != WM_T_PCH_LPT)
   1917 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1918 			/* ICH* and PCH* have no PCIe capability registers */
   1919 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1920 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1921 				NULL) == 0)
   1922 				aprint_error_dev(sc->sc_dev,
   1923 				    "unable to find PCIe capability\n");
   1924 		}
   1925 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1926 	} else {
   1927 		reg = CSR_READ(sc, WMREG_STATUS);
   1928 		if (reg & STATUS_BUS64)
   1929 			sc->sc_flags |= WM_F_BUS64;
   1930 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1931 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1932 
   1933 			sc->sc_flags |= WM_F_PCIX;
   1934 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1935 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1936 				aprint_error_dev(sc->sc_dev,
   1937 				    "unable to find PCIX capability\n");
   1938 			else if (sc->sc_type != WM_T_82545_3 &&
   1939 				 sc->sc_type != WM_T_82546_3) {
   1940 				/*
   1941 				 * Work around a problem caused by the BIOS
   1942 				 * setting the max memory read byte count
   1943 				 * incorrectly.
   1944 				 */
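         				/*
         				 * Both fields encode 512 << n bytes,
         				 * so e.g. a command value of 3 (4096
         				 * bytes) with a status maximum of 2
         				 * (2048 bytes) is clamped to 2048.
         				 */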
   1945 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1946 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1947 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1948 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1949 
   1950 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1951 				    PCIX_CMD_BYTECNT_SHIFT;
   1952 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1953 				    PCIX_STATUS_MAXB_SHIFT;
   1954 				if (bytecnt > maxb) {
   1955 					aprint_verbose_dev(sc->sc_dev,
   1956 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1957 					    512 << bytecnt, 512 << maxb);
   1958 					pcix_cmd = (pcix_cmd &
   1959 					    ~PCIX_CMD_BYTECNT_MASK) |
   1960 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1961 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1962 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1963 					    pcix_cmd);
   1964 				}
   1965 			}
   1966 		}
   1967 		/*
   1968 		 * The quad port adapter is special; it has a PCIX-PCIX
   1969 		 * bridge on the board, and can run the secondary bus at
   1970 		 * a higher speed.
   1971 		 */
   1972 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1973 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1974 								      : 66;
   1975 		} else if (sc->sc_flags & WM_F_PCIX) {
   1976 			switch (reg & STATUS_PCIXSPD_MASK) {
   1977 			case STATUS_PCIXSPD_50_66:
   1978 				sc->sc_bus_speed = 66;
   1979 				break;
   1980 			case STATUS_PCIXSPD_66_100:
   1981 				sc->sc_bus_speed = 100;
   1982 				break;
   1983 			case STATUS_PCIXSPD_100_133:
   1984 				sc->sc_bus_speed = 133;
   1985 				break;
   1986 			default:
   1987 				aprint_error_dev(sc->sc_dev,
   1988 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1989 				    reg & STATUS_PCIXSPD_MASK);
   1990 				sc->sc_bus_speed = 66;
   1991 				break;
   1992 			}
   1993 		} else
   1994 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1995 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1996 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1997 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1998 	}
   1999 
   2000 	/* clear interesting stat counters */
   2001 	CSR_READ(sc, WMREG_COLC);
   2002 	CSR_READ(sc, WMREG_RXERRC);
   2003 
   2004 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2005 	    || (sc->sc_type >= WM_T_ICH8))
   2006 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2007 	if (sc->sc_type >= WM_T_ICH8)
   2008 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2009 
   2010 	/* Set PHY, NVM mutex related stuff */
   2011 	switch (sc->sc_type) {
   2012 	case WM_T_82542_2_0:
   2013 	case WM_T_82542_2_1:
   2014 	case WM_T_82543:
   2015 	case WM_T_82544:
   2016 		/* Microwire */
   2017 		sc->sc_nvm_wordsize = 64;
   2018 		sc->sc_nvm_addrbits = 6;
   2019 		break;
   2020 	case WM_T_82540:
   2021 	case WM_T_82545:
   2022 	case WM_T_82545_3:
   2023 	case WM_T_82546:
   2024 	case WM_T_82546_3:
   2025 		/* Microwire */
   2026 		reg = CSR_READ(sc, WMREG_EECD);
   2027 		if (reg & EECD_EE_SIZE) {
   2028 			sc->sc_nvm_wordsize = 256;
   2029 			sc->sc_nvm_addrbits = 8;
   2030 		} else {
   2031 			sc->sc_nvm_wordsize = 64;
   2032 			sc->sc_nvm_addrbits = 6;
   2033 		}
   2034 		sc->sc_flags |= WM_F_LOCK_EECD;
   2035 		break;
   2036 	case WM_T_82541:
   2037 	case WM_T_82541_2:
   2038 	case WM_T_82547:
   2039 	case WM_T_82547_2:
   2040 		sc->sc_flags |= WM_F_LOCK_EECD;
   2041 		reg = CSR_READ(sc, WMREG_EECD);
   2042 		if (reg & EECD_EE_TYPE) {
   2043 			/* SPI */
   2044 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2045 			wm_nvm_set_addrbits_size_eecd(sc);
   2046 		} else {
   2047 			/* Microwire */
   2048 			if ((reg & EECD_EE_ABITS) != 0) {
   2049 				sc->sc_nvm_wordsize = 256;
   2050 				sc->sc_nvm_addrbits = 8;
   2051 			} else {
   2052 				sc->sc_nvm_wordsize = 64;
   2053 				sc->sc_nvm_addrbits = 6;
   2054 			}
   2055 		}
   2056 		break;
   2057 	case WM_T_82571:
   2058 	case WM_T_82572:
   2059 		/* SPI */
   2060 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2061 		wm_nvm_set_addrbits_size_eecd(sc);
   2062 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2063 		sc->phy.acquire = wm_get_swsm_semaphore;
   2064 		sc->phy.release = wm_put_swsm_semaphore;
   2065 		break;
   2066 	case WM_T_82573:
   2067 	case WM_T_82574:
   2068 	case WM_T_82583:
   2069 		if (sc->sc_type == WM_T_82573) {
   2070 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2071 			sc->phy.acquire = wm_get_swsm_semaphore;
   2072 			sc->phy.release = wm_put_swsm_semaphore;
   2073 		} else {
   2074 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2075 			/* Both PHY and NVM use the same semaphore. */
   2076 			sc->phy.acquire
   2077 			    = wm_get_swfwhw_semaphore;
   2078 			sc->phy.release
   2079 			    = wm_put_swfwhw_semaphore;
   2080 		}
   2081 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2082 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2083 			sc->sc_nvm_wordsize = 2048;
   2084 		} else {
   2085 			/* SPI */
   2086 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2087 			wm_nvm_set_addrbits_size_eecd(sc);
   2088 		}
   2089 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2090 		break;
   2091 	case WM_T_82575:
   2092 	case WM_T_82576:
   2093 	case WM_T_82580:
   2094 	case WM_T_I350:
   2095 	case WM_T_I354:
   2096 	case WM_T_80003:
   2097 		/* SPI */
   2098 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2099 		wm_nvm_set_addrbits_size_eecd(sc);
   2100 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2101 		    | WM_F_LOCK_SWSM;
   2102 		sc->phy.acquire = wm_get_phy_82575;
   2103 		sc->phy.release = wm_put_phy_82575;
   2104 		break;
   2105 	case WM_T_ICH8:
   2106 	case WM_T_ICH9:
   2107 	case WM_T_ICH10:
   2108 	case WM_T_PCH:
   2109 	case WM_T_PCH2:
   2110 	case WM_T_PCH_LPT:
   2111 		/* FLASH */
   2112 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2113 		sc->sc_nvm_wordsize = 2048;
    2114 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2115 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2116 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2117 			aprint_error_dev(sc->sc_dev,
   2118 			    "can't map FLASH registers\n");
   2119 			goto out;
   2120 		}
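         		/*
         		 * GFPREG describes the NVM region of the flash in
         		 * sector units: the base is in the low bits and the
         		 * limit in bits 16 and up.  Convert sectors to bytes,
         		 * then divide by two banks and by the word size to
         		 * get the per-bank size in 16-bit words.
         		 */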
   2121 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2122 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2123 		    ICH_FLASH_SECTOR_SIZE;
   2124 		sc->sc_ich8_flash_bank_size =
   2125 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2126 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2127 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2128 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2129 		sc->sc_flashreg_offset = 0;
   2130 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2131 		sc->phy.release = wm_put_swflag_ich8lan;
   2132 		break;
   2133 	case WM_T_PCH_SPT:
   2134 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2135 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2136 		sc->sc_flasht = sc->sc_st;
   2137 		sc->sc_flashh = sc->sc_sh;
   2138 		sc->sc_ich8_flash_base = 0;
   2139 		sc->sc_nvm_wordsize =
   2140 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2141 			* NVM_SIZE_MULTIPLIER;
    2142 		/* That is the size in bytes; we want 16-bit words */
   2143 		sc->sc_nvm_wordsize /= 2;
   2144 		/* assume 2 banks */
   2145 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2146 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2147 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2148 		sc->phy.release = wm_put_swflag_ich8lan;
   2149 		break;
   2150 	case WM_T_I210:
   2151 	case WM_T_I211:
   2152 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2153 			wm_nvm_set_addrbits_size_eecd(sc);
   2154 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2155 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2156 		} else {
   2157 			sc->sc_nvm_wordsize = INVM_SIZE;
   2158 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2159 		}
   2160 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2161 		sc->phy.acquire = wm_get_phy_82575;
   2162 		sc->phy.release = wm_put_phy_82575;
   2163 		break;
   2164 	default:
   2165 		break;
   2166 	}
   2167 
   2168 	/* Reset the chip to a known state. */
   2169 	wm_reset(sc);
   2170 
   2171 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2172 	switch (sc->sc_type) {
   2173 	case WM_T_82571:
   2174 	case WM_T_82572:
   2175 		reg = CSR_READ(sc, WMREG_SWSM2);
   2176 		if ((reg & SWSM2_LOCK) == 0) {
   2177 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2178 			force_clear_smbi = true;
   2179 		} else
   2180 			force_clear_smbi = false;
   2181 		break;
   2182 	case WM_T_82573:
   2183 	case WM_T_82574:
   2184 	case WM_T_82583:
   2185 		force_clear_smbi = true;
   2186 		break;
   2187 	default:
   2188 		force_clear_smbi = false;
   2189 		break;
   2190 	}
   2191 	if (force_clear_smbi) {
   2192 		reg = CSR_READ(sc, WMREG_SWSM);
   2193 		if ((reg & SWSM_SMBI) != 0)
   2194 			aprint_error_dev(sc->sc_dev,
   2195 			    "Please update the Bootagent\n");
   2196 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2197 	}
   2198 
    2199 	/*
    2200 	 * Defer printing the EEPROM type until after verifying the checksum.
    2201 	 * This allows the EEPROM type to be printed correctly in the case
    2202 	 * that no EEPROM is attached.
    2203 	 */
   2204 	/*
   2205 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2206 	 * this for later, so we can fail future reads from the EEPROM.
   2207 	 */
   2208 	if (wm_nvm_validate_checksum(sc)) {
    2209 		/*
    2210 		 * Check again, because some PCIe parts fail the first
    2211 		 * check due to the link being in a sleep state.
    2212 		 */
   2213 		if (wm_nvm_validate_checksum(sc))
   2214 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2215 	}
   2216 
   2217 	/* Set device properties (macflags) */
   2218 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2219 
   2220 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2221 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2222 	else {
   2223 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2224 		    sc->sc_nvm_wordsize);
   2225 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2226 			aprint_verbose("iNVM");
   2227 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2228 			aprint_verbose("FLASH(HW)");
   2229 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2230 			aprint_verbose("FLASH");
   2231 		else {
   2232 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2233 				eetype = "SPI";
   2234 			else
   2235 				eetype = "MicroWire";
   2236 			aprint_verbose("(%d address bits) %s EEPROM",
   2237 			    sc->sc_nvm_addrbits, eetype);
   2238 		}
   2239 	}
   2240 	wm_nvm_version(sc);
   2241 	aprint_verbose("\n");
   2242 
   2243 	/* Check for I21[01] PLL workaround */
   2244 	if (sc->sc_type == WM_T_I210)
   2245 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2246 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2247 		/* NVM image release 3.25 has a workaround */
   2248 		if ((sc->sc_nvm_ver_major < 3)
   2249 		    || ((sc->sc_nvm_ver_major == 3)
   2250 			&& (sc->sc_nvm_ver_minor < 25))) {
   2251 			aprint_verbose_dev(sc->sc_dev,
   2252 			    "ROM image version %d.%d is older than 3.25\n",
   2253 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2254 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2255 		}
   2256 	}
   2257 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2258 		wm_pll_workaround_i210(sc);
   2259 
   2260 	wm_get_wakeup(sc);
   2261 
   2262 	/* Non-AMT based hardware can now take control from firmware */
   2263 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2264 		wm_get_hw_control(sc);
   2265 
   2266 	/*
   2267 	 * Read the Ethernet address from the EEPROM, if not first found
   2268 	 * in device properties.
   2269 	 */
   2270 	ea = prop_dictionary_get(dict, "mac-address");
   2271 	if (ea != NULL) {
   2272 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2273 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2274 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2275 	} else {
   2276 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2277 			aprint_error_dev(sc->sc_dev,
   2278 			    "unable to read Ethernet address\n");
   2279 			goto out;
   2280 		}
   2281 	}
   2282 
   2283 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2284 	    ether_sprintf(enaddr));
   2285 
   2286 	/*
   2287 	 * Read the config info from the EEPROM, and set up various
   2288 	 * bits in the control registers based on their contents.
   2289 	 */
   2290 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2291 	if (pn != NULL) {
   2292 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2293 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2294 	} else {
   2295 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2296 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2297 			goto out;
   2298 		}
   2299 	}
   2300 
   2301 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2302 	if (pn != NULL) {
   2303 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2304 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2305 	} else {
   2306 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2307 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2308 			goto out;
   2309 		}
   2310 	}
   2311 
   2312 	/* check for WM_F_WOL */
   2313 	switch (sc->sc_type) {
   2314 	case WM_T_82542_2_0:
   2315 	case WM_T_82542_2_1:
   2316 	case WM_T_82543:
   2317 		/* dummy? */
   2318 		eeprom_data = 0;
   2319 		apme_mask = NVM_CFG3_APME;
   2320 		break;
   2321 	case WM_T_82544:
   2322 		apme_mask = NVM_CFG2_82544_APM_EN;
   2323 		eeprom_data = cfg2;
   2324 		break;
   2325 	case WM_T_82546:
   2326 	case WM_T_82546_3:
   2327 	case WM_T_82571:
   2328 	case WM_T_82572:
   2329 	case WM_T_82573:
   2330 	case WM_T_82574:
   2331 	case WM_T_82583:
   2332 	case WM_T_80003:
   2333 	default:
   2334 		apme_mask = NVM_CFG3_APME;
   2335 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2336 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2337 		break;
   2338 	case WM_T_82575:
   2339 	case WM_T_82576:
   2340 	case WM_T_82580:
   2341 	case WM_T_I350:
   2342 	case WM_T_I354: /* XXX ok? */
   2343 	case WM_T_ICH8:
   2344 	case WM_T_ICH9:
   2345 	case WM_T_ICH10:
   2346 	case WM_T_PCH:
   2347 	case WM_T_PCH2:
   2348 	case WM_T_PCH_LPT:
   2349 	case WM_T_PCH_SPT:
   2350 		/* XXX The funcid should be checked on some devices */
   2351 		apme_mask = WUC_APME;
   2352 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2353 		break;
   2354 	}
   2355 
   2356 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2357 	if ((eeprom_data & apme_mask) != 0)
   2358 		sc->sc_flags |= WM_F_WOL;
   2359 #ifdef WM_DEBUG
   2360 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2361 		printf("WOL\n");
   2362 #endif
   2363 
   2364 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2365 		/* Check NVM for autonegotiation */
   2366 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2367 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2368 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2369 		}
   2370 	}
   2371 
    2372 	/*
    2373 	 * XXX need special handling for some multiple-port cards
    2374 	 * to disable a particular port.
    2375 	 */
   2376 
   2377 	if (sc->sc_type >= WM_T_82544) {
   2378 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2379 		if (pn != NULL) {
   2380 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2381 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2382 		} else {
   2383 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2384 				aprint_error_dev(sc->sc_dev,
   2385 				    "unable to read SWDPIN\n");
   2386 				goto out;
   2387 			}
   2388 		}
   2389 	}
   2390 
   2391 	if (cfg1 & NVM_CFG1_ILOS)
   2392 		sc->sc_ctrl |= CTRL_ILOS;
   2393 
    2394 	/*
    2395 	 * XXX
    2396 	 * This code isn't correct because pins 2 and 3 are located at
    2397 	 * different positions on newer chips.  Check all the datasheets.
    2398 	 *
    2399 	 * Until this problem is resolved, only apply it up to the 82580.
    2400 	 */
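         	/*
         	 * The SWDPIN NVM word packs two 4-bit fields, the
         	 * software-definable pin directions (SWDPIO) and the pin
         	 * values (SWDPIN); each nibble is extracted and shifted
         	 * into its field of the CTRL register below.
         	 */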
   2401 	if (sc->sc_type <= WM_T_82580) {
   2402 		if (sc->sc_type >= WM_T_82544) {
   2403 			sc->sc_ctrl |=
   2404 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2405 			    CTRL_SWDPIO_SHIFT;
   2406 			sc->sc_ctrl |=
   2407 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2408 			    CTRL_SWDPINS_SHIFT;
   2409 		} else {
   2410 			sc->sc_ctrl |=
   2411 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2412 			    CTRL_SWDPIO_SHIFT;
   2413 		}
   2414 	}
   2415 
   2416 	/* XXX For other than 82580? */
   2417 	if (sc->sc_type == WM_T_82580) {
   2418 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2419 		if (nvmword & __BIT(13))
   2420 			sc->sc_ctrl |= CTRL_ILOS;
   2421 	}
   2422 
   2423 #if 0
   2424 	if (sc->sc_type >= WM_T_82544) {
   2425 		if (cfg1 & NVM_CFG1_IPS0)
   2426 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2427 		if (cfg1 & NVM_CFG1_IPS1)
   2428 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2429 		sc->sc_ctrl_ext |=
   2430 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2431 		    CTRL_EXT_SWDPIO_SHIFT;
   2432 		sc->sc_ctrl_ext |=
   2433 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2434 		    CTRL_EXT_SWDPINS_SHIFT;
   2435 	} else {
   2436 		sc->sc_ctrl_ext |=
   2437 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2438 		    CTRL_EXT_SWDPIO_SHIFT;
   2439 	}
   2440 #endif
   2441 
   2442 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2443 #if 0
   2444 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2445 #endif
   2446 
   2447 	if (sc->sc_type == WM_T_PCH) {
   2448 		uint16_t val;
   2449 
   2450 		/* Save the NVM K1 bit setting */
   2451 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2452 
   2453 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2454 			sc->sc_nvm_k1_enabled = 1;
   2455 		else
   2456 			sc->sc_nvm_k1_enabled = 0;
   2457 	}
   2458 
    2459 	/*
    2460 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize
    2461 	 * the media structures accordingly.
    2462 	 */
   2463 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2464 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2465 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2466 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2467 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2468 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2469 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2470 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2471 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2472 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2473 	    || (sc->sc_type == WM_T_I211)) {
   2474 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2475 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
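         		/*
         		 * The link mode field selects the media: 1000KX is
         		 * treated as SERDES; SGMII is copper via MDIO, or
         		 * falls through to the SFP probe when the PHY is on
         		 * I2C; PCIE_SERDES defers to the SFP module type;
         		 * and GMII (the default) is copper.
         		 */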
   2476 		switch (link_mode) {
   2477 		case CTRL_EXT_LINK_MODE_1000KX:
   2478 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2479 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2480 			break;
   2481 		case CTRL_EXT_LINK_MODE_SGMII:
   2482 			if (wm_sgmii_uses_mdio(sc)) {
   2483 				aprint_verbose_dev(sc->sc_dev,
   2484 				    "SGMII(MDIO)\n");
   2485 				sc->sc_flags |= WM_F_SGMII;
   2486 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2487 				break;
   2488 			}
   2489 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2490 			/*FALLTHROUGH*/
   2491 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2492 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2493 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2494 				if (link_mode
   2495 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2496 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2497 					sc->sc_flags |= WM_F_SGMII;
   2498 				} else {
   2499 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2500 					aprint_verbose_dev(sc->sc_dev,
   2501 					    "SERDES\n");
   2502 				}
   2503 				break;
   2504 			}
   2505 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2506 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2507 
   2508 			/* Change current link mode setting */
   2509 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2510 			switch (sc->sc_mediatype) {
   2511 			case WM_MEDIATYPE_COPPER:
   2512 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2513 				break;
   2514 			case WM_MEDIATYPE_SERDES:
   2515 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2516 				break;
   2517 			default:
   2518 				break;
   2519 			}
   2520 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2521 			break;
   2522 		case CTRL_EXT_LINK_MODE_GMII:
   2523 		default:
   2524 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2525 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2526 			break;
   2527 		}
   2528 
    2529 		/* Only enable the I2C interface when in SGMII mode. */
    2530 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2531 			reg |= CTRL_EXT_I2C_ENA;
    2532 		else
    2533 			reg &= ~CTRL_EXT_I2C_ENA;
   2534 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2535 
   2536 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2537 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2538 		else
   2539 			wm_tbi_mediainit(sc);
   2540 	} else if (sc->sc_type < WM_T_82543 ||
   2541 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2542 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2543 			aprint_error_dev(sc->sc_dev,
   2544 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2546 		}
   2547 		wm_tbi_mediainit(sc);
   2548 	} else {
   2549 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2550 			aprint_error_dev(sc->sc_dev,
   2551 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2552 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2553 		}
   2554 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2555 	}
   2556 
   2557 	ifp = &sc->sc_ethercom.ec_if;
   2558 	xname = device_xname(sc->sc_dev);
   2559 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2560 	ifp->if_softc = sc;
   2561 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2562 #ifdef WM_MPSAFE
   2563 	ifp->if_extflags = IFEF_START_MPSAFE;
   2564 #endif
   2565 	ifp->if_ioctl = wm_ioctl;
   2566 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2567 		ifp->if_start = wm_nq_start;
    2568 		/*
    2569 		 * When the number of CPUs is one and the controller can use
    2570 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2571 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2572 		 * other for link status changes.
    2573 		 * In this situation, wm_nq_transmit() is disadvantageous
    2574 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    2575 		 */
   2576 		if (wm_is_using_multiqueue(sc))
   2577 			ifp->if_transmit = wm_nq_transmit;
   2578 	} else {
   2579 		ifp->if_start = wm_start;
    2580 		/*
    2581 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2582 		 */
   2583 		if (wm_is_using_multiqueue(sc))
   2584 			ifp->if_transmit = wm_transmit;
   2585 	}
   2586 	ifp->if_watchdog = wm_watchdog;
   2587 	ifp->if_init = wm_init;
   2588 	ifp->if_stop = wm_stop;
   2589 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2590 	IFQ_SET_READY(&ifp->if_snd);
   2591 
   2592 	/* Check for jumbo frame */
   2593 	switch (sc->sc_type) {
   2594 	case WM_T_82573:
   2595 		/* XXX limited to 9234 if ASPM is disabled */
   2596 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2597 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2598 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2599 		break;
   2600 	case WM_T_82571:
   2601 	case WM_T_82572:
   2602 	case WM_T_82574:
   2603 	case WM_T_82575:
   2604 	case WM_T_82576:
   2605 	case WM_T_82580:
   2606 	case WM_T_I350:
    2607 	case WM_T_I354: /* XXX ok? */
   2608 	case WM_T_I210:
   2609 	case WM_T_I211:
   2610 	case WM_T_80003:
   2611 	case WM_T_ICH9:
   2612 	case WM_T_ICH10:
   2613 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2614 	case WM_T_PCH_LPT:
   2615 	case WM_T_PCH_SPT:
   2616 		/* XXX limited to 9234 */
   2617 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2618 		break;
   2619 	case WM_T_PCH:
   2620 		/* XXX limited to 4096 */
   2621 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2622 		break;
   2623 	case WM_T_82542_2_0:
   2624 	case WM_T_82542_2_1:
   2625 	case WM_T_82583:
   2626 	case WM_T_ICH8:
   2627 		/* No support for jumbo frame */
   2628 		break;
   2629 	default:
   2630 		/* ETHER_MAX_LEN_JUMBO */
   2631 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2632 		break;
   2633 	}
   2634 
   2635 	/* If we're a i82543 or greater, we can support VLANs. */
   2636 	if (sc->sc_type >= WM_T_82543)
   2637 		sc->sc_ethercom.ec_capabilities |=
   2638 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2639 
    2640 	/*
    2641 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2642 	 * on i82543 and later.
    2643 	 */
   2644 	if (sc->sc_type >= WM_T_82543) {
   2645 		ifp->if_capabilities |=
   2646 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2647 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2648 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2649 		    IFCAP_CSUM_TCPv6_Tx |
   2650 		    IFCAP_CSUM_UDPv6_Tx;
   2651 	}
   2652 
   2653 	/*
   2654 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2655 	 *
   2656 	 *	82541GI (8086:1076) ... no
   2657 	 *	82572EI (8086:10b9) ... yes
   2658 	 */
   2659 	if (sc->sc_type >= WM_T_82571) {
   2660 		ifp->if_capabilities |=
   2661 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2662 	}
   2663 
   2664 	/*
   2665 	 * If we're a i82544 or greater (except i82547), we can do
   2666 	 * TCP segmentation offload.
   2667 	 */
   2668 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2669 		ifp->if_capabilities |= IFCAP_TSOv4;
   2670 	}
   2671 
   2672 	if (sc->sc_type >= WM_T_82571) {
   2673 		ifp->if_capabilities |= IFCAP_TSOv6;
   2674 	}
   2675 
   2676 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2677 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2678 
   2679 #ifdef WM_MPSAFE
   2680 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2681 #else
   2682 	sc->sc_core_lock = NULL;
   2683 #endif
   2684 
   2685 	/* Attach the interface. */
   2686 	if_initialize(ifp);
   2687 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2688 	ether_ifattach(ifp, enaddr);
   2689 	if_register(ifp);
   2690 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2691 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2692 			  RND_FLAG_DEFAULT);
   2693 
   2694 #ifdef WM_EVENT_COUNTERS
   2695 	/* Attach event counters. */
   2696 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2697 	    NULL, xname, "linkintr");
   2698 
   2699 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2700 	    NULL, xname, "tx_xoff");
   2701 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2702 	    NULL, xname, "tx_xon");
   2703 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2704 	    NULL, xname, "rx_xoff");
   2705 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2706 	    NULL, xname, "rx_xon");
   2707 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2708 	    NULL, xname, "rx_macctl");
   2709 #endif /* WM_EVENT_COUNTERS */
   2710 
   2711 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2712 		pmf_class_network_register(self, ifp);
   2713 	else
   2714 		aprint_error_dev(self, "couldn't establish power handler\n");
   2715 
   2716 	sc->sc_flags |= WM_F_ATTACHED;
   2717  out:
   2718 	return;
   2719 }
   2720 
   2721 /* The detach function (ca_detach) */
   2722 static int
   2723 wm_detach(device_t self, int flags __unused)
   2724 {
   2725 	struct wm_softc *sc = device_private(self);
   2726 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2727 	int i;
   2728 
   2729 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2730 		return 0;
   2731 
   2732 	/* Stop the interface. Callouts are stopped in it. */
   2733 	wm_stop(ifp, 1);
   2734 
   2735 	pmf_device_deregister(self);
   2736 
   2737 #ifdef WM_EVENT_COUNTERS
   2738 	evcnt_detach(&sc->sc_ev_linkintr);
   2739 
   2740 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2741 	evcnt_detach(&sc->sc_ev_tx_xon);
   2742 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2743 	evcnt_detach(&sc->sc_ev_rx_xon);
   2744 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2745 #endif /* WM_EVENT_COUNTERS */
   2746 
   2747 	/* Tell the firmware about the release */
   2748 	WM_CORE_LOCK(sc);
   2749 	wm_release_manageability(sc);
   2750 	wm_release_hw_control(sc);
   2751 	wm_enable_wakeup(sc);
   2752 	WM_CORE_UNLOCK(sc);
   2753 
   2754 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2755 
   2756 	/* Delete all remaining media. */
   2757 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2758 
   2759 	ether_ifdetach(ifp);
   2760 	if_detach(ifp);
   2761 	if_percpuq_destroy(sc->sc_ipq);
   2762 
   2763 	/* Unload RX dmamaps and free mbufs */
   2764 	for (i = 0; i < sc->sc_nqueues; i++) {
   2765 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2766 		mutex_enter(rxq->rxq_lock);
   2767 		wm_rxdrain(rxq);
   2768 		mutex_exit(rxq->rxq_lock);
   2769 	}
   2770 	/* Must unlock here */
   2771 
   2772 	/* Disestablish the interrupt handler */
   2773 	for (i = 0; i < sc->sc_nintrs; i++) {
   2774 		if (sc->sc_ihs[i] != NULL) {
   2775 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2776 			sc->sc_ihs[i] = NULL;
   2777 		}
   2778 	}
   2779 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2780 
   2781 	wm_free_txrx_queues(sc);
   2782 
   2783 	/* Unmap the registers */
   2784 	if (sc->sc_ss) {
   2785 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2786 		sc->sc_ss = 0;
   2787 	}
   2788 	if (sc->sc_ios) {
   2789 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2790 		sc->sc_ios = 0;
   2791 	}
   2792 	if (sc->sc_flashs) {
   2793 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2794 		sc->sc_flashs = 0;
   2795 	}
   2796 
   2797 	if (sc->sc_core_lock)
   2798 		mutex_obj_free(sc->sc_core_lock);
   2799 	if (sc->sc_ich_phymtx)
   2800 		mutex_obj_free(sc->sc_ich_phymtx);
   2801 	if (sc->sc_ich_nvmmtx)
   2802 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2803 
   2804 	return 0;
   2805 }
   2806 
   2807 static bool
   2808 wm_suspend(device_t self, const pmf_qual_t *qual)
   2809 {
   2810 	struct wm_softc *sc = device_private(self);
   2811 
   2812 	wm_release_manageability(sc);
   2813 	wm_release_hw_control(sc);
   2814 	wm_enable_wakeup(sc);
   2815 
   2816 	return true;
   2817 }
   2818 
   2819 static bool
   2820 wm_resume(device_t self, const pmf_qual_t *qual)
   2821 {
   2822 	struct wm_softc *sc = device_private(self);
   2823 
   2824 	wm_init_manageability(sc);
   2825 
   2826 	return true;
   2827 }
   2828 
   2829 /*
   2830  * wm_watchdog:		[ifnet interface function]
   2831  *
   2832  *	Watchdog timer handler.
   2833  */
   2834 static void
   2835 wm_watchdog(struct ifnet *ifp)
   2836 {
   2837 	int qid;
   2838 	struct wm_softc *sc = ifp->if_softc;
   2839 
   2840 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2841 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2842 
   2843 		wm_watchdog_txq(ifp, txq);
   2844 	}
   2845 
   2846 	/* Reset the interface. */
   2847 	(void) wm_init(ifp);
   2848 
   2849 	/*
    2850 	 * Some upper layer processing, e.g. ALTQ or a single-CPU system,
    2851 	 * still calls ifp->if_start() directly.
   2852 	 */
   2853 	/* Try to get more packets going. */
   2854 	ifp->if_start(ifp);
   2855 }
   2856 
   2857 static void
   2858 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2859 {
   2860 	struct wm_softc *sc = ifp->if_softc;
   2861 
   2862 	/*
   2863 	 * Since we're using delayed interrupts, sweep up
   2864 	 * before we report an error.
   2865 	 */
   2866 	mutex_enter(txq->txq_lock);
   2867 	wm_txeof(sc, txq);
   2868 	mutex_exit(txq->txq_lock);
   2869 
   2870 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2871 #ifdef WM_DEBUG
   2872 		int i, j;
   2873 		struct wm_txsoft *txs;
   2874 #endif
   2875 		log(LOG_ERR,
   2876 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2877 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2878 		    txq->txq_next);
   2879 		ifp->if_oerrors++;
   2880 #ifdef WM_DEBUG
    2881 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2882 		    i = WM_NEXTTXS(txq, i)) {
    2883 			txs = &txq->txq_soft[i];
    2884 			printf("txs %d tx %d -> %d\n",
    2885 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2886 			for (j = txs->txs_firstdesc; ;
    2887 			    j = WM_NEXTTX(txq, j)) {
    2888 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2889 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2890 				printf("\t %#08x%08x\n",
    2891 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2892 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2893 				if (j == txs->txs_lastdesc)
    2894 					break;
    2895 			}
    2896 		}
   2897 #endif
   2898 	}
   2899 }
   2900 
   2901 /*
   2902  * wm_tick:
   2903  *
   2904  *	One second timer, used to check link status, sweep up
   2905  *	completed transmit jobs, etc.
   2906  */
   2907 static void
   2908 wm_tick(void *arg)
   2909 {
   2910 	struct wm_softc *sc = arg;
   2911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2912 #ifndef WM_MPSAFE
   2913 	int s = splnet();
   2914 #endif
   2915 
   2916 	WM_CORE_LOCK(sc);
   2917 
   2918 	if (sc->sc_core_stopping)
   2919 		goto out;
   2920 
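         	/*
         	 * The statistics registers read below are clear-on-read, so
         	 * each tick accumulates the deltas into the event counters.
         	 */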
   2921 	if (sc->sc_type >= WM_T_82542_2_1) {
   2922 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2923 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2924 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2925 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2926 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2927 	}
   2928 
   2929 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2930 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2931 	    + CSR_READ(sc, WMREG_CRCERRS)
   2932 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2933 	    + CSR_READ(sc, WMREG_SYMERRC)
   2934 	    + CSR_READ(sc, WMREG_RXERRC)
   2935 	    + CSR_READ(sc, WMREG_SEC)
   2936 	    + CSR_READ(sc, WMREG_CEXTERR)
   2937 	    + CSR_READ(sc, WMREG_RLEC);
    2938 	/*
    2939 	 * WMREG_RNBC is incremented when there are no available buffers in
    2940 	 * host memory. It is not the number of dropped packets, because the
    2941 	 * Ethernet controller can still receive packets in that case as long
    2942 	 * as there is space in the PHY's FIFO.
    2943 	 *
    2944 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    2945 	 * if_iqdrops.
    2946 	 */
   2947 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2948 
   2949 	if (sc->sc_flags & WM_F_HAS_MII)
   2950 		mii_tick(&sc->sc_mii);
   2951 	else if ((sc->sc_type >= WM_T_82575)
   2952 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2953 		wm_serdes_tick(sc);
   2954 	else
   2955 		wm_tbi_tick(sc);
   2956 
   2957 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2958 out:
   2959 	WM_CORE_UNLOCK(sc);
   2960 #ifndef WM_MPSAFE
   2961 	splx(s);
   2962 #endif
   2963 }
   2964 
   2965 static int
   2966 wm_ifflags_cb(struct ethercom *ec)
   2967 {
   2968 	struct ifnet *ifp = &ec->ec_if;
   2969 	struct wm_softc *sc = ifp->if_softc;
   2970 	int rc = 0;
   2971 
   2972 	WM_CORE_LOCK(sc);
   2973 
   2974 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2975 	sc->sc_if_flags = ifp->if_flags;
   2976 
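         	/*
         	 * If any flag other than the ones handled in place below
         	 * changed, return ENETRESET so the caller fully reinitializes
         	 * the interface.
         	 */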
   2977 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2978 		rc = ENETRESET;
   2979 		goto out;
   2980 	}
   2981 
   2982 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2983 		wm_set_filter(sc);
   2984 
   2985 	wm_set_vlan(sc);
   2986 
   2987 out:
   2988 	WM_CORE_UNLOCK(sc);
   2989 
   2990 	return rc;
   2991 }
   2992 
   2993 /*
   2994  * wm_ioctl:		[ifnet interface function]
   2995  *
   2996  *	Handle control requests from the operator.
   2997  */
   2998 static int
   2999 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3000 {
   3001 	struct wm_softc *sc = ifp->if_softc;
   3002 	struct ifreq *ifr = (struct ifreq *) data;
   3003 	struct ifaddr *ifa = (struct ifaddr *)data;
   3004 	struct sockaddr_dl *sdl;
   3005 	int s, error;
   3006 
   3007 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3008 		device_xname(sc->sc_dev), __func__));
   3009 
   3010 #ifndef WM_MPSAFE
   3011 	s = splnet();
   3012 #endif
   3013 	switch (cmd) {
   3014 	case SIOCSIFMEDIA:
   3015 	case SIOCGIFMEDIA:
   3016 		WM_CORE_LOCK(sc);
   3017 		/* Flow control requires full-duplex mode. */
   3018 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3019 		    (ifr->ifr_media & IFM_FDX) == 0)
   3020 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3021 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3022 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3023 				/* We can do both TXPAUSE and RXPAUSE. */
   3024 				ifr->ifr_media |=
   3025 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3026 			}
   3027 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3028 		}
   3029 		WM_CORE_UNLOCK(sc);
   3030 #ifdef WM_MPSAFE
   3031 		s = splnet();
   3032 #endif
   3033 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3034 #ifdef WM_MPSAFE
   3035 		splx(s);
   3036 #endif
   3037 		break;
   3038 	case SIOCINITIFADDR:
   3039 		WM_CORE_LOCK(sc);
   3040 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3041 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3042 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3043 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3044 			/* unicast address is first multicast entry */
   3045 			wm_set_filter(sc);
   3046 			error = 0;
   3047 			WM_CORE_UNLOCK(sc);
   3048 			break;
   3049 		}
   3050 		WM_CORE_UNLOCK(sc);
   3051 		/*FALLTHROUGH*/
   3052 	default:
   3053 #ifdef WM_MPSAFE
   3054 		s = splnet();
   3055 #endif
   3056 		/* It may call wm_start, so unlock here */
   3057 		error = ether_ioctl(ifp, cmd, data);
   3058 #ifdef WM_MPSAFE
   3059 		splx(s);
   3060 #endif
   3061 		if (error != ENETRESET)
   3062 			break;
   3063 
   3064 		error = 0;
   3065 
   3066 		if (cmd == SIOCSIFCAP) {
   3067 			error = (*ifp->if_init)(ifp);
   3068 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3069 			;
   3070 		else if (ifp->if_flags & IFF_RUNNING) {
   3071 			/*
   3072 			 * Multicast list has changed; set the hardware filter
   3073 			 * accordingly.
   3074 			 */
   3075 			WM_CORE_LOCK(sc);
   3076 			wm_set_filter(sc);
   3077 			WM_CORE_UNLOCK(sc);
   3078 		}
   3079 		break;
   3080 	}
   3081 
   3082 #ifndef WM_MPSAFE
   3083 	splx(s);
   3084 #endif
   3085 	return error;
   3086 }
   3087 
   3088 /* MAC address related */
   3089 
   3090 /*
    3091  * Get the offset of the MAC address and return it.
    3092  * If an error occurs, use offset 0.
   3093  */
   3094 static uint16_t
   3095 wm_check_alt_mac_addr(struct wm_softc *sc)
   3096 {
   3097 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3098 	uint16_t offset = NVM_OFF_MACADDR;
   3099 
   3100 	/* Try to read alternative MAC address pointer */
   3101 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3102 		return 0;
   3103 
    3104 	/* Check whether the pointer is valid or not. */
   3105 	if ((offset == 0x0000) || (offset == 0xffff))
   3106 		return 0;
   3107 
   3108 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3109 	/*
    3110 	 * Check whether the alternative MAC address is valid or not.
    3111 	 * Some cards have a non-0xffff pointer but don't actually use
    3112 	 * an alternative MAC address.
    3113 	 *
    3114 	 * A valid unicast address must have the multicast/broadcast bit
    3115 	 * (the LSB of the first octet) clear, so check that bit.
   3115 	 */
   3116 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3117 		if (((myea[0] & 0xff) & 0x01) == 0)
   3118 			return offset; /* Found */
   3119 
   3120 	/* Not found */
   3121 	return 0;
   3122 }
   3123 
   3124 static int
   3125 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3126 {
   3127 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3128 	uint16_t offset = NVM_OFF_MACADDR;
   3129 	int do_invert = 0;
   3130 
   3131 	switch (sc->sc_type) {
   3132 	case WM_T_82580:
   3133 	case WM_T_I350:
   3134 	case WM_T_I354:
   3135 		/* EEPROM Top Level Partitioning */
   3136 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3137 		break;
   3138 	case WM_T_82571:
   3139 	case WM_T_82575:
   3140 	case WM_T_82576:
   3141 	case WM_T_80003:
   3142 	case WM_T_I210:
   3143 	case WM_T_I211:
   3144 		offset = wm_check_alt_mac_addr(sc);
   3145 		if (offset == 0)
   3146 			if ((sc->sc_funcid & 0x01) == 1)
   3147 				do_invert = 1;
   3148 		break;
   3149 	default:
   3150 		if ((sc->sc_funcid & 0x01) == 1)
   3151 			do_invert = 1;
   3152 		break;
   3153 	}
   3154 
   3155 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3156 		goto bad;
   3157 
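         	/*
         	 * Each 16-bit NVM word is stored little-endian: for example,
         	 * myea[0] == 0x2211 yields enaddr[0] = 0x11, enaddr[1] = 0x22.
         	 */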
   3158 	enaddr[0] = myea[0] & 0xff;
   3159 	enaddr[1] = myea[0] >> 8;
   3160 	enaddr[2] = myea[1] & 0xff;
   3161 	enaddr[3] = myea[1] >> 8;
   3162 	enaddr[4] = myea[2] & 0xff;
   3163 	enaddr[5] = myea[2] >> 8;
   3164 
   3165 	/*
   3166 	 * Toggle the LSB of the MAC address on the second port
   3167 	 * of some dual port cards.
   3168 	 */
   3169 	if (do_invert != 0)
   3170 		enaddr[5] ^= 1;
   3171 
   3172 	return 0;
   3173 
   3174  bad:
   3175 	return -1;
   3176 }
   3177 
   3178 /*
   3179  * wm_set_ral:
   3180  *
    3181  *	Set an entry in the receive address list.
   3182  */
   3183 static void
   3184 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3185 {
   3186 	uint32_t ral_lo, ral_hi;
   3187 
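         	/*
         	 * RAL_LO holds the first four octets of the address, RAL_HI the
         	 * last two; RAL_AV marks the entry as valid.
         	 */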
   3188 	if (enaddr != NULL) {
   3189 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3190 		    (enaddr[3] << 24);
   3191 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3192 		ral_hi |= RAL_AV;
   3193 	} else {
   3194 		ral_lo = 0;
   3195 		ral_hi = 0;
   3196 	}
   3197 
   3198 	if (sc->sc_type >= WM_T_82544) {
   3199 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3200 		    ral_lo);
   3201 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3202 		    ral_hi);
   3203 	} else {
   3204 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3205 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3206 	}
   3207 }
   3208 
   3209 /*
   3210  * wm_mchash:
   3211  *
   3212  *	Compute the hash of the multicast address for the 4096-bit
    3213  *	multicast filter (1024-bit on the ICH and PCH variants).
   3214  */
   3215 static uint32_t
   3216 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3217 {
   3218 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3219 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3220 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3221 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3222 	uint32_t hash;
   3223 
   3224 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3225 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3226 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3227 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3228 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3229 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3230 		return (hash & 0x3ff);
   3231 	}
   3232 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3233 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3234 
   3235 	return (hash & 0xfff);
   3236 }
   3237 
   3238 /*
   3239  * wm_set_filter:
   3240  *
   3241  *	Set up the receive filter.
   3242  */
   3243 static void
   3244 wm_set_filter(struct wm_softc *sc)
   3245 {
   3246 	struct ethercom *ec = &sc->sc_ethercom;
   3247 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3248 	struct ether_multi *enm;
   3249 	struct ether_multistep step;
   3250 	bus_addr_t mta_reg;
   3251 	uint32_t hash, reg, bit;
   3252 	int i, size, ralmax;
   3253 
   3254 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3255 		device_xname(sc->sc_dev), __func__));
   3256 
   3257 	if (sc->sc_type >= WM_T_82544)
   3258 		mta_reg = WMREG_CORDOVA_MTA;
   3259 	else
   3260 		mta_reg = WMREG_MTA;
   3261 
   3262 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3263 
   3264 	if (ifp->if_flags & IFF_BROADCAST)
   3265 		sc->sc_rctl |= RCTL_BAM;
   3266 	if (ifp->if_flags & IFF_PROMISC) {
   3267 		sc->sc_rctl |= RCTL_UPE;
   3268 		goto allmulti;
   3269 	}
   3270 
   3271 	/*
   3272 	 * Set the station address in the first RAL slot, and
   3273 	 * clear the remaining slots.
   3274 	 */
   3275 	if (sc->sc_type == WM_T_ICH8)
    3276 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3277 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3278 	    || (sc->sc_type == WM_T_PCH))
   3279 		size = WM_RAL_TABSIZE_ICH8;
   3280 	else if (sc->sc_type == WM_T_PCH2)
   3281 		size = WM_RAL_TABSIZE_PCH2;
    3282 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3283 		size = WM_RAL_TABSIZE_PCH_LPT;
   3284 	else if (sc->sc_type == WM_T_82575)
   3285 		size = WM_RAL_TABSIZE_82575;
   3286 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3287 		size = WM_RAL_TABSIZE_82576;
   3288 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3289 		size = WM_RAL_TABSIZE_I350;
   3290 	else
   3291 		size = WM_RAL_TABSIZE;
   3292 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3293 
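         	/*
         	 * On PCH_LPT and PCH_SPT, the FWSM WLOCK_MAC field indicates
         	 * how many receive address entries the firmware lets us use:
         	 * 0 = all entries, 1 = RAR[0] only, otherwise that many SHRA
         	 * registers plus RAR[0].
         	 */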
   3294 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3295 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3296 		switch (i) {
   3297 		case 0:
   3298 			/* We can use all entries */
   3299 			ralmax = size;
   3300 			break;
   3301 		case 1:
   3302 			/* Only RAR[0] */
   3303 			ralmax = 1;
   3304 			break;
   3305 		default:
   3306 			/* available SHRA + RAR[0] */
   3307 			ralmax = i + 1;
   3308 		}
   3309 	} else
   3310 		ralmax = size;
   3311 	for (i = 1; i < size; i++) {
   3312 		if (i < ralmax)
   3313 			wm_set_ral(sc, NULL, i);
   3314 	}
   3315 
   3316 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3317 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3318 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3319 	    || (sc->sc_type == WM_T_PCH_SPT))
   3320 		size = WM_ICH8_MC_TABSIZE;
   3321 	else
   3322 		size = WM_MC_TABSIZE;
   3323 	/* Clear out the multicast table. */
   3324 	for (i = 0; i < size; i++)
   3325 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3326 
   3327 	ETHER_LOCK(ec);
   3328 	ETHER_FIRST_MULTI(step, ec, enm);
   3329 	while (enm != NULL) {
   3330 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3331 			ETHER_UNLOCK(ec);
   3332 			/*
   3333 			 * We must listen to a range of multicast addresses.
   3334 			 * For now, just accept all multicasts, rather than
   3335 			 * trying to set only those filter bits needed to match
   3336 			 * the range.  (At this time, the only use of address
   3337 			 * ranges is for IP multicast routing, for which the
   3338 			 * range is big enough to require all bits set.)
   3339 			 */
   3340 			goto allmulti;
   3341 		}
   3342 
   3343 		hash = wm_mchash(sc, enm->enm_addrlo);
   3344 
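         		/*
         		 * The upper bits of the hash select one of the 32-bit MTA
         		 * registers; the low 5 bits select a bit within it.
         		 */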
   3345 		reg = (hash >> 5);
   3346 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3347 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3348 		    || (sc->sc_type == WM_T_PCH2)
   3349 		    || (sc->sc_type == WM_T_PCH_LPT)
   3350 		    || (sc->sc_type == WM_T_PCH_SPT))
   3351 			reg &= 0x1f;
   3352 		else
   3353 			reg &= 0x7f;
   3354 		bit = hash & 0x1f;
   3355 
   3356 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3357 		hash |= 1U << bit;
   3358 
   3359 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3360 			/*
   3361 			 * 82544 Errata 9: Certain register cannot be written
   3362 			 * with particular alignments in PCI-X bus operation
   3363 			 * (FCAH, MTA and VFTA).
   3364 			 */
   3365 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3366 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3367 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3368 		} else
   3369 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3370 
   3371 		ETHER_NEXT_MULTI(step, enm);
   3372 	}
   3373 	ETHER_UNLOCK(ec);
   3374 
   3375 	ifp->if_flags &= ~IFF_ALLMULTI;
   3376 	goto setit;
   3377 
   3378  allmulti:
   3379 	ifp->if_flags |= IFF_ALLMULTI;
   3380 	sc->sc_rctl |= RCTL_MPE;
   3381 
   3382  setit:
   3383 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3384 }
   3385 
   3386 /* Reset and init related */
   3387 
   3388 static void
   3389 wm_set_vlan(struct wm_softc *sc)
   3390 {
   3391 
   3392 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3393 		device_xname(sc->sc_dev), __func__));
   3394 
   3395 	/* Deal with VLAN enables. */
   3396 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3397 		sc->sc_ctrl |= CTRL_VME;
   3398 	else
   3399 		sc->sc_ctrl &= ~CTRL_VME;
   3400 
   3401 	/* Write the control registers. */
   3402 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3403 }
   3404 
   3405 static void
   3406 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3407 {
   3408 	uint32_t gcr;
   3409 	pcireg_t ctrl2;
   3410 
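         	/*
         	 * Summary: leave a non-default timeout alone; otherwise program
         	 * a 10ms timeout via GCR on pre-v2 parts, or a 16ms timeout via
         	 * the PCIe Device Control 2 register on v2-capable parts. In
         	 * all cases, completion timeout resend is disabled below.
         	 */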
   3411 	gcr = CSR_READ(sc, WMREG_GCR);
   3412 
   3413 	/* Only take action if timeout value is defaulted to 0 */
   3414 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3415 		goto out;
   3416 
   3417 	if ((gcr & GCR_CAP_VER2) == 0) {
   3418 		gcr |= GCR_CMPL_TMOUT_10MS;
   3419 		goto out;
   3420 	}
   3421 
   3422 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3423 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3424 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3425 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3426 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3427 
   3428 out:
   3429 	/* Disable completion timeout resend */
   3430 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3431 
   3432 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3433 }
   3434 
   3435 void
   3436 wm_get_auto_rd_done(struct wm_softc *sc)
   3437 {
   3438 	int i;
   3439 
   3440 	/* wait for eeprom to reload */
   3441 	switch (sc->sc_type) {
   3442 	case WM_T_82571:
   3443 	case WM_T_82572:
   3444 	case WM_T_82573:
   3445 	case WM_T_82574:
   3446 	case WM_T_82583:
   3447 	case WM_T_82575:
   3448 	case WM_T_82576:
   3449 	case WM_T_82580:
   3450 	case WM_T_I350:
   3451 	case WM_T_I354:
   3452 	case WM_T_I210:
   3453 	case WM_T_I211:
   3454 	case WM_T_80003:
   3455 	case WM_T_ICH8:
   3456 	case WM_T_ICH9:
   3457 		for (i = 0; i < 10; i++) {
   3458 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3459 				break;
   3460 			delay(1000);
   3461 		}
   3462 		if (i == 10) {
   3463 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3464 			    "complete\n", device_xname(sc->sc_dev));
   3465 		}
   3466 		break;
   3467 	default:
   3468 		break;
   3469 	}
   3470 }
   3471 
   3472 void
   3473 wm_lan_init_done(struct wm_softc *sc)
   3474 {
   3475 	uint32_t reg = 0;
   3476 	int i;
   3477 
   3478 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3479 		device_xname(sc->sc_dev), __func__));
   3480 
   3481 	/* Wait for eeprom to reload */
   3482 	switch (sc->sc_type) {
   3483 	case WM_T_ICH10:
   3484 	case WM_T_PCH:
   3485 	case WM_T_PCH2:
   3486 	case WM_T_PCH_LPT:
   3487 	case WM_T_PCH_SPT:
   3488 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3489 			reg = CSR_READ(sc, WMREG_STATUS);
   3490 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3491 				break;
   3492 			delay(100);
   3493 		}
   3494 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3495 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3496 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3497 		}
   3498 		break;
   3499 	default:
   3500 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3501 		    __func__);
   3502 		break;
   3503 	}
   3504 
   3505 	reg &= ~STATUS_LAN_INIT_DONE;
   3506 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3507 }
   3508 
   3509 void
   3510 wm_get_cfg_done(struct wm_softc *sc)
   3511 {
   3512 	int mask;
   3513 	uint32_t reg;
   3514 	int i;
   3515 
   3516 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3517 		device_xname(sc->sc_dev), __func__));
   3518 
   3519 	/* Wait for eeprom to reload */
   3520 	switch (sc->sc_type) {
   3521 	case WM_T_82542_2_0:
   3522 	case WM_T_82542_2_1:
   3523 		/* null */
   3524 		break;
   3525 	case WM_T_82543:
   3526 	case WM_T_82544:
   3527 	case WM_T_82540:
   3528 	case WM_T_82545:
   3529 	case WM_T_82545_3:
   3530 	case WM_T_82546:
   3531 	case WM_T_82546_3:
   3532 	case WM_T_82541:
   3533 	case WM_T_82541_2:
   3534 	case WM_T_82547:
   3535 	case WM_T_82547_2:
   3536 	case WM_T_82573:
   3537 	case WM_T_82574:
   3538 	case WM_T_82583:
   3539 		/* generic */
   3540 		delay(10*1000);
   3541 		break;
   3542 	case WM_T_80003:
   3543 	case WM_T_82571:
   3544 	case WM_T_82572:
   3545 	case WM_T_82575:
   3546 	case WM_T_82576:
   3547 	case WM_T_82580:
   3548 	case WM_T_I350:
   3549 	case WM_T_I354:
   3550 	case WM_T_I210:
   3551 	case WM_T_I211:
   3552 		if (sc->sc_type == WM_T_82571) {
   3553 			/* Only 82571 shares port 0 */
   3554 			mask = EEMNGCTL_CFGDONE_0;
   3555 		} else
   3556 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3557 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3558 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3559 				break;
   3560 			delay(1000);
   3561 		}
   3562 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3563 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3564 				device_xname(sc->sc_dev), __func__));
   3565 		}
   3566 		break;
   3567 	case WM_T_ICH8:
   3568 	case WM_T_ICH9:
   3569 	case WM_T_ICH10:
   3570 	case WM_T_PCH:
   3571 	case WM_T_PCH2:
   3572 	case WM_T_PCH_LPT:
   3573 	case WM_T_PCH_SPT:
   3574 		delay(10*1000);
   3575 		if (sc->sc_type >= WM_T_ICH10)
   3576 			wm_lan_init_done(sc);
   3577 		else
   3578 			wm_get_auto_rd_done(sc);
   3579 
   3580 		reg = CSR_READ(sc, WMREG_STATUS);
   3581 		if ((reg & STATUS_PHYRA) != 0)
   3582 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3583 		break;
   3584 	default:
   3585 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3586 		    __func__);
   3587 		break;
   3588 	}
   3589 }
   3590 
   3591 /* Init hardware bits */
   3592 void
   3593 wm_initialize_hardware_bits(struct wm_softc *sc)
   3594 {
   3595 	uint32_t tarc0, tarc1, reg;
   3596 
   3597 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3598 		device_xname(sc->sc_dev), __func__));
   3599 
   3600 	/* For 82571 variant, 80003 and ICHs */
   3601 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3602 	    || (sc->sc_type >= WM_T_80003)) {
   3603 
   3604 		/* Transmit Descriptor Control 0 */
   3605 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3606 		reg |= TXDCTL_COUNT_DESC;
   3607 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3608 
   3609 		/* Transmit Descriptor Control 1 */
   3610 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3611 		reg |= TXDCTL_COUNT_DESC;
   3612 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3613 
   3614 		/* TARC0 */
   3615 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3616 		switch (sc->sc_type) {
   3617 		case WM_T_82571:
   3618 		case WM_T_82572:
   3619 		case WM_T_82573:
   3620 		case WM_T_82574:
   3621 		case WM_T_82583:
   3622 		case WM_T_80003:
   3623 			/* Clear bits 30..27 */
   3624 			tarc0 &= ~__BITS(30, 27);
   3625 			break;
   3626 		default:
   3627 			break;
   3628 		}
   3629 
   3630 		switch (sc->sc_type) {
   3631 		case WM_T_82571:
   3632 		case WM_T_82572:
   3633 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3634 
   3635 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3636 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3637 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3638 			/* 8257[12] Errata No.7 */
    3639 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3640 
   3641 			/* TARC1 bit 28 */
   3642 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3643 				tarc1 &= ~__BIT(28);
   3644 			else
   3645 				tarc1 |= __BIT(28);
   3646 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3647 
   3648 			/*
   3649 			 * 8257[12] Errata No.13
    3650 			 * Disable Dynamic Clock Gating.
   3651 			 */
   3652 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3653 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3654 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3655 			break;
   3656 		case WM_T_82573:
   3657 		case WM_T_82574:
   3658 		case WM_T_82583:
   3659 			if ((sc->sc_type == WM_T_82574)
   3660 			    || (sc->sc_type == WM_T_82583))
   3661 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3662 
   3663 			/* Extended Device Control */
   3664 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3665 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3666 			reg |= __BIT(22);	/* Set bit 22 */
   3667 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3668 
   3669 			/* Device Control */
   3670 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3671 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3672 
   3673 			/* PCIe Control Register */
   3674 			/*
   3675 			 * 82573 Errata (unknown).
   3676 			 *
   3677 			 * 82574 Errata 25 and 82583 Errata 12
   3678 			 * "Dropped Rx Packets":
    3679 			 *   NVM image version 2.1.4 and newer doesn't have this bug.
   3680 			 */
   3681 			reg = CSR_READ(sc, WMREG_GCR);
   3682 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3683 			CSR_WRITE(sc, WMREG_GCR, reg);
   3684 
   3685 			if ((sc->sc_type == WM_T_82574)
   3686 			    || (sc->sc_type == WM_T_82583)) {
   3687 				/*
   3688 				 * Document says this bit must be set for
   3689 				 * proper operation.
   3690 				 */
   3691 				reg = CSR_READ(sc, WMREG_GCR);
   3692 				reg |= __BIT(22);
   3693 				CSR_WRITE(sc, WMREG_GCR, reg);
   3694 
   3695 				/*
    3696 				 * Apply a workaround for a hardware erratum
    3697 				 * documented in the errata docs. It fixes an
    3698 				 * issue where error-prone or unreliable PCIe
    3699 				 * completions occur, particularly with ASPM
    3700 				 * enabled. Without the fix, the issue can
    3701 				 * cause Tx timeouts.
   3702 				 */
   3703 				reg = CSR_READ(sc, WMREG_GCR2);
   3704 				reg |= __BIT(0);
   3705 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3706 			}
   3707 			break;
   3708 		case WM_T_80003:
   3709 			/* TARC0 */
   3710 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3711 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3712 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3713 
   3714 			/* TARC1 bit 28 */
   3715 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3716 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3717 				tarc1 &= ~__BIT(28);
   3718 			else
   3719 				tarc1 |= __BIT(28);
   3720 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3721 			break;
   3722 		case WM_T_ICH8:
   3723 		case WM_T_ICH9:
   3724 		case WM_T_ICH10:
   3725 		case WM_T_PCH:
   3726 		case WM_T_PCH2:
   3727 		case WM_T_PCH_LPT:
   3728 		case WM_T_PCH_SPT:
   3729 			/* TARC0 */
   3730 			if ((sc->sc_type == WM_T_ICH8)
   3731 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3732 				/* Set TARC0 bits 29 and 28 */
   3733 				tarc0 |= __BITS(29, 28);
   3734 			}
   3735 			/* Set TARC0 bits 23,24,26,27 */
   3736 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3737 
   3738 			/* CTRL_EXT */
   3739 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3740 			reg |= __BIT(22);	/* Set bit 22 */
   3741 			/*
   3742 			 * Enable PHY low-power state when MAC is at D3
   3743 			 * w/o WoL
   3744 			 */
   3745 			if (sc->sc_type >= WM_T_PCH)
   3746 				reg |= CTRL_EXT_PHYPDEN;
   3747 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3748 
   3749 			/* TARC1 */
   3750 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3751 			/* bit 28 */
   3752 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3753 				tarc1 &= ~__BIT(28);
   3754 			else
   3755 				tarc1 |= __BIT(28);
   3756 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3757 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3758 
   3759 			/* Device Status */
   3760 			if (sc->sc_type == WM_T_ICH8) {
   3761 				reg = CSR_READ(sc, WMREG_STATUS);
   3762 				reg &= ~__BIT(31);
   3763 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3764 
   3765 			}
   3766 
   3767 			/* IOSFPC */
   3768 			if (sc->sc_type == WM_T_PCH_SPT) {
   3769 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3770 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3771 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3772 			}
   3773 			/*
   3774 			 * Work-around descriptor data corruption issue during
   3775 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3776 			 * capability.
   3777 			 */
   3778 			reg = CSR_READ(sc, WMREG_RFCTL);
   3779 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3780 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3781 			break;
   3782 		default:
   3783 			break;
   3784 		}
   3785 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3786 
   3787 		switch (sc->sc_type) {
   3788 		/*
   3789 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3790 		 * Avoid RSS Hash Value bug.
   3791 		 */
   3792 		case WM_T_82571:
   3793 		case WM_T_82572:
   3794 		case WM_T_82573:
   3795 		case WM_T_80003:
   3796 		case WM_T_ICH8:
   3797 			reg = CSR_READ(sc, WMREG_RFCTL);
    3798 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3799 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3800 			break;
   3801 		case WM_T_82574:
    3802 			/* Use extended Rx descriptors. */
   3803 			reg = CSR_READ(sc, WMREG_RFCTL);
   3804 			reg |= WMREG_RFCTL_EXSTEN;
   3805 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3806 			break;
   3807 		default:
   3808 			break;
   3809 		}
   3810 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3811 		/*
   3812 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3813 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3814 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3815 		 * Correctly by the Device"
   3816 		 *
   3817 		 * I354(C2000) Errata AVR53:
   3818 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3819 		 * Hang"
   3820 		 */
   3821 		reg = CSR_READ(sc, WMREG_RFCTL);
   3822 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3823 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3824 	}
   3825 }
   3826 
   3827 static uint32_t
   3828 wm_rxpbs_adjust_82580(uint32_t val)
   3829 {
   3830 	uint32_t rv = 0;
   3831 
   3832 	if (val < __arraycount(wm_82580_rxpbs_table))
   3833 		rv = wm_82580_rxpbs_table[val];
   3834 
   3835 	return rv;
   3836 }
   3837 
   3838 /*
   3839  * wm_reset_phy:
   3840  *
   3841  *	generic PHY reset function.
   3842  *	Same as e1000_phy_hw_reset_generic()
   3843  */
   3844 static void
   3845 wm_reset_phy(struct wm_softc *sc)
   3846 {
   3847 	uint32_t reg;
   3848 
   3849 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3850 		device_xname(sc->sc_dev), __func__));
   3851 	if (wm_phy_resetisblocked(sc))
   3852 		return;
   3853 
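         	/*
         	 * Toggle CTRL_PHY_RESET: assert it, wait the PHY-specific reset
         	 * delay, deassert it, then give the PHY 150us to come back up.
         	 */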
   3854 	sc->phy.acquire(sc);
   3855 
   3856 	reg = CSR_READ(sc, WMREG_CTRL);
   3857 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3858 	CSR_WRITE_FLUSH(sc);
   3859 
   3860 	delay(sc->phy.reset_delay_us);
   3861 
   3862 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3863 	CSR_WRITE_FLUSH(sc);
   3864 
   3865 	delay(150);
   3866 
   3867 	sc->phy.release(sc);
   3868 
   3869 	wm_get_cfg_done(sc);
   3870 }
   3871 
   3872 static void
   3873 wm_flush_desc_rings(struct wm_softc *sc)
   3874 {
   3875 	pcireg_t preg;
   3876 	uint32_t reg;
   3877 	int nexttx;
   3878 
   3879 	/* First, disable MULR fix in FEXTNVM11 */
   3880 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3881 	reg |= FEXTNVM11_DIS_MULRFIX;
   3882 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3883 
   3884 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3885 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3886 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3887 		struct wm_txqueue *txq;
   3888 		wiseman_txdesc_t *txd;
   3889 
   3890 		/* TX */
   3891 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3892 		    device_xname(sc->sc_dev), preg, reg);
   3893 		reg = CSR_READ(sc, WMREG_TCTL);
   3894 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3895 
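         		/*
         		 * Queue a single dummy descriptor (IFCS, 512 bytes, no
         		 * mbuf) and advance the tail pointer, which presumably
         		 * makes the hardware flush its internal Tx descriptor
         		 * cache.
         		 */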
   3896 		txq = &sc->sc_queue[0].wmq_txq;
   3897 		nexttx = txq->txq_next;
   3898 		txd = &txq->txq_descs[nexttx];
   3899 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3900 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3901 		txd->wtx_fields.wtxu_status = 0;
   3902 		txd->wtx_fields.wtxu_options = 0;
   3903 		txd->wtx_fields.wtxu_vlan = 0;
   3904 
   3905 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3906 			BUS_SPACE_BARRIER_WRITE);
   3907 
   3908 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3909 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3910 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3911 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3912 		delay(250);
   3913 	}
   3914 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3915 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3916 		uint32_t rctl;
   3917 
   3918 		/* RX */
   3919 		printf("%s: Need RX flush (reg = %08x)\n",
   3920 		    device_xname(sc->sc_dev), preg);
   3921 		rctl = CSR_READ(sc, WMREG_RCTL);
   3922 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3923 		CSR_WRITE_FLUSH(sc);
   3924 		delay(150);
   3925 
   3926 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3927 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3928 		reg &= 0xffffc000;
   3929 		/*
   3930 		 * update thresholds: prefetch threshold to 31, host threshold
   3931 		 * to 1 and make sure the granularity is "descriptors" and not
   3932 		 * "cache lines"
   3933 		 */
   3934 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3935 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3936 
   3937 		/*
   3938 		 * momentarily enable the RX ring for the changes to take
   3939 		 * effect
   3940 		 */
   3941 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3942 		CSR_WRITE_FLUSH(sc);
   3943 		delay(150);
   3944 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3945 	}
   3946 }
   3947 
   3948 /*
   3949  * wm_reset:
   3950  *
   3951  *	Reset the i82542 chip.
   3952  */
   3953 static void
   3954 wm_reset(struct wm_softc *sc)
   3955 {
   3956 	int phy_reset = 0;
   3957 	int i, error = 0;
   3958 	uint32_t reg;
   3959 
   3960 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3961 		device_xname(sc->sc_dev), __func__));
   3962 	KASSERT(sc->sc_type != 0);
   3963 
   3964 	/*
   3965 	 * Allocate on-chip memory according to the MTU size.
   3966 	 * The Packet Buffer Allocation register must be written
   3967 	 * before the chip is reset.
   3968 	 */
   3969 	switch (sc->sc_type) {
   3970 	case WM_T_82547:
   3971 	case WM_T_82547_2:
   3972 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3973 		    PBA_22K : PBA_30K;
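         		/*
         		 * The remainder of the 40K packet buffer above sc_pba is
         		 * used as the Tx FIFO; see the PBA_40K arithmetic below.
         		 */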
   3974 		for (i = 0; i < sc->sc_nqueues; i++) {
   3975 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3976 			txq->txq_fifo_head = 0;
   3977 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3978 			txq->txq_fifo_size =
   3979 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3980 			txq->txq_fifo_stall = 0;
   3981 		}
   3982 		break;
   3983 	case WM_T_82571:
   3984 	case WM_T_82572:
   3985 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3986 	case WM_T_80003:
   3987 		sc->sc_pba = PBA_32K;
   3988 		break;
   3989 	case WM_T_82573:
   3990 		sc->sc_pba = PBA_12K;
   3991 		break;
   3992 	case WM_T_82574:
   3993 	case WM_T_82583:
   3994 		sc->sc_pba = PBA_20K;
   3995 		break;
   3996 	case WM_T_82576:
   3997 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3998 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3999 		break;
   4000 	case WM_T_82580:
   4001 	case WM_T_I350:
   4002 	case WM_T_I354:
   4003 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4004 		break;
   4005 	case WM_T_I210:
   4006 	case WM_T_I211:
   4007 		sc->sc_pba = PBA_34K;
   4008 		break;
   4009 	case WM_T_ICH8:
   4010 		/* Workaround for a bit corruption issue in FIFO memory */
   4011 		sc->sc_pba = PBA_8K;
   4012 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4013 		break;
   4014 	case WM_T_ICH9:
   4015 	case WM_T_ICH10:
   4016 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4017 		    PBA_14K : PBA_10K;
   4018 		break;
   4019 	case WM_T_PCH:
   4020 	case WM_T_PCH2:
   4021 	case WM_T_PCH_LPT:
   4022 	case WM_T_PCH_SPT:
   4023 		sc->sc_pba = PBA_26K;
   4024 		break;
   4025 	default:
   4026 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4027 		    PBA_40K : PBA_48K;
   4028 		break;
   4029 	}
   4030 	/*
   4031 	 * Only old or non-multiqueue devices have the PBA register
   4032 	 * XXX Need special handling for 82575.
   4033 	 */
   4034 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4035 	    || (sc->sc_type == WM_T_82575))
   4036 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4037 
   4038 	/* Prevent the PCI-E bus from sticking */
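         	/*
         	 * Disable new GIO master requests and poll for up to
         	 * 800 * 100us = 80ms for STATUS_GIO_M_ENA to clear.
         	 */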
   4039 	if (sc->sc_flags & WM_F_PCIE) {
   4040 		int timeout = 800;
   4041 
   4042 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4043 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4044 
   4045 		while (timeout--) {
   4046 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4047 			    == 0)
   4048 				break;
   4049 			delay(100);
   4050 		}
   4051 	}
   4052 
   4053 	/* Set the completion timeout for interface */
   4054 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4055 	    || (sc->sc_type == WM_T_82580)
   4056 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4057 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4058 		wm_set_pcie_completion_timeout(sc);
   4059 
   4060 	/* Clear interrupt */
   4061 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4062 	if (wm_is_using_msix(sc)) {
   4063 		if (sc->sc_type != WM_T_82574) {
   4064 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4065 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4066 		} else {
   4067 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4068 		}
   4069 	}
   4070 
   4071 	/* Stop the transmit and receive processes. */
   4072 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4073 	sc->sc_rctl &= ~RCTL_EN;
   4074 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4075 	CSR_WRITE_FLUSH(sc);
   4076 
   4077 	/* XXX set_tbi_sbp_82543() */
   4078 
   4079 	delay(10*1000);
   4080 
   4081 	/* Must acquire the MDIO ownership before MAC reset */
   4082 	switch (sc->sc_type) {
   4083 	case WM_T_82573:
   4084 	case WM_T_82574:
   4085 	case WM_T_82583:
   4086 		error = wm_get_hw_semaphore_82573(sc);
   4087 		break;
   4088 	default:
   4089 		break;
   4090 	}
   4091 
   4092 	/*
   4093 	 * 82541 Errata 29? & 82547 Errata 28?
   4094 	 * See also the description about PHY_RST bit in CTRL register
   4095 	 * in 8254x_GBe_SDM.pdf.
   4096 	 */
   4097 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4098 		CSR_WRITE(sc, WMREG_CTRL,
   4099 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4100 		CSR_WRITE_FLUSH(sc);
   4101 		delay(5000);
   4102 	}
   4103 
   4104 	switch (sc->sc_type) {
   4105 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4106 	case WM_T_82541:
   4107 	case WM_T_82541_2:
   4108 	case WM_T_82547:
   4109 	case WM_T_82547_2:
   4110 		/*
   4111 		 * On some chipsets, a reset through a memory-mapped write
   4112 		 * cycle can cause the chip to reset before completing the
   4113 		 * write cycle.  This causes major headache that can be
   4114 		 * avoided by issuing the reset via indirect register writes
   4115 		 * through I/O space.
   4116 		 *
   4117 		 * So, if we successfully mapped the I/O BAR at attach time,
   4118 		 * use that.  Otherwise, try our luck with a memory-mapped
   4119 		 * reset.
   4120 		 */
   4121 		if (sc->sc_flags & WM_F_IOH_VALID)
   4122 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4123 		else
   4124 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4125 		break;
   4126 	case WM_T_82545_3:
   4127 	case WM_T_82546_3:
   4128 		/* Use the shadow control register on these chips. */
   4129 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4130 		break;
   4131 	case WM_T_80003:
   4132 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4133 		sc->phy.acquire(sc);
   4134 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4135 		sc->phy.release(sc);
   4136 		break;
   4137 	case WM_T_ICH8:
   4138 	case WM_T_ICH9:
   4139 	case WM_T_ICH10:
   4140 	case WM_T_PCH:
   4141 	case WM_T_PCH2:
   4142 	case WM_T_PCH_LPT:
   4143 	case WM_T_PCH_SPT:
   4144 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4145 		if (wm_phy_resetisblocked(sc) == false) {
   4146 			/*
   4147 			 * Gate automatic PHY configuration by hardware on
   4148 			 * non-managed 82579
   4149 			 */
   4150 			if ((sc->sc_type == WM_T_PCH2)
   4151 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4152 				== 0))
   4153 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4154 
   4155 			reg |= CTRL_PHY_RESET;
   4156 			phy_reset = 1;
   4157 		} else
   4158 			printf("XXX reset is blocked!!!\n");
   4159 		sc->phy.acquire(sc);
   4160 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4161 		/* Don't insert a completion barrier while resetting */
   4162 		delay(20*1000);
   4163 		mutex_exit(sc->sc_ich_phymtx);
   4164 		break;
   4165 	case WM_T_82580:
   4166 	case WM_T_I350:
   4167 	case WM_T_I354:
   4168 	case WM_T_I210:
   4169 	case WM_T_I211:
   4170 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4171 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4172 			CSR_WRITE_FLUSH(sc);
   4173 		delay(5000);
   4174 		break;
   4175 	case WM_T_82542_2_0:
   4176 	case WM_T_82542_2_1:
   4177 	case WM_T_82543:
   4178 	case WM_T_82540:
   4179 	case WM_T_82545:
   4180 	case WM_T_82546:
   4181 	case WM_T_82571:
   4182 	case WM_T_82572:
   4183 	case WM_T_82573:
   4184 	case WM_T_82574:
   4185 	case WM_T_82575:
   4186 	case WM_T_82576:
   4187 	case WM_T_82583:
   4188 	default:
   4189 		/* Everything else can safely use the documented method. */
   4190 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4191 		break;
   4192 	}
   4193 
   4194 	/* Must release the MDIO ownership after MAC reset */
   4195 	switch (sc->sc_type) {
   4196 	case WM_T_82573:
   4197 	case WM_T_82574:
   4198 	case WM_T_82583:
   4199 		if (error == 0)
   4200 			wm_put_hw_semaphore_82573(sc);
   4201 		break;
   4202 	default:
   4203 		break;
   4204 	}
   4205 
   4206 	if (phy_reset != 0)
   4207 		wm_get_cfg_done(sc);
   4208 
   4209 	/* reload EEPROM */
   4210 	switch (sc->sc_type) {
   4211 	case WM_T_82542_2_0:
   4212 	case WM_T_82542_2_1:
   4213 	case WM_T_82543:
   4214 	case WM_T_82544:
   4215 		delay(10);
   4216 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4217 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4218 		CSR_WRITE_FLUSH(sc);
   4219 		delay(2000);
   4220 		break;
   4221 	case WM_T_82540:
   4222 	case WM_T_82545:
   4223 	case WM_T_82545_3:
   4224 	case WM_T_82546:
   4225 	case WM_T_82546_3:
   4226 		delay(5*1000);
   4227 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4228 		break;
   4229 	case WM_T_82541:
   4230 	case WM_T_82541_2:
   4231 	case WM_T_82547:
   4232 	case WM_T_82547_2:
   4233 		delay(20000);
   4234 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4235 		break;
   4236 	case WM_T_82571:
   4237 	case WM_T_82572:
   4238 	case WM_T_82573:
   4239 	case WM_T_82574:
   4240 	case WM_T_82583:
   4241 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4242 			delay(10);
   4243 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4244 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4245 			CSR_WRITE_FLUSH(sc);
   4246 		}
   4247 		/* check EECD_EE_AUTORD */
   4248 		wm_get_auto_rd_done(sc);
   4249 		/*
   4250 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4251 		 * is set.
   4252 		 */
   4253 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4254 		    || (sc->sc_type == WM_T_82583))
   4255 			delay(25*1000);
   4256 		break;
   4257 	case WM_T_82575:
   4258 	case WM_T_82576:
   4259 	case WM_T_82580:
   4260 	case WM_T_I350:
   4261 	case WM_T_I354:
   4262 	case WM_T_I210:
   4263 	case WM_T_I211:
   4264 	case WM_T_80003:
   4265 		/* check EECD_EE_AUTORD */
   4266 		wm_get_auto_rd_done(sc);
   4267 		break;
   4268 	case WM_T_ICH8:
   4269 	case WM_T_ICH9:
   4270 	case WM_T_ICH10:
   4271 	case WM_T_PCH:
   4272 	case WM_T_PCH2:
   4273 	case WM_T_PCH_LPT:
   4274 	case WM_T_PCH_SPT:
   4275 		break;
   4276 	default:
   4277 		panic("%s: unknown type\n", __func__);
   4278 	}
   4279 
   4280 	/* Check whether EEPROM is present or not */
   4281 	switch (sc->sc_type) {
   4282 	case WM_T_82575:
   4283 	case WM_T_82576:
   4284 	case WM_T_82580:
   4285 	case WM_T_I350:
   4286 	case WM_T_I354:
   4287 	case WM_T_ICH8:
   4288 	case WM_T_ICH9:
   4289 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4290 			/* Not found */
   4291 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4292 			if (sc->sc_type == WM_T_82575)
   4293 				wm_reset_init_script_82575(sc);
   4294 		}
   4295 		break;
   4296 	default:
   4297 		break;
   4298 	}
   4299 
   4300 	if ((sc->sc_type == WM_T_82580)
   4301 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4302 		/* clear global device reset status bit */
   4303 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4304 	}
   4305 
   4306 	/* Clear any pending interrupt events. */
   4307 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4308 	reg = CSR_READ(sc, WMREG_ICR);
   4309 	if (wm_is_using_msix(sc)) {
   4310 		if (sc->sc_type != WM_T_82574) {
   4311 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4312 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4313 		} else
   4314 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4315 	}
   4316 
   4317 	/* reload sc_ctrl */
   4318 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4319 
   4320 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4321 		wm_set_eee_i350(sc);
   4322 
   4323 	/* Clear the host wakeup bit after lcd reset */
   4324 	if (sc->sc_type >= WM_T_PCH) {
   4325 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4326 		    BM_PORT_GEN_CFG);
   4327 		reg &= ~BM_WUC_HOST_WU_BIT;
   4328 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4329 		    BM_PORT_GEN_CFG, reg);
   4330 	}
   4331 
   4332 	/*
   4333 	 * For PCH, this write will make sure that any noise will be detected
   4334 	 * as a CRC error and be dropped rather than show up as a bad packet
   4335 	 * to the DMA engine
   4336 	 */
   4337 	if (sc->sc_type == WM_T_PCH)
   4338 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4339 
   4340 	if (sc->sc_type >= WM_T_82544)
   4341 		CSR_WRITE(sc, WMREG_WUC, 0);
   4342 
   4343 	wm_reset_mdicnfg_82580(sc);
   4344 
   4345 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4346 		wm_pll_workaround_i210(sc);
   4347 }
   4348 
   4349 /*
   4350  * wm_add_rxbuf:
   4351  *
    4352  *	Add a receive buffer to the indicated descriptor.
   4353  */
   4354 static int
   4355 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4356 {
   4357 	struct wm_softc *sc = rxq->rxq_sc;
   4358 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4359 	struct mbuf *m;
   4360 	int error;
   4361 
   4362 	KASSERT(mutex_owned(rxq->rxq_lock));
   4363 
   4364 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4365 	if (m == NULL)
   4366 		return ENOBUFS;
   4367 
   4368 	MCLGET(m, M_DONTWAIT);
   4369 	if ((m->m_flags & M_EXT) == 0) {
   4370 		m_freem(m);
   4371 		return ENOBUFS;
   4372 	}
   4373 
   4374 	if (rxs->rxs_mbuf != NULL)
   4375 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4376 
   4377 	rxs->rxs_mbuf = m;
   4378 
   4379 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4380 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4381 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4382 	if (error) {
   4383 		/* XXX XXX XXX */
   4384 		aprint_error_dev(sc->sc_dev,
   4385 		    "unable to load rx DMA map %d, error = %d\n",
   4386 		    idx, error);
   4387 		panic("wm_add_rxbuf");
   4388 	}
   4389 
   4390 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4391 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4392 
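         	/*
         	 * On NEWQUEUE devices, write the Rx descriptor only while the
         	 * receiver is enabled; otherwise initialization is presumably
         	 * deferred to the init path.
         	 */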
   4393 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4394 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4395 			wm_init_rxdesc(rxq, idx);
   4396 	} else
   4397 		wm_init_rxdesc(rxq, idx);
   4398 
   4399 	return 0;
   4400 }
   4401 
   4402 /*
   4403  * wm_rxdrain:
   4404  *
   4405  *	Drain the receive queue.
   4406  */
   4407 static void
   4408 wm_rxdrain(struct wm_rxqueue *rxq)
   4409 {
   4410 	struct wm_softc *sc = rxq->rxq_sc;
   4411 	struct wm_rxsoft *rxs;
   4412 	int i;
   4413 
   4414 	KASSERT(mutex_owned(rxq->rxq_lock));
   4415 
   4416 	for (i = 0; i < WM_NRXDESC; i++) {
   4417 		rxs = &rxq->rxq_soft[i];
   4418 		if (rxs->rxs_mbuf != NULL) {
   4419 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4420 			m_freem(rxs->rxs_mbuf);
   4421 			rxs->rxs_mbuf = NULL;
   4422 		}
   4423 	}
   4424 }
   4425 
   4426 
   4427 /*
   4428  * XXX copy from FreeBSD's sys/net/rss_config.c
   4429  */
   4430 /*
   4431  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4432  * effectiveness may be limited by algorithm choice and available entropy
   4433  * during the boot.
   4434  *
   4435  * XXXRW: And that we don't randomize it yet!
   4436  *
   4437  * This is the default Microsoft RSS specification key which is also
   4438  * the Chelsio T5 firmware default key.
   4439  */
   4440 #define RSS_KEYSIZE 40
   4441 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4442 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4443 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4444 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4445 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4446 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4447 };
   4448 
   4449 /*
   4450  * Caller must pass an array of size sizeof(rss_key).
   4451  *
   4452  * XXX
    4453  * As if_ixgbe may use this function, it should not be an
    4454  * if_wm-specific function.
   4455  */
   4456 static void
   4457 wm_rss_getkey(uint8_t *key)
   4458 {
   4459 
   4460 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4461 }
   4462 
   4463 /*
   4464  * Setup registers for RSS.
   4465  *
    4466  * XXX VMDq is not yet supported.
   4467  */
   4468 static void
   4469 wm_init_rss(struct wm_softc *sc)
   4470 {
   4471 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4472 	int i;
   4473 
   4474 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4475 
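         	/*
         	 * Populate the redirection table, spreading the entries over
         	 * the queues in round-robin order (entry i maps to queue
         	 * i % sc_nqueues).
         	 */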
   4476 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4477 		int qid, reta_ent;
   4478 
   4479 		qid  = i % sc->sc_nqueues;
    4480 		switch (sc->sc_type) {
   4481 		case WM_T_82574:
   4482 			reta_ent = __SHIFTIN(qid,
   4483 			    RETA_ENT_QINDEX_MASK_82574);
   4484 			break;
   4485 		case WM_T_82575:
   4486 			reta_ent = __SHIFTIN(qid,
   4487 			    RETA_ENT_QINDEX1_MASK_82575);
   4488 			break;
   4489 		default:
   4490 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4491 			break;
   4492 		}
   4493 
   4494 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4495 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4496 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4497 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4498 	}
   4499 
   4500 	wm_rss_getkey((uint8_t *)rss_key);
   4501 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4502 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4503 
   4504 	if (sc->sc_type == WM_T_82574)
   4505 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4506 	else
   4507 		mrqc = MRQC_ENABLE_RSS_MQ;
   4508 
   4509 	/*
   4510 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4511 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4512 	 */
   4513 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4514 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4515 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4516 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4517 
   4518 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4519 }
   4520 
   4521 /*
    4522  * Adjust the numbers of TX and RX queues the system actually uses.
    4523  *
    4524  * The numbers are affected by the parameters below:
    4525  *     - The number of hardware queues
   4526  *     - The number of MSI-X vectors (= "nvectors" argument)
   4527  *     - ncpu
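          *
          * For example, an 82576 (16 hardware queue pairs) given 5 MSI-X
          * vectors on an 8-CPU machine ends up using nvectors - 1 = 4 queues.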
   4528  */
   4529 static void
   4530 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4531 {
   4532 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4533 
   4534 	if (nvectors < 2) {
   4535 		sc->sc_nqueues = 1;
   4536 		return;
   4537 	}
   4538 
    4539 	switch (sc->sc_type) {
   4540 	case WM_T_82572:
   4541 		hw_ntxqueues = 2;
   4542 		hw_nrxqueues = 2;
   4543 		break;
   4544 	case WM_T_82574:
   4545 		hw_ntxqueues = 2;
   4546 		hw_nrxqueues = 2;
   4547 		break;
   4548 	case WM_T_82575:
   4549 		hw_ntxqueues = 4;
   4550 		hw_nrxqueues = 4;
   4551 		break;
   4552 	case WM_T_82576:
   4553 		hw_ntxqueues = 16;
   4554 		hw_nrxqueues = 16;
   4555 		break;
   4556 	case WM_T_82580:
   4557 	case WM_T_I350:
   4558 	case WM_T_I354:
   4559 		hw_ntxqueues = 8;
   4560 		hw_nrxqueues = 8;
   4561 		break;
   4562 	case WM_T_I210:
   4563 		hw_ntxqueues = 4;
   4564 		hw_nrxqueues = 4;
   4565 		break;
   4566 	case WM_T_I211:
   4567 		hw_ntxqueues = 2;
   4568 		hw_nrxqueues = 2;
   4569 		break;
   4570 		/*
    4571 		 * As the Ethernet controllers below do not support MSI-X,
    4572 		 * this driver does not use multiqueue on them.
   4573 		 *     - WM_T_80003
   4574 		 *     - WM_T_ICH8
   4575 		 *     - WM_T_ICH9
   4576 		 *     - WM_T_ICH10
   4577 		 *     - WM_T_PCH
   4578 		 *     - WM_T_PCH2
   4579 		 *     - WM_T_PCH_LPT
   4580 		 */
   4581 	default:
   4582 		hw_ntxqueues = 1;
   4583 		hw_nrxqueues = 1;
   4584 		break;
   4585 	}
   4586 
   4587 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4588 
   4589 	/*
    4590 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4591 	 * limit the number of queues actually used.
   4592 	 */
   4593 	if (nvectors < hw_nqueues + 1) {
   4594 		sc->sc_nqueues = nvectors - 1;
   4595 	} else {
   4596 		sc->sc_nqueues = hw_nqueues;
   4597 	}
   4598 
   4599 	/*
    4600 	 * As more queues than CPUs cannot improve scaling, we limit
    4601 	 * the number of queues actually used.
   4602 	 */
   4603 	if (ncpu < sc->sc_nqueues)
   4604 		sc->sc_nqueues = ncpu;
   4605 }
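         /*
          * A worked example under an assumed configuration: an 82576
          * (16 hardware TX/RX queue pairs) given nvectors = 5 on a
          * 4-CPU machine:
          *
          *	hw_nqueues = min(16, 16) = 16
          *	nvectors (5) < hw_nqueues + 1 (17) => sc_nqueues = 5 - 1 = 4
          *	ncpu (4) is not < sc_nqueues (4)   => sc_nqueues stays 4
          *
          * One vector is always reserved for the link interrupt, which is
          * why only nvectors - 1 vectors are available for queues.
          */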
   4606 
   4607 static inline bool
   4608 wm_is_using_msix(struct wm_softc *sc)
   4609 {
   4610 
   4611 	return (sc->sc_nintrs > 1);
   4612 }
   4613 
   4614 static inline bool
   4615 wm_is_using_multiqueue(struct wm_softc *sc)
   4616 {
   4617 
   4618 	return (sc->sc_nqueues > 1);
   4619 }
   4620 
   4621 static int
   4622 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4623 {
   4624 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4625 	wmq->wmq_id = qidx;
   4626 	wmq->wmq_intr_idx = intr_idx;
   4627 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4628 #ifdef WM_MPSAFE
   4629 	    | SOFTINT_MPSAFE
   4630 #endif
   4631 	    , wm_handle_queue, wmq);
   4632 	if (wmq->wmq_si != NULL)
   4633 		return 0;
   4634 
   4635 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4636 	    wmq->wmq_id);
   4637 
   4638 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4639 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4640 	return ENOMEM;
   4641 }
   4642 
   4643 /*
    4644  * Both single-interrupt MSI and INTx can use this function.
   4645  */
   4646 static int
   4647 wm_setup_legacy(struct wm_softc *sc)
   4648 {
   4649 	pci_chipset_tag_t pc = sc->sc_pc;
   4650 	const char *intrstr = NULL;
   4651 	char intrbuf[PCI_INTRSTR_LEN];
   4652 	int error;
   4653 
   4654 	error = wm_alloc_txrx_queues(sc);
   4655 	if (error) {
   4656 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4657 		    error);
   4658 		return ENOMEM;
   4659 	}
   4660 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4661 	    sizeof(intrbuf));
   4662 #ifdef WM_MPSAFE
   4663 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4664 #endif
   4665 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4666 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4667 	if (sc->sc_ihs[0] == NULL) {
    4668 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4669 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4670 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4671 		return ENOMEM;
   4672 	}
   4673 
   4674 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4675 	sc->sc_nintrs = 1;
   4676 
   4677 	return wm_softint_establish(sc, 0, 0);
   4678 }
   4679 
   4680 static int
   4681 wm_setup_msix(struct wm_softc *sc)
   4682 {
   4683 	void *vih;
   4684 	kcpuset_t *affinity;
   4685 	int qidx, error, intr_idx, txrx_established;
   4686 	pci_chipset_tag_t pc = sc->sc_pc;
   4687 	const char *intrstr = NULL;
   4688 	char intrbuf[PCI_INTRSTR_LEN];
   4689 	char intr_xname[INTRDEVNAMEBUF];
   4690 
   4691 	if (sc->sc_nqueues < ncpu) {
   4692 		/*
    4693 		 * To avoid other devices' interrupts, the affinity of the
    4694 		 * Tx/Rx interrupts starts from CPU#1.
   4695 		 */
   4696 		sc->sc_affinity_offset = 1;
   4697 	} else {
   4698 		/*
    4699 		 * In this case, this device uses all CPUs. So, we unify the
    4700 		 * affinity cpu_index to MSI-X vector number for readability.
   4701 		 */
   4702 		sc->sc_affinity_offset = 0;
   4703 	}
   4704 
   4705 	error = wm_alloc_txrx_queues(sc);
   4706 	if (error) {
   4707 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4708 		    error);
   4709 		return ENOMEM;
   4710 	}
   4711 
   4712 	kcpuset_create(&affinity, false);
   4713 	intr_idx = 0;
   4714 
   4715 	/*
   4716 	 * TX and RX
   4717 	 */
   4718 	txrx_established = 0;
   4719 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4720 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4721 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4722 
   4723 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4724 		    sizeof(intrbuf));
   4725 #ifdef WM_MPSAFE
   4726 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4727 		    PCI_INTR_MPSAFE, true);
   4728 #endif
   4729 		memset(intr_xname, 0, sizeof(intr_xname));
   4730 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4731 		    device_xname(sc->sc_dev), qidx);
   4732 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4733 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4734 		if (vih == NULL) {
   4735 			aprint_error_dev(sc->sc_dev,
   4736 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4737 			    intrstr ? " at " : "",
   4738 			    intrstr ? intrstr : "");
   4739 
   4740 			goto fail;
   4741 		}
   4742 		kcpuset_zero(affinity);
   4743 		/* Round-robin affinity */
   4744 		kcpuset_set(affinity, affinity_to);
   4745 		error = interrupt_distribute(vih, affinity, NULL);
   4746 		if (error == 0) {
   4747 			aprint_normal_dev(sc->sc_dev,
   4748 			    "for TX and RX interrupting at %s affinity to %u\n",
   4749 			    intrstr, affinity_to);
   4750 		} else {
   4751 			aprint_normal_dev(sc->sc_dev,
   4752 			    "for TX and RX interrupting at %s\n", intrstr);
   4753 		}
   4754 		sc->sc_ihs[intr_idx] = vih;
   4755 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4756 			goto fail;
   4757 		txrx_established++;
   4758 		intr_idx++;
   4759 	}
   4760 
   4761 	/*
   4762 	 * LINK
   4763 	 */
   4764 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4765 	    sizeof(intrbuf));
   4766 #ifdef WM_MPSAFE
   4767 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4768 #endif
   4769 	memset(intr_xname, 0, sizeof(intr_xname));
   4770 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4771 	    device_xname(sc->sc_dev));
   4772 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4773 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4774 	if (vih == NULL) {
   4775 		aprint_error_dev(sc->sc_dev,
   4776 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4777 		    intrstr ? " at " : "",
   4778 		    intrstr ? intrstr : "");
   4779 
   4780 		goto fail;
   4781 	}
    4782 	/* Keep the default affinity for the LINK interrupt */
   4783 	aprint_normal_dev(sc->sc_dev,
   4784 	    "for LINK interrupting at %s\n", intrstr);
   4785 	sc->sc_ihs[intr_idx] = vih;
   4786 	sc->sc_link_intr_idx = intr_idx;
   4787 
   4788 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4789 	kcpuset_destroy(affinity);
   4790 	return 0;
   4791 
   4792  fail:
   4793 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4794 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4795 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4796 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4797 	}
   4798 
   4799 	kcpuset_destroy(affinity);
   4800 	return ENOMEM;
   4801 }
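         /*
          * Affinity sketch with assumed numbers: with sc_nqueues = 3 on an
          * 8-CPU machine, sc_affinity_offset is 1, so the loop above pins
          *
          *	queue 0 -> CPU (1 + 0) % 8 = CPU 1
          *	queue 1 -> CPU (1 + 1) % 8 = CPU 2
          *	queue 2 -> CPU (1 + 2) % 8 = CPU 3
          *
          * leaving CPU 0 free for other devices' interrupts, while the LINK
          * vector keeps its default affinity.
          */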
   4802 
   4803 static void
   4804 wm_turnon(struct wm_softc *sc)
   4805 {
   4806 	int i;
   4807 
   4808 	KASSERT(WM_CORE_LOCKED(sc));
   4809 
   4810 	/*
    4811 	 * The stopping flags must be unset in ascending order.
   4812 	 */
    4813 	for (i = 0; i < sc->sc_nqueues; i++) {
   4814 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4815 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4816 
   4817 		mutex_enter(txq->txq_lock);
   4818 		txq->txq_stopping = false;
   4819 		mutex_exit(txq->txq_lock);
   4820 
   4821 		mutex_enter(rxq->rxq_lock);
   4822 		rxq->rxq_stopping = false;
   4823 		mutex_exit(rxq->rxq_lock);
   4824 	}
   4825 
   4826 	sc->sc_core_stopping = false;
   4827 }
   4828 
   4829 static void
   4830 wm_turnoff(struct wm_softc *sc)
   4831 {
   4832 	int i;
   4833 
   4834 	KASSERT(WM_CORE_LOCKED(sc));
   4835 
   4836 	sc->sc_core_stopping = true;
   4837 
   4838 	/*
    4839 	 * The stopping flags must be set in ascending order.
   4840 	 */
    4841 	for (i = 0; i < sc->sc_nqueues; i++) {
   4842 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4843 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4844 
   4845 		mutex_enter(rxq->rxq_lock);
   4846 		rxq->rxq_stopping = true;
   4847 		mutex_exit(rxq->rxq_lock);
   4848 
   4849 		mutex_enter(txq->txq_lock);
   4850 		txq->txq_stopping = true;
   4851 		mutex_exit(txq->txq_lock);
   4852 	}
   4853 }
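         /*
          * A minimal sketch of how the stopping flags set above are consumed;
          * this is the pattern used by the callouts and interrupt handlers in
          * this file (e.g. wm_82547_txfifo_stall() below):
          *
          *	mutex_enter(txq->txq_lock);
          *	if (txq->txq_stopping) {
          *		mutex_exit(txq->txq_lock);
          *		return;
          *	}
          *	... safe to touch the queue and the chip ...
          *	mutex_exit(txq->txq_lock);
          */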
   4854 
   4855 /*
    4856  * Write the interrupt interval value to the ITR or EITR register.
   4857  */
   4858 static void
   4859 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4860 {
   4861 
   4862 	if (!wmq->wmq_set_itr)
   4863 		return;
   4864 
   4865 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4866 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4867 
   4868 		/*
    4869 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    4870 		 * the counter field in software.
   4871 		 */
   4872 		if (sc->sc_type == WM_T_82575)
   4873 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4874 		else
   4875 			eitr |= EITR_CNT_INGR;
   4876 
   4877 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4878 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   4879 		/*
    4880 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4881 		 * the multiqueue function with MSI-X.
   4882 		 */
   4883 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4884 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4885 	} else {
   4886 		KASSERT(wmq->wmq_id == 0);
   4887 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4888 	}
   4889 
   4890 	wmq->wmq_set_itr = false;
   4891 }
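         /*
          * Interval arithmetic sketch (target_ints_per_sec is a hypothetical
          * variable, not driver state).  The legacy ITR register counts
          * 256ns units, so:
          *
          *	itr = 1000000000 / (target_ints_per_sec * 256);
          *
          * e.g. 1,000,000,000 / (2604 * 256) is roughly 1500, which matches
          * the sc_itr_init default chosen in wm_init_locked().  NEWQUEUE
          * EITR (except for the 82575) uses different units, so there the
          * value is 1,000,000 / N instead.
          */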
   4892 
   4893 /*
   4894  * TODO
    4895  * The dynamic ITR calculation below is almost the same as Linux igb's;
    4896  * however, it does not fit wm(4) well, so AIM stays disabled until we
    4897  * find an appropriate ITR calculation.
   4898  */
   4899 /*
    4900  * Calculate the interrupt interval value that wm_itrs_writereg() will
    4901  * write to the register. This function does not write the ITR/EITR register.
   4902  */
   4903 static void
   4904 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4905 {
   4906 #ifdef NOTYET
   4907 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4908 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4909 	uint32_t avg_size = 0;
   4910 	uint32_t new_itr;
   4911 
   4912 	if (rxq->rxq_packets)
   4913 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4914 	if (txq->txq_packets)
   4915 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4916 
   4917 	if (avg_size == 0) {
   4918 		new_itr = 450; /* restore default value */
   4919 		goto out;
   4920 	}
   4921 
   4922 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4923 	avg_size += 24;
   4924 
   4925 	/* Don't starve jumbo frames */
   4926 	avg_size = min(avg_size, 3000);
   4927 
   4928 	/* Give a little boost to mid-size frames */
   4929 	if ((avg_size > 300) && (avg_size < 1200))
   4930 		new_itr = avg_size / 3;
   4931 	else
   4932 		new_itr = avg_size / 2;
   4933 
   4934 out:
   4935 	/*
    4936 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
    4937 	 * controllers. See the sc->sc_itr_init setting in wm_init_locked().
   4938 	 */
   4939 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4940 		new_itr *= 4;
   4941 
   4942 	if (new_itr != wmq->wmq_itr) {
   4943 		wmq->wmq_itr = new_itr;
   4944 		wmq->wmq_set_itr = true;
   4945 	} else
   4946 		wmq->wmq_set_itr = false;
   4947 
   4948 	rxq->rxq_packets = 0;
   4949 	rxq->rxq_bytes = 0;
   4950 	txq->txq_packets = 0;
   4951 	txq->txq_bytes = 0;
   4952 #endif
   4953 }
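         /*
          * A worked example of the disabled heuristic above, with made-up
          * traffic numbers: 100 RX packets totalling 64,000 bytes give
          * avg_size = 640 + 24 = 664; that is mid-size (300 < 664 < 1200),
          * so new_itr = 664 / 3 = 221.  Larger average frames fall into the
          * avg_size / 2 branch, trading interrupt rate against latency as
          * throughput grows.
          */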
   4954 
   4955 /*
   4956  * wm_init:		[ifnet interface function]
   4957  *
   4958  *	Initialize the interface.
   4959  */
   4960 static int
   4961 wm_init(struct ifnet *ifp)
   4962 {
   4963 	struct wm_softc *sc = ifp->if_softc;
   4964 	int ret;
   4965 
   4966 	WM_CORE_LOCK(sc);
   4967 	ret = wm_init_locked(ifp);
   4968 	WM_CORE_UNLOCK(sc);
   4969 
   4970 	return ret;
   4971 }
   4972 
   4973 static int
   4974 wm_init_locked(struct ifnet *ifp)
   4975 {
   4976 	struct wm_softc *sc = ifp->if_softc;
   4977 	int i, j, trynum, error = 0;
   4978 	uint32_t reg;
   4979 
   4980 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4981 		device_xname(sc->sc_dev), __func__));
   4982 	KASSERT(WM_CORE_LOCKED(sc));
   4983 
   4984 	/*
    4985 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4986 	 * There is a small but measurable benefit to avoiding the adjustment
   4987 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4988 	 * on such platforms.  One possibility is that the DMA itself is
   4989 	 * slightly more efficient if the front of the entire packet (instead
   4990 	 * of the front of the headers) is aligned.
   4991 	 *
   4992 	 * Note we must always set align_tweak to 0 if we are using
   4993 	 * jumbo frames.
   4994 	 */
   4995 #ifdef __NO_STRICT_ALIGNMENT
   4996 	sc->sc_align_tweak = 0;
   4997 #else
   4998 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4999 		sc->sc_align_tweak = 0;
   5000 	else
   5001 		sc->sc_align_tweak = 2;
   5002 #endif /* __NO_STRICT_ALIGNMENT */
   5003 
   5004 	/* Cancel any pending I/O. */
   5005 	wm_stop_locked(ifp, 0);
   5006 
    5007 	/* Update statistics before reset */
   5008 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5009 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5010 
   5011 	/* PCH_SPT hardware workaround */
   5012 	if (sc->sc_type == WM_T_PCH_SPT)
   5013 		wm_flush_desc_rings(sc);
   5014 
   5015 	/* Reset the chip to a known state. */
   5016 	wm_reset(sc);
   5017 
   5018 	/* AMT based hardware can now take control from firmware */
   5019 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5020 		wm_get_hw_control(sc);
   5021 
   5022 	/* Init hardware bits */
   5023 	wm_initialize_hardware_bits(sc);
   5024 
   5025 	/* Reset the PHY. */
   5026 	if (sc->sc_flags & WM_F_HAS_MII)
   5027 		wm_gmii_reset(sc);
   5028 
   5029 	/* Calculate (E)ITR value */
   5030 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5031 		/*
   5032 		 * For NEWQUEUE's EITR (except for 82575).
    5033 		 * The 82575's EITR should be set to the same throttling value
    5034 		 * as the older controllers' ITR because the interrupt/sec
    5035 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5036 		 *
    5037 		 * The 82574's EITR should be set to the same value as its ITR.
    5038 		 *
    5039 		 * For N interrupts/sec, set this value to:
    5040 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5041 		 */
   5042 		sc->sc_itr_init = 450;
   5043 	} else if (sc->sc_type >= WM_T_82543) {
   5044 		/*
   5045 		 * Set up the interrupt throttling register (units of 256ns)
   5046 		 * Note that a footnote in Intel's documentation says this
   5047 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5048 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5049 		 * that that is also true for the 1024ns units of the other
   5050 		 * interrupt-related timer registers -- so, really, we ought
   5051 		 * to divide this value by 4 when the link speed is low.
   5052 		 *
   5053 		 * XXX implement this division at link speed change!
   5054 		 */
   5055 
   5056 		/*
   5057 		 * For N interrupts/sec, set this value to:
   5058 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5059 		 * absolute and packet timer values to this value
   5060 		 * divided by 4 to get "simple timer" behavior.
   5061 		 */
   5062 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5063 	}
   5064 
   5065 	error = wm_init_txrx_queues(sc);
   5066 	if (error)
   5067 		goto out;
   5068 
   5069 	/*
   5070 	 * Clear out the VLAN table -- we don't use it (yet).
   5071 	 */
   5072 	CSR_WRITE(sc, WMREG_VET, 0);
   5073 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5074 		trynum = 10; /* Due to hw errata */
   5075 	else
   5076 		trynum = 1;
   5077 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5078 		for (j = 0; j < trynum; j++)
   5079 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5080 
   5081 	/*
   5082 	 * Set up flow-control parameters.
   5083 	 *
   5084 	 * XXX Values could probably stand some tuning.
   5085 	 */
   5086 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5087 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5088 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5089 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5090 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5091 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5092 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5093 	}
   5094 
   5095 	sc->sc_fcrtl = FCRTL_DFLT;
   5096 	if (sc->sc_type < WM_T_82543) {
   5097 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5098 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5099 	} else {
   5100 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5101 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5102 	}
   5103 
   5104 	if (sc->sc_type == WM_T_80003)
   5105 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5106 	else
   5107 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5108 
   5109 	/* Writes the control register. */
   5110 	wm_set_vlan(sc);
   5111 
   5112 	if (sc->sc_flags & WM_F_HAS_MII) {
   5113 		int val;
   5114 
   5115 		switch (sc->sc_type) {
   5116 		case WM_T_80003:
   5117 		case WM_T_ICH8:
   5118 		case WM_T_ICH9:
   5119 		case WM_T_ICH10:
   5120 		case WM_T_PCH:
   5121 		case WM_T_PCH2:
   5122 		case WM_T_PCH_LPT:
   5123 		case WM_T_PCH_SPT:
   5124 			/*
   5125 			 * Set the mac to wait the maximum time between each
   5126 			 * iteration and increase the max iterations when
   5127 			 * polling the phy; this fixes erroneous timeouts at
   5128 			 * 10Mbps.
   5129 			 */
   5130 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5131 			    0xFFFF);
   5132 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5133 			val |= 0x3F;
   5134 			wm_kmrn_writereg(sc,
   5135 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5136 			break;
   5137 		default:
   5138 			break;
   5139 		}
   5140 
   5141 		if (sc->sc_type == WM_T_80003) {
   5142 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5143 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5144 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5145 
   5146 			/* Bypass RX and TX FIFO's */
   5147 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5148 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5149 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5150 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5151 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5152 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5153 		}
   5154 	}
   5155 #if 0
   5156 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5157 #endif
   5158 
   5159 	/* Set up checksum offload parameters. */
   5160 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5161 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5162 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5163 		reg |= RXCSUM_IPOFL;
   5164 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5165 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5166 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5167 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5168 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5169 
   5170 	/* Set registers about MSI-X */
   5171 	if (wm_is_using_msix(sc)) {
   5172 		uint32_t ivar;
   5173 		struct wm_queue *wmq;
   5174 		int qid, qintr_idx;
   5175 
   5176 		if (sc->sc_type == WM_T_82575) {
   5177 			/* Interrupt control */
   5178 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5179 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5180 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5181 
   5182 			/* TX and RX */
   5183 			for (i = 0; i < sc->sc_nqueues; i++) {
   5184 				wmq = &sc->sc_queue[i];
   5185 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5186 				    EITR_TX_QUEUE(wmq->wmq_id)
   5187 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5188 			}
   5189 			/* Link status */
   5190 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5191 			    EITR_OTHER);
   5192 		} else if (sc->sc_type == WM_T_82574) {
   5193 			/* Interrupt control */
   5194 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5195 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5196 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5197 
   5198 			/*
    5199 			 * Work around an issue with spurious interrupts
    5200 			 * in MSI-X mode.
    5201 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5202 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5203 			 */
   5204 			reg = CSR_READ(sc, WMREG_RFCTL);
   5205 			reg |= WMREG_RFCTL_ACKDIS;
   5206 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5207 
   5208 			ivar = 0;
   5209 			/* TX and RX */
   5210 			for (i = 0; i < sc->sc_nqueues; i++) {
   5211 				wmq = &sc->sc_queue[i];
   5212 				qid = wmq->wmq_id;
   5213 				qintr_idx = wmq->wmq_intr_idx;
   5214 
   5215 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5216 				    IVAR_TX_MASK_Q_82574(qid));
   5217 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5218 				    IVAR_RX_MASK_Q_82574(qid));
   5219 			}
   5220 			/* Link status */
   5221 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5222 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5223 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5224 		} else {
   5225 			/* Interrupt control */
   5226 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5227 			    | GPIE_EIAME | GPIE_PBA);
   5228 
   5229 			switch (sc->sc_type) {
   5230 			case WM_T_82580:
   5231 			case WM_T_I350:
   5232 			case WM_T_I354:
   5233 			case WM_T_I210:
   5234 			case WM_T_I211:
   5235 				/* TX and RX */
   5236 				for (i = 0; i < sc->sc_nqueues; i++) {
   5237 					wmq = &sc->sc_queue[i];
   5238 					qid = wmq->wmq_id;
   5239 					qintr_idx = wmq->wmq_intr_idx;
   5240 
   5241 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5242 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5243 					ivar |= __SHIFTIN((qintr_idx
   5244 						| IVAR_VALID),
   5245 					    IVAR_TX_MASK_Q(qid));
   5246 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5247 					ivar |= __SHIFTIN((qintr_idx
   5248 						| IVAR_VALID),
   5249 					    IVAR_RX_MASK_Q(qid));
   5250 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5251 				}
   5252 				break;
   5253 			case WM_T_82576:
   5254 				/* TX and RX */
   5255 				for (i = 0; i < sc->sc_nqueues; i++) {
   5256 					wmq = &sc->sc_queue[i];
   5257 					qid = wmq->wmq_id;
   5258 					qintr_idx = wmq->wmq_intr_idx;
   5259 
   5260 					ivar = CSR_READ(sc,
   5261 					    WMREG_IVAR_Q_82576(qid));
   5262 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5263 					ivar |= __SHIFTIN((qintr_idx
   5264 						| IVAR_VALID),
   5265 					    IVAR_TX_MASK_Q_82576(qid));
   5266 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5267 					ivar |= __SHIFTIN((qintr_idx
   5268 						| IVAR_VALID),
   5269 					    IVAR_RX_MASK_Q_82576(qid));
   5270 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5271 					    ivar);
   5272 				}
   5273 				break;
   5274 			default:
   5275 				break;
   5276 			}
   5277 
   5278 			/* Link status */
   5279 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5280 			    IVAR_MISC_OTHER);
   5281 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5282 		}
   5283 
   5284 		if (wm_is_using_multiqueue(sc)) {
   5285 			wm_init_rss(sc);
   5286 
    5287 			/*
    5288 			 * NOTE: Receive Full-Packet Checksum Offload
    5289 			 * is mutually exclusive with Multiqueue. However,
    5290 			 * this is not the same as the TCP/IP checksums,
    5291 			 * which still work.
    5292 			 */
   5293 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5294 			reg |= RXCSUM_PCSD;
   5295 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5296 		}
   5297 	}
   5298 
   5299 	/* Set up the interrupt registers. */
   5300 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5301 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5302 	    ICR_RXO | ICR_RXT0;
   5303 	if (wm_is_using_msix(sc)) {
   5304 		uint32_t mask;
   5305 		struct wm_queue *wmq;
   5306 
   5307 		switch (sc->sc_type) {
   5308 		case WM_T_82574:
   5309 			mask = 0;
   5310 			for (i = 0; i < sc->sc_nqueues; i++) {
   5311 				wmq = &sc->sc_queue[i];
   5312 				mask |= ICR_TXQ(wmq->wmq_id);
   5313 				mask |= ICR_RXQ(wmq->wmq_id);
   5314 			}
   5315 			mask |= ICR_OTHER;
   5316 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5317 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5318 			break;
   5319 		default:
   5320 			if (sc->sc_type == WM_T_82575) {
   5321 				mask = 0;
   5322 				for (i = 0; i < sc->sc_nqueues; i++) {
   5323 					wmq = &sc->sc_queue[i];
   5324 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5325 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5326 				}
   5327 				mask |= EITR_OTHER;
   5328 			} else {
   5329 				mask = 0;
   5330 				for (i = 0; i < sc->sc_nqueues; i++) {
   5331 					wmq = &sc->sc_queue[i];
   5332 					mask |= 1 << wmq->wmq_intr_idx;
   5333 				}
   5334 				mask |= 1 << sc->sc_link_intr_idx;
   5335 			}
   5336 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5337 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5338 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5339 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5340 			break;
   5341 		}
   5342 	} else
   5343 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5344 
   5345 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5346 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5347 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5348 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5349 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5350 		reg |= KABGTXD_BGSQLBIAS;
   5351 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5352 	}
   5353 
   5354 	/* Set up the inter-packet gap. */
   5355 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5356 
   5357 	if (sc->sc_type >= WM_T_82543) {
   5358 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5359 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5360 			wm_itrs_writereg(sc, wmq);
   5361 		}
   5362 		/*
   5363 		 * Link interrupts occur much less than TX
   5364 		 * interrupts and RX interrupts. So, we don't
   5365 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5366 		 * FreeBSD's if_igb.
   5367 		 */
   5368 	}
   5369 
   5370 	/* Set the VLAN ethernetype. */
   5371 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5372 
   5373 	/*
   5374 	 * Set up the transmit control register; we start out with
    5375 	 * a collision distance suitable for FDX, but update it when
   5376 	 * we resolve the media type.
   5377 	 */
   5378 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5379 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5380 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5381 	if (sc->sc_type >= WM_T_82571)
   5382 		sc->sc_tctl |= TCTL_MULR;
   5383 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5384 
   5385 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5386 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5387 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5388 	}
   5389 
   5390 	if (sc->sc_type == WM_T_80003) {
   5391 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5392 		reg &= ~TCTL_EXT_GCEX_MASK;
   5393 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5394 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5395 	}
   5396 
   5397 	/* Set the media. */
   5398 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5399 		goto out;
   5400 
   5401 	/* Configure for OS presence */
   5402 	wm_init_manageability(sc);
   5403 
   5404 	/*
   5405 	 * Set up the receive control register; we actually program
   5406 	 * the register when we set the receive filter.  Use multicast
   5407 	 * address offset type 0.
   5408 	 *
   5409 	 * Only the i82544 has the ability to strip the incoming
   5410 	 * CRC, so we don't enable that feature.
   5411 	 */
   5412 	sc->sc_mchash_type = 0;
   5413 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5414 	    | RCTL_MO(sc->sc_mchash_type);
   5415 
   5416 	/*
    5417 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5418 	 */
   5419 	if (sc->sc_type == WM_T_82574)
   5420 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5421 
   5422 	/*
   5423 	 * The I350 has a bug where it always strips the CRC whether
    5424 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5425 	 */
   5426 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5427 	    || (sc->sc_type == WM_T_I210))
   5428 		sc->sc_rctl |= RCTL_SECRC;
   5429 
   5430 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5431 	    && (ifp->if_mtu > ETHERMTU)) {
   5432 		sc->sc_rctl |= RCTL_LPE;
   5433 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5434 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5435 	}
   5436 
   5437 	if (MCLBYTES == 2048) {
   5438 		sc->sc_rctl |= RCTL_2k;
   5439 	} else {
   5440 		if (sc->sc_type >= WM_T_82543) {
   5441 			switch (MCLBYTES) {
   5442 			case 4096:
   5443 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5444 				break;
   5445 			case 8192:
   5446 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5447 				break;
   5448 			case 16384:
   5449 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5450 				break;
   5451 			default:
   5452 				panic("wm_init: MCLBYTES %d unsupported",
   5453 				    MCLBYTES);
   5454 				break;
   5455 			}
   5456 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5457 	}
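         	/*
         	 * Illustrative summary of the cluster-size to RCTL buffer-size
         	 * mapping chosen above (no new configuration; the values repeat
         	 * the if/else chain):
         	 *
         	 *	MCLBYTES  2048 -> RCTL_2k
         	 *	MCLBYTES  4096 -> RCTL_BSEX | RCTL_BSEX_4k
         	 *	MCLBYTES  8192 -> RCTL_BSEX | RCTL_BSEX_8k
         	 *	MCLBYTES 16384 -> RCTL_BSEX | RCTL_BSEX_16k
         	 */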
   5458 
   5459 	/* Set the receive filter. */
   5460 	wm_set_filter(sc);
   5461 
   5462 	/* Enable ECC */
   5463 	switch (sc->sc_type) {
   5464 	case WM_T_82571:
   5465 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5466 		reg |= PBA_ECC_CORR_EN;
   5467 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5468 		break;
   5469 	case WM_T_PCH_LPT:
   5470 	case WM_T_PCH_SPT:
   5471 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5472 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5473 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5474 
   5475 		sc->sc_ctrl |= CTRL_MEHE;
   5476 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5477 		break;
   5478 	default:
   5479 		break;
   5480 	}
   5481 
   5482 	/* On 575 and later set RDT only if RX enabled */
   5483 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5484 		int qidx;
   5485 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5486 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5487 			for (i = 0; i < WM_NRXDESC; i++) {
   5488 				mutex_enter(rxq->rxq_lock);
   5489 				wm_init_rxdesc(rxq, i);
   5490 				mutex_exit(rxq->rxq_lock);
   5491 
   5492 			}
   5493 		}
   5494 	}
   5495 
   5496 	wm_turnon(sc);
   5497 
   5498 	/* Start the one second link check clock. */
   5499 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5500 
   5501 	/* ...all done! */
   5502 	ifp->if_flags |= IFF_RUNNING;
   5503 	ifp->if_flags &= ~IFF_OACTIVE;
   5504 
   5505  out:
   5506 	sc->sc_if_flags = ifp->if_flags;
   5507 	if (error)
   5508 		log(LOG_ERR, "%s: interface not running\n",
   5509 		    device_xname(sc->sc_dev));
   5510 	return error;
   5511 }
   5512 
   5513 /*
   5514  * wm_stop:		[ifnet interface function]
   5515  *
   5516  *	Stop transmission on the interface.
   5517  */
   5518 static void
   5519 wm_stop(struct ifnet *ifp, int disable)
   5520 {
   5521 	struct wm_softc *sc = ifp->if_softc;
   5522 
   5523 	WM_CORE_LOCK(sc);
   5524 	wm_stop_locked(ifp, disable);
   5525 	WM_CORE_UNLOCK(sc);
   5526 }
   5527 
   5528 static void
   5529 wm_stop_locked(struct ifnet *ifp, int disable)
   5530 {
   5531 	struct wm_softc *sc = ifp->if_softc;
   5532 	struct wm_txsoft *txs;
   5533 	int i, qidx;
   5534 
   5535 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5536 		device_xname(sc->sc_dev), __func__));
   5537 	KASSERT(WM_CORE_LOCKED(sc));
   5538 
   5539 	wm_turnoff(sc);
   5540 
   5541 	/* Stop the one second clock. */
   5542 	callout_stop(&sc->sc_tick_ch);
   5543 
   5544 	/* Stop the 82547 Tx FIFO stall check timer. */
   5545 	if (sc->sc_type == WM_T_82547)
   5546 		callout_stop(&sc->sc_txfifo_ch);
   5547 
   5548 	if (sc->sc_flags & WM_F_HAS_MII) {
   5549 		/* Down the MII. */
   5550 		mii_down(&sc->sc_mii);
   5551 	} else {
   5552 #if 0
   5553 		/* Should we clear PHY's status properly? */
   5554 		wm_reset(sc);
   5555 #endif
   5556 	}
   5557 
   5558 	/* Stop the transmit and receive processes. */
   5559 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5560 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5561 	sc->sc_rctl &= ~RCTL_EN;
   5562 
   5563 	/*
   5564 	 * Clear the interrupt mask to ensure the device cannot assert its
   5565 	 * interrupt line.
   5566 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5567 	 * service any currently pending or shared interrupt.
   5568 	 */
   5569 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5570 	sc->sc_icr = 0;
   5571 	if (wm_is_using_msix(sc)) {
   5572 		if (sc->sc_type != WM_T_82574) {
   5573 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5574 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5575 		} else
   5576 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5577 	}
   5578 
   5579 	/* Release any queued transmit buffers. */
   5580 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5581 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5582 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5583 		mutex_enter(txq->txq_lock);
   5584 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5585 			txs = &txq->txq_soft[i];
   5586 			if (txs->txs_mbuf != NULL) {
   5587 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5588 				m_freem(txs->txs_mbuf);
   5589 				txs->txs_mbuf = NULL;
   5590 			}
   5591 		}
   5592 		mutex_exit(txq->txq_lock);
   5593 	}
   5594 
   5595 	/* Mark the interface as down and cancel the watchdog timer. */
   5596 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5597 	ifp->if_timer = 0;
   5598 
   5599 	if (disable) {
   5600 		for (i = 0; i < sc->sc_nqueues; i++) {
   5601 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5602 			mutex_enter(rxq->rxq_lock);
   5603 			wm_rxdrain(rxq);
   5604 			mutex_exit(rxq->rxq_lock);
   5605 		}
   5606 	}
   5607 
   5608 #if 0 /* notyet */
   5609 	if (sc->sc_type >= WM_T_82544)
   5610 		CSR_WRITE(sc, WMREG_WUC, 0);
   5611 #endif
   5612 }
   5613 
   5614 static void
   5615 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5616 {
   5617 	struct mbuf *m;
   5618 	int i;
   5619 
   5620 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5621 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5622 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5623 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5624 		    m->m_data, m->m_len, m->m_flags);
   5625 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5626 	    i, i == 1 ? "" : "s");
   5627 }
   5628 
   5629 /*
   5630  * wm_82547_txfifo_stall:
   5631  *
   5632  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5633  *	reset the FIFO pointers, and restart packet transmission.
   5634  */
   5635 static void
   5636 wm_82547_txfifo_stall(void *arg)
   5637 {
   5638 	struct wm_softc *sc = arg;
   5639 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5640 
   5641 	mutex_enter(txq->txq_lock);
   5642 
   5643 	if (txq->txq_stopping)
   5644 		goto out;
   5645 
   5646 	if (txq->txq_fifo_stall) {
   5647 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5648 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5649 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5650 			/*
   5651 			 * Packets have drained.  Stop transmitter, reset
   5652 			 * FIFO pointers, restart transmitter, and kick
   5653 			 * the packet queue.
   5654 			 */
   5655 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5656 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5657 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5658 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5659 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5660 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5661 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5662 			CSR_WRITE_FLUSH(sc);
   5663 
   5664 			txq->txq_fifo_head = 0;
   5665 			txq->txq_fifo_stall = 0;
   5666 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5667 		} else {
   5668 			/*
   5669 			 * Still waiting for packets to drain; try again in
   5670 			 * another tick.
   5671 			 */
   5672 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5673 		}
   5674 	}
   5675 
   5676 out:
   5677 	mutex_exit(txq->txq_lock);
   5678 }
   5679 
   5680 /*
   5681  * wm_82547_txfifo_bugchk:
   5682  *
   5683  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5684  *	prevent enqueueing a packet that would wrap around the end
    5685  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5686  *
   5687  *	We do this by checking the amount of space before the end
   5688  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5689  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5690  *	the internal FIFO pointers to the beginning, and restart
   5691  *	transmission on the interface.
   5692  */
   5693 #define	WM_FIFO_HDR		0x10
   5694 #define	WM_82547_PAD_LEN	0x3e0
   5695 static int
   5696 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5697 {
   5698 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5699 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5700 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5701 
   5702 	/* Just return if already stalled. */
   5703 	if (txq->txq_fifo_stall)
   5704 		return 1;
   5705 
   5706 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5707 		/* Stall only occurs in half-duplex mode. */
   5708 		goto send_packet;
   5709 	}
   5710 
   5711 	if (len >= WM_82547_PAD_LEN + space) {
   5712 		txq->txq_fifo_stall = 1;
   5713 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5714 		return 1;
   5715 	}
   5716 
   5717  send_packet:
   5718 	txq->txq_fifo_head += len;
   5719 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5720 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5721 
   5722 	return 0;
   5723 }
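         /*
          * A worked example with made-up numbers: if txq_fifo_size is 0x8000
          * and txq_fifo_head is 0x7e00, then space = 0x200.  A 1514-byte
          * frame rounds up to len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR)
          * = 0x600, and since 0x600 >= WM_82547_PAD_LEN (0x3e0) + 0x200
          * (= 0x5e0), the driver stalls the queue until the FIFO drains
          * instead of letting the packet wrap.
          */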
   5724 
   5725 static int
   5726 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5727 {
   5728 	int error;
   5729 
   5730 	/*
   5731 	 * Allocate the control data structures, and create and load the
   5732 	 * DMA map for it.
   5733 	 *
   5734 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5735 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5736 	 * both sets within the same 4G segment.
   5737 	 */
   5738 	if (sc->sc_type < WM_T_82544)
   5739 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5740 	else
   5741 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5742 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5743 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5744 	else
   5745 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5746 
   5747 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5748 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5749 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5750 		aprint_error_dev(sc->sc_dev,
   5751 		    "unable to allocate TX control data, error = %d\n",
   5752 		    error);
   5753 		goto fail_0;
   5754 	}
   5755 
   5756 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5757 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5758 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5759 		aprint_error_dev(sc->sc_dev,
   5760 		    "unable to map TX control data, error = %d\n", error);
   5761 		goto fail_1;
   5762 	}
   5763 
   5764 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5765 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5766 		aprint_error_dev(sc->sc_dev,
   5767 		    "unable to create TX control data DMA map, error = %d\n",
   5768 		    error);
   5769 		goto fail_2;
   5770 	}
   5771 
   5772 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5773 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5774 		aprint_error_dev(sc->sc_dev,
   5775 		    "unable to load TX control data DMA map, error = %d\n",
   5776 		    error);
   5777 		goto fail_3;
   5778 	}
   5779 
   5780 	return 0;
   5781 
   5782  fail_3:
   5783 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5784  fail_2:
   5785 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5786 	    WM_TXDESCS_SIZE(txq));
   5787  fail_1:
   5788 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5789  fail_0:
   5790 	return error;
   5791 }
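         /*
          * A note on the bus_dmamem_alloc() call above: its fourth argument
          * (0x100000000ULL) is the boundary parameter, which prevents the
          * allocation from crossing a 4GB line and so satisfies the "same 4G
          * segment" requirement.  A hedged sketch of the same pattern for an
          * arbitrary ring (seg, rseg and ring_size are hypothetical):
          *
          *	error = bus_dmamem_alloc(sc->sc_dmat, ring_size, PAGE_SIZE,
          *	    (bus_size_t)0x100000000ULL, &seg, 1, &rseg, 0);
          *
          * With the ring confined this way, the high base-address register
          * (TDBAH/RDBAH) is the same for every descriptor in the ring.
          */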
   5792 
   5793 static void
   5794 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5795 {
   5796 
   5797 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5798 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5799 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5800 	    WM_TXDESCS_SIZE(txq));
   5801 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5802 }
   5803 
   5804 static int
   5805 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5806 {
   5807 	int error;
   5808 	size_t rxq_descs_size;
   5809 
   5810 	/*
   5811 	 * Allocate the control data structures, and create and load the
   5812 	 * DMA map for it.
   5813 	 *
   5814 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5815 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5816 	 * both sets within the same 4G segment.
   5817 	 */
   5818 	rxq->rxq_ndesc = WM_NRXDESC;
   5819 	if (sc->sc_type == WM_T_82574)
   5820 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5821 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5822 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5823 	else
   5824 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5825 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5826 
   5827 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5828 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5829 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5830 		aprint_error_dev(sc->sc_dev,
   5831 		    "unable to allocate RX control data, error = %d\n",
   5832 		    error);
   5833 		goto fail_0;
   5834 	}
   5835 
   5836 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5837 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5838 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5839 		aprint_error_dev(sc->sc_dev,
   5840 		    "unable to map RX control data, error = %d\n", error);
   5841 		goto fail_1;
   5842 	}
   5843 
   5844 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5845 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5846 		aprint_error_dev(sc->sc_dev,
   5847 		    "unable to create RX control data DMA map, error = %d\n",
   5848 		    error);
   5849 		goto fail_2;
   5850 	}
   5851 
   5852 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5853 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5854 		aprint_error_dev(sc->sc_dev,
   5855 		    "unable to load RX control data DMA map, error = %d\n",
   5856 		    error);
   5857 		goto fail_3;
   5858 	}
   5859 
   5860 	return 0;
   5861 
   5862  fail_3:
   5863 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5864  fail_2:
   5865 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5866 	    rxq_descs_size);
   5867  fail_1:
   5868 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5869  fail_0:
   5870 	return error;
   5871 }
   5872 
   5873 static void
   5874 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5875 {
   5876 
   5877 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5878 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5879 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5880 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5881 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5882 }
   5883 
   5884 
   5885 static int
   5886 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5887 {
   5888 	int i, error;
   5889 
   5890 	/* Create the transmit buffer DMA maps. */
   5891 	WM_TXQUEUELEN(txq) =
   5892 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5893 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5894 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5895 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5896 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5897 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5898 			aprint_error_dev(sc->sc_dev,
   5899 			    "unable to create Tx DMA map %d, error = %d\n",
   5900 			    i, error);
   5901 			goto fail;
   5902 		}
   5903 	}
   5904 
   5905 	return 0;
   5906 
   5907  fail:
   5908 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5909 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5910 			bus_dmamap_destroy(sc->sc_dmat,
   5911 			    txq->txq_soft[i].txs_dmamap);
   5912 	}
   5913 	return error;
   5914 }
   5915 
   5916 static void
   5917 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5918 {
   5919 	int i;
   5920 
   5921 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5922 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5923 			bus_dmamap_destroy(sc->sc_dmat,
   5924 			    txq->txq_soft[i].txs_dmamap);
   5925 	}
   5926 }
   5927 
   5928 static int
   5929 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5930 {
   5931 	int i, error;
   5932 
   5933 	/* Create the receive buffer DMA maps. */
   5934 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5935 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5936 			    MCLBYTES, 0, 0,
   5937 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5938 			aprint_error_dev(sc->sc_dev,
    5939 			    "unable to create Rx DMA map %d, error = %d\n",
   5940 			    i, error);
   5941 			goto fail;
   5942 		}
   5943 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5944 	}
   5945 
   5946 	return 0;
   5947 
   5948  fail:
   5949 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5950 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5951 			bus_dmamap_destroy(sc->sc_dmat,
   5952 			    rxq->rxq_soft[i].rxs_dmamap);
   5953 	}
   5954 	return error;
   5955 }
   5956 
   5957 static void
   5958 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5959 {
   5960 	int i;
   5961 
   5962 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5963 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5964 			bus_dmamap_destroy(sc->sc_dmat,
   5965 			    rxq->rxq_soft[i].rxs_dmamap);
   5966 	}
   5967 }
   5968 
   5969 /*
    5970  * wm_alloc_txrx_queues:
    5971  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5972  */
   5973 static int
   5974 wm_alloc_txrx_queues(struct wm_softc *sc)
   5975 {
   5976 	int i, error, tx_done, rx_done;
   5977 
   5978 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5979 	    KM_SLEEP);
   5980 	if (sc->sc_queue == NULL) {
    5981 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5982 		error = ENOMEM;
   5983 		goto fail_0;
   5984 	}
   5985 
   5986 	/*
   5987 	 * For transmission
   5988 	 */
   5989 	error = 0;
   5990 	tx_done = 0;
   5991 	for (i = 0; i < sc->sc_nqueues; i++) {
   5992 #ifdef WM_EVENT_COUNTERS
   5993 		int j;
   5994 		const char *xname;
   5995 #endif
   5996 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5997 		txq->txq_sc = sc;
   5998 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5999 
   6000 		error = wm_alloc_tx_descs(sc, txq);
   6001 		if (error)
   6002 			break;
   6003 		error = wm_alloc_tx_buffer(sc, txq);
   6004 		if (error) {
   6005 			wm_free_tx_descs(sc, txq);
   6006 			break;
   6007 		}
   6008 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6009 		if (txq->txq_interq == NULL) {
   6010 			wm_free_tx_descs(sc, txq);
   6011 			wm_free_tx_buffer(sc, txq);
   6012 			error = ENOMEM;
   6013 			break;
   6014 		}
   6015 
   6016 #ifdef WM_EVENT_COUNTERS
   6017 		xname = device_xname(sc->sc_dev);
   6018 
   6019 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6020 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6021 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6022 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6023 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6024 
   6025 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6026 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6027 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6028 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6029 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6030 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6031 
   6032 		for (j = 0; j < WM_NTXSEGS; j++) {
   6033 			snprintf(txq->txq_txseg_evcnt_names[j],
   6034 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6035 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6036 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6037 		}
   6038 
   6039 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6040 
   6041 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6042 #endif /* WM_EVENT_COUNTERS */
   6043 
   6044 		tx_done++;
   6045 	}
   6046 	if (error)
   6047 		goto fail_1;
   6048 
   6049 	/*
    6050 	 * For receive
   6051 	 */
   6052 	error = 0;
   6053 	rx_done = 0;
   6054 	for (i = 0; i < sc->sc_nqueues; i++) {
   6055 #ifdef WM_EVENT_COUNTERS
   6056 		const char *xname;
   6057 #endif
   6058 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6059 		rxq->rxq_sc = sc;
   6060 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6061 
   6062 		error = wm_alloc_rx_descs(sc, rxq);
   6063 		if (error)
   6064 			break;
   6065 
   6066 		error = wm_alloc_rx_buffer(sc, rxq);
   6067 		if (error) {
   6068 			wm_free_rx_descs(sc, rxq);
   6069 			break;
   6070 		}
   6071 
   6072 #ifdef WM_EVENT_COUNTERS
   6073 		xname = device_xname(sc->sc_dev);
   6074 
   6075 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6076 
   6077 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6078 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6079 #endif /* WM_EVENT_COUNTERS */
   6080 
   6081 		rx_done++;
   6082 	}
   6083 	if (error)
   6084 		goto fail_2;
   6085 
   6086 	return 0;
   6087 
   6088  fail_2:
   6089 	for (i = 0; i < rx_done; i++) {
   6090 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6091 		wm_free_rx_buffer(sc, rxq);
   6092 		wm_free_rx_descs(sc, rxq);
   6093 		if (rxq->rxq_lock)
   6094 			mutex_obj_free(rxq->rxq_lock);
   6095 	}
   6096  fail_1:
   6097 	for (i = 0; i < tx_done; i++) {
   6098 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6099 		pcq_destroy(txq->txq_interq);
   6100 		wm_free_tx_buffer(sc, txq);
   6101 		wm_free_tx_descs(sc, txq);
   6102 		if (txq->txq_lock)
   6103 			mutex_obj_free(txq->txq_lock);
   6104 	}
   6105 
   6106 	kmem_free(sc->sc_queue,
   6107 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6108  fail_0:
   6109 	return error;
   6110 }
   6111 
   6112 /*
    6113  * wm_free_txrx_queues:
    6114  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6115  */
   6116 static void
   6117 wm_free_txrx_queues(struct wm_softc *sc)
   6118 {
   6119 	int i;
   6120 
   6121 	for (i = 0; i < sc->sc_nqueues; i++) {
   6122 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6123 
   6124 #ifdef WM_EVENT_COUNTERS
   6125 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6126 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6127 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6128 #endif /* WM_EVENT_COUNTERS */
   6129 
   6130 		wm_free_rx_buffer(sc, rxq);
   6131 		wm_free_rx_descs(sc, rxq);
   6132 		if (rxq->rxq_lock)
   6133 			mutex_obj_free(rxq->rxq_lock);
   6134 	}
   6135 
   6136 	for (i = 0; i < sc->sc_nqueues; i++) {
   6137 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6138 		struct mbuf *m;
   6139 #ifdef WM_EVENT_COUNTERS
   6140 		int j;
   6141 
   6142 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6143 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6144 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6145 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6146 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6147 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6148 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6149 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6150 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6151 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6152 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6153 
   6154 		for (j = 0; j < WM_NTXSEGS; j++)
   6155 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6156 
   6157 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6158 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6159 #endif /* WM_EVENT_COUNTERS */
   6160 
   6161 		/* drain txq_interq */
   6162 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6163 			m_freem(m);
   6164 		pcq_destroy(txq->txq_interq);
   6165 
   6166 		wm_free_tx_buffer(sc, txq);
   6167 		wm_free_tx_descs(sc, txq);
   6168 		if (txq->txq_lock)
   6169 			mutex_obj_free(txq->txq_lock);
   6170 	}
   6171 
   6172 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6173 }
   6174 
   6175 static void
   6176 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6177 {
   6178 
   6179 	KASSERT(mutex_owned(txq->txq_lock));
   6180 
   6181 	/* Initialize the transmit descriptor ring. */
   6182 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6183 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6184 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6185 	txq->txq_free = WM_NTXDESC(txq);
   6186 	txq->txq_next = 0;
   6187 }
   6188 
   6189 static void
   6190 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6191     struct wm_txqueue *txq)
   6192 {
   6193 
   6194 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6195 		device_xname(sc->sc_dev), __func__));
   6196 	KASSERT(mutex_owned(txq->txq_lock));
   6197 
   6198 	if (sc->sc_type < WM_T_82543) {
   6199 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6200 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6201 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6202 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6203 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6204 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6205 	} else {
   6206 		int qid = wmq->wmq_id;
   6207 
   6208 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6209 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6210 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6211 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6212 
   6213 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6214 			/*
   6215 			 * Don't write TDT before TCTL.EN is set.
    6216 			 * See the documentation.
   6217 			 */
   6218 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6219 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6220 			    | TXDCTL_WTHRESH(0));
   6221 		else {
   6222 			/* XXX should update with AIM? */
   6223 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6224 			if (sc->sc_type >= WM_T_82540) {
   6225 				/* should be same */
   6226 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6227 			}
   6228 
   6229 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6230 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6231 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6232 		}
   6233 	}
   6234 }
   6235 
   6236 static void
   6237 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6238 {
   6239 	int i;
   6240 
   6241 	KASSERT(mutex_owned(txq->txq_lock));
   6242 
   6243 	/* Initialize the transmit job descriptors. */
   6244 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6245 		txq->txq_soft[i].txs_mbuf = NULL;
   6246 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6247 	txq->txq_snext = 0;
   6248 	txq->txq_sdirty = 0;
   6249 }
   6250 
   6251 static void
   6252 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6253     struct wm_txqueue *txq)
   6254 {
   6255 
   6256 	KASSERT(mutex_owned(txq->txq_lock));
   6257 
   6258 	/*
   6259 	 * Set up some register offsets that are different between
   6260 	 * the i82542 and the i82543 and later chips.
   6261 	 */
   6262 	if (sc->sc_type < WM_T_82543)
   6263 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6264 	else
   6265 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6266 
   6267 	wm_init_tx_descs(sc, txq);
   6268 	wm_init_tx_regs(sc, wmq, txq);
   6269 	wm_init_tx_buffer(sc, txq);
   6270 }
   6271 
   6272 static void
   6273 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6274     struct wm_rxqueue *rxq)
   6275 {
   6276 
   6277 	KASSERT(mutex_owned(rxq->rxq_lock));
   6278 
   6279 	/*
   6280 	 * Initialize the receive descriptor and receive job
   6281 	 * descriptor rings.
   6282 	 */
   6283 	if (sc->sc_type < WM_T_82543) {
   6284 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6285 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6286 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6287 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6288 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6289 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6290 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6291 
   6292 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6293 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6294 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6295 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6296 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6297 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6298 	} else {
   6299 		int qid = wmq->wmq_id;
   6300 
   6301 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6302 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6303 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6304 
   6305 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6306 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6307 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6308 
    6309 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6310 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6311 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
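         			/*
         			 * Worked example (illustrative, assuming the
         			 * usual values MCLBYTES = 2048 and
         			 * SRRCTL_BSIZEPKT_SHIFT = 10): the BSIZEPKT
         			 * field gets 2048 >> 10 = 2, i.e. the buffer
         			 * size is expressed in 1KB units, which is
         			 * why MCLBYTES must be a multiple of
         			 * 1 << SRRCTL_BSIZEPKT_SHIFT (see the check
         			 * above).
         			 */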
   6312 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6313 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6314 			    | RXDCTL_WTHRESH(1));
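         			/*
         			 * PTHRESH/HTHRESH/WTHRESH tune descriptor
         			 * prefetch and write-back batching (per Intel's
         			 * datasheets); WTHRESH(1) requests prompt
         			 * write-back of completed descriptors rather
         			 * than coalescing.
         			 */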
   6315 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6316 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6317 		} else {
   6318 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6319 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6320 			/* XXX should update with AIM? */
   6321 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6322 			/* MUST be the same as RDTR. */
   6323 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6324 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6325 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6326 		}
   6327 	}
   6328 }
   6329 
   6330 static int
   6331 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6332 {
   6333 	struct wm_rxsoft *rxs;
   6334 	int error, i;
   6335 
   6336 	KASSERT(mutex_owned(rxq->rxq_lock));
   6337 
   6338 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6339 		rxs = &rxq->rxq_soft[i];
   6340 		if (rxs->rxs_mbuf == NULL) {
   6341 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6342 				log(LOG_ERR, "%s: unable to allocate or map "
   6343 				    "rx buffer %d, error = %d\n",
   6344 				    device_xname(sc->sc_dev), i, error);
   6345 				/*
   6346 				 * XXX Should attempt to run with fewer receive
   6347 				 * XXX buffers instead of just failing.
   6348 				 */
   6349 				wm_rxdrain(rxq);
   6350 				return ENOMEM;
   6351 			}
   6352 		} else {
   6353 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6354 				wm_init_rxdesc(rxq, i);
   6355 			/*
    6356 			 * For 82575 and newer devices, the RX descriptors
   6357 			 * must be initialized after the setting of RCTL.EN in
   6358 			 * wm_set_filter()
   6359 			 */
   6360 		}
   6361 	}
   6362 	rxq->rxq_ptr = 0;
   6363 	rxq->rxq_discard = 0;
   6364 	WM_RXCHAIN_RESET(rxq);
   6365 
   6366 	return 0;
   6367 }
   6368 
   6369 static int
   6370 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6371     struct wm_rxqueue *rxq)
   6372 {
   6373 
   6374 	KASSERT(mutex_owned(rxq->rxq_lock));
   6375 
   6376 	/*
   6377 	 * Set up some register offsets that are different between
   6378 	 * the i82542 and the i82543 and later chips.
   6379 	 */
   6380 	if (sc->sc_type < WM_T_82543)
   6381 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6382 	else
   6383 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6384 
   6385 	wm_init_rx_regs(sc, wmq, rxq);
   6386 	return wm_init_rx_buffer(sc, rxq);
   6387 }
   6388 
   6389 /*
    6390  * wm_init_txrx_queues:
    6391  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6392  */
   6393 static int
   6394 wm_init_txrx_queues(struct wm_softc *sc)
   6395 {
   6396 	int i, error = 0;
   6397 
   6398 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6399 		device_xname(sc->sc_dev), __func__));
   6400 
   6401 	for (i = 0; i < sc->sc_nqueues; i++) {
   6402 		struct wm_queue *wmq = &sc->sc_queue[i];
   6403 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6404 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6405 
   6406 		/*
   6407 		 * TODO
    6408 		 * Currently, a constant value is used instead of AIM.
    6409 		 * Furthermore, for multiqueue, which uses polling mode, the
    6410 		 * interrupt interval is set lower than the default value.
    6411 		 * More tuning and AIM support are required.
   6412 		 */
   6413 		if (wm_is_using_multiqueue(sc))
   6414 			wmq->wmq_itr = 50;
   6415 		else
   6416 			wmq->wmq_itr = sc->sc_itr_init;
   6417 		wmq->wmq_set_itr = true;
   6418 
   6419 		mutex_enter(txq->txq_lock);
   6420 		wm_init_tx_queue(sc, wmq, txq);
   6421 		mutex_exit(txq->txq_lock);
   6422 
   6423 		mutex_enter(rxq->rxq_lock);
   6424 		error = wm_init_rx_queue(sc, wmq, rxq);
   6425 		mutex_exit(rxq->rxq_lock);
   6426 		if (error)
   6427 			break;
   6428 	}
   6429 
   6430 	return error;
   6431 }
   6432 
   6433 /*
   6434  * wm_tx_offload:
   6435  *
   6436  *	Set up TCP/IP checksumming parameters for the
   6437  *	specified packet.
   6438  */
   6439 static int
   6440 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6441     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6442 {
   6443 	struct mbuf *m0 = txs->txs_mbuf;
   6444 	struct livengood_tcpip_ctxdesc *t;
   6445 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6446 	uint32_t ipcse;
   6447 	struct ether_header *eh;
   6448 	int offset, iphl;
   6449 	uint8_t fields;
   6450 
   6451 	/*
   6452 	 * XXX It would be nice if the mbuf pkthdr had offset
   6453 	 * fields for the protocol headers.
   6454 	 */
   6455 
   6456 	eh = mtod(m0, struct ether_header *);
   6457 	switch (htons(eh->ether_type)) {
   6458 	case ETHERTYPE_IP:
   6459 	case ETHERTYPE_IPV6:
   6460 		offset = ETHER_HDR_LEN;
   6461 		break;
   6462 
   6463 	case ETHERTYPE_VLAN:
   6464 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6465 		break;
   6466 
   6467 	default:
   6468 		/*
   6469 		 * Don't support this protocol or encapsulation.
   6470 		 */
   6471 		*fieldsp = 0;
   6472 		*cmdp = 0;
   6473 		return 0;
   6474 	}
   6475 
   6476 	if ((m0->m_pkthdr.csum_flags &
   6477 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6478 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6479 	} else {
   6480 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6481 	}
   6482 	ipcse = offset + iphl - 1;
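         	/*
         	 * Worked example (plain IPv4 over Ethernet): offset = 14 and
         	 * iphl = 20, so ipcse = 33, the offset of the last byte covered
         	 * by the IP header checksum.
         	 */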
   6483 
   6484 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6485 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6486 	seg = 0;
   6487 	fields = 0;
   6488 
   6489 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6490 		int hlen = offset + iphl;
   6491 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6492 
   6493 		if (__predict_false(m0->m_len <
   6494 				    (hlen + sizeof(struct tcphdr)))) {
   6495 			/*
   6496 			 * TCP/IP headers are not in the first mbuf; we need
   6497 			 * to do this the slow and painful way.  Let's just
   6498 			 * hope this doesn't happen very often.
   6499 			 */
   6500 			struct tcphdr th;
   6501 
   6502 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6503 
   6504 			m_copydata(m0, hlen, sizeof(th), &th);
   6505 			if (v4) {
   6506 				struct ip ip;
   6507 
   6508 				m_copydata(m0, offset, sizeof(ip), &ip);
   6509 				ip.ip_len = 0;
   6510 				m_copyback(m0,
   6511 				    offset + offsetof(struct ip, ip_len),
   6512 				    sizeof(ip.ip_len), &ip.ip_len);
   6513 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6514 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6515 			} else {
   6516 				struct ip6_hdr ip6;
   6517 
   6518 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6519 				ip6.ip6_plen = 0;
   6520 				m_copyback(m0,
   6521 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6522 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6523 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6524 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6525 			}
   6526 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6527 			    sizeof(th.th_sum), &th.th_sum);
   6528 
   6529 			hlen += th.th_off << 2;
   6530 		} else {
   6531 			/*
   6532 			 * TCP/IP headers are in the first mbuf; we can do
   6533 			 * this the easy way.
   6534 			 */
   6535 			struct tcphdr *th;
   6536 
   6537 			if (v4) {
   6538 				struct ip *ip =
   6539 				    (void *)(mtod(m0, char *) + offset);
   6540 				th = (void *)(mtod(m0, char *) + hlen);
   6541 
   6542 				ip->ip_len = 0;
   6543 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6544 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6545 			} else {
   6546 				struct ip6_hdr *ip6 =
   6547 				    (void *)(mtod(m0, char *) + offset);
   6548 				th = (void *)(mtod(m0, char *) + hlen);
   6549 
   6550 				ip6->ip6_plen = 0;
   6551 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6552 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6553 			}
   6554 			hlen += th->th_off << 2;
   6555 		}
   6556 
   6557 		if (v4) {
   6558 			WM_Q_EVCNT_INCR(txq, txtso);
   6559 			cmdlen |= WTX_TCPIP_CMD_IP;
   6560 		} else {
   6561 			WM_Q_EVCNT_INCR(txq, txtso6);
   6562 			ipcse = 0;
   6563 		}
   6564 		cmd |= WTX_TCPIP_CMD_TSE;
   6565 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6566 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6567 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6568 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6569 	}
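
         	/*
         	 * In the TSO paths above we zeroed ip_len/ip6_plen and seeded
         	 * th_sum with a pseudo-header checksum that excludes the length;
         	 * the hardware fills in the per-segment length and completes the
         	 * checksum for each segment it emits.  (A summary of the usual
         	 * TSO contract, not chip-specific documentation.)
         	 */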
   6570 
   6571 	/*
   6572 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6573 	 * offload feature, if we load the context descriptor, we
   6574 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6575 	 */
   6576 
   6577 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6578 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6579 	    WTX_TCPIP_IPCSE(ipcse);
   6580 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6581 		WM_Q_EVCNT_INCR(txq, txipsum);
   6582 		fields |= WTX_IXSM;
   6583 	}
   6584 
   6585 	offset += iphl;
   6586 
   6587 	if (m0->m_pkthdr.csum_flags &
   6588 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6589 		WM_Q_EVCNT_INCR(txq, txtusum);
   6590 		fields |= WTX_TXSM;
   6591 		tucs = WTX_TCPIP_TUCSS(offset) |
   6592 		    WTX_TCPIP_TUCSO(offset +
   6593 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6594 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6595 	} else if ((m0->m_pkthdr.csum_flags &
   6596 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6597 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6598 		fields |= WTX_TXSM;
   6599 		tucs = WTX_TCPIP_TUCSS(offset) |
   6600 		    WTX_TCPIP_TUCSO(offset +
   6601 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6602 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6603 	} else {
   6604 		/* Just initialize it to a valid TCP context. */
   6605 		tucs = WTX_TCPIP_TUCSS(offset) |
   6606 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6607 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6608 	}
   6609 
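         	/*
         	 * Worked example (untagged IPv4/TCP with a 20-byte IP header):
         	 * offset has been advanced to 34 by now, so IPCSS = 14,
         	 * IPCSO = 14 + 10 = 24 (offsetof(struct ip, ip_sum) == 10),
         	 * IPCSE = 33, TUCSS = 34, TUCSO = 34 + 16 = 50
         	 * (offsetof(struct tcphdr, th_sum) == 16) and TUCSE = 0,
         	 * i.e. "checksum to the end of the packet".
         	 */
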
   6610 	/*
    6611 	 * We don't have to write the context descriptor for every packet,
    6612 	 * except on the 82574.  For the 82574, the context descriptor must
    6613 	 * be written for every packet when two descriptor queues are used.
    6614 	 * Writing a context descriptor for every packet adds overhead,
    6615 	 * but it does not cause problems.
   6616 	 */
   6617 	/* Fill in the context descriptor. */
   6618 	t = (struct livengood_tcpip_ctxdesc *)
   6619 	    &txq->txq_descs[txq->txq_next];
   6620 	t->tcpip_ipcs = htole32(ipcs);
   6621 	t->tcpip_tucs = htole32(tucs);
   6622 	t->tcpip_cmdlen = htole32(cmdlen);
   6623 	t->tcpip_seg = htole32(seg);
   6624 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6625 
   6626 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6627 	txs->txs_ndesc++;
   6628 
   6629 	*cmdp = cmd;
   6630 	*fieldsp = fields;
   6631 
   6632 	return 0;
   6633 }
   6634 
   6635 static inline int
   6636 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6637 {
   6638 	struct wm_softc *sc = ifp->if_softc;
   6639 	u_int cpuid = cpu_index(curcpu());
   6640 
   6641 	/*
    6642 	 * Currently, a simple distribution strategy.
    6643 	 * TODO:
    6644 	 * Distribute by flow ID (e.g. the RSS hash value).
   6645 	 */
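         	/*
         	 * Example (hypothetical numbers): with ncpu = 8, sc_nqueues = 4
         	 * and sc_affinity_offset = 0, CPUs 0..7 map to queues 0,1,2,3,
         	 * 0,1,2,3.  The "+ ncpu" keeps the dividend non-negative when
         	 * the offset is subtracted.
         	 */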
    6646 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6647 }
   6648 
   6649 /*
   6650  * wm_start:		[ifnet interface function]
   6651  *
   6652  *	Start packet transmission on the interface.
   6653  */
   6654 static void
   6655 wm_start(struct ifnet *ifp)
   6656 {
   6657 	struct wm_softc *sc = ifp->if_softc;
   6658 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6659 
   6660 #ifdef WM_MPSAFE
   6661 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6662 #endif
   6663 	/*
   6664 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6665 	 */
   6666 
   6667 	mutex_enter(txq->txq_lock);
   6668 	if (!txq->txq_stopping)
   6669 		wm_start_locked(ifp);
   6670 	mutex_exit(txq->txq_lock);
   6671 }
   6672 
   6673 static void
   6674 wm_start_locked(struct ifnet *ifp)
   6675 {
   6676 	struct wm_softc *sc = ifp->if_softc;
   6677 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6678 
   6679 	wm_send_common_locked(ifp, txq, false);
   6680 }
   6681 
   6682 static int
   6683 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6684 {
   6685 	int qid;
   6686 	struct wm_softc *sc = ifp->if_softc;
   6687 	struct wm_txqueue *txq;
   6688 
   6689 	qid = wm_select_txqueue(ifp, m);
   6690 	txq = &sc->sc_queue[qid].wmq_txq;
   6691 
   6692 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6693 		m_freem(m);
   6694 		WM_Q_EVCNT_INCR(txq, txdrop);
   6695 		return ENOBUFS;
   6696 	}
   6697 
   6698 	/*
   6699 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6700 	 */
   6701 	ifp->if_obytes += m->m_pkthdr.len;
   6702 	if (m->m_flags & M_MCAST)
   6703 		ifp->if_omcasts++;
   6704 
   6705 	if (mutex_tryenter(txq->txq_lock)) {
   6706 		if (!txq->txq_stopping)
   6707 			wm_transmit_locked(ifp, txq);
   6708 		mutex_exit(txq->txq_lock);
   6709 	}
   6710 
   6711 	return 0;
   6712 }
   6713 
   6714 static void
   6715 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6716 {
   6717 
   6718 	wm_send_common_locked(ifp, txq, true);
   6719 }
   6720 
   6721 static void
   6722 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6723     bool is_transmit)
   6724 {
   6725 	struct wm_softc *sc = ifp->if_softc;
   6726 	struct mbuf *m0;
   6727 	struct m_tag *mtag;
   6728 	struct wm_txsoft *txs;
   6729 	bus_dmamap_t dmamap;
   6730 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6731 	bus_addr_t curaddr;
   6732 	bus_size_t seglen, curlen;
   6733 	uint32_t cksumcmd;
   6734 	uint8_t cksumfields;
   6735 
   6736 	KASSERT(mutex_owned(txq->txq_lock));
   6737 
   6738 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6739 		return;
   6740 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6741 		return;
   6742 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6743 		return;
   6744 
   6745 	/* Remember the previous number of free descriptors. */
   6746 	ofree = txq->txq_free;
   6747 
   6748 	/*
   6749 	 * Loop through the send queue, setting up transmit descriptors
   6750 	 * until we drain the queue, or use up all available transmit
   6751 	 * descriptors.
   6752 	 */
   6753 	for (;;) {
   6754 		m0 = NULL;
   6755 
   6756 		/* Get a work queue entry. */
   6757 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6758 			wm_txeof(sc, txq);
   6759 			if (txq->txq_sfree == 0) {
   6760 				DPRINTF(WM_DEBUG_TX,
   6761 				    ("%s: TX: no free job descriptors\n",
   6762 					device_xname(sc->sc_dev)));
   6763 				WM_Q_EVCNT_INCR(txq, txsstall);
   6764 				break;
   6765 			}
   6766 		}
   6767 
   6768 		/* Grab a packet off the queue. */
   6769 		if (is_transmit)
   6770 			m0 = pcq_get(txq->txq_interq);
   6771 		else
   6772 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6773 		if (m0 == NULL)
   6774 			break;
   6775 
   6776 		DPRINTF(WM_DEBUG_TX,
   6777 		    ("%s: TX: have packet to transmit: %p\n",
   6778 		    device_xname(sc->sc_dev), m0));
   6779 
   6780 		txs = &txq->txq_soft[txq->txq_snext];
   6781 		dmamap = txs->txs_dmamap;
   6782 
   6783 		use_tso = (m0->m_pkthdr.csum_flags &
   6784 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6785 
   6786 		/*
   6787 		 * So says the Linux driver:
   6788 		 * The controller does a simple calculation to make sure
   6789 		 * there is enough room in the FIFO before initiating the
   6790 		 * DMA for each buffer.  The calc is:
   6791 		 *	4 = ceil(buffer len / MSS)
   6792 		 * To make sure we don't overrun the FIFO, adjust the max
   6793 		 * buffer len if the MSS drops.
   6794 		 */
   6795 		dmamap->dm_maxsegsz =
   6796 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6797 		    ? m0->m_pkthdr.segsz << 2
   6798 		    : WTX_MAX_LEN;
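         		/*
         		 * E.g. (illustrative): with an MSS of 1460 the segments
         		 * are capped at 4 * 1460 = 5840 bytes; for an MSS large
         		 * enough that 4 * MSS >= WTX_MAX_LEN, the WTX_MAX_LEN
         		 * ceiling applies instead.
         		 */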
   6799 
   6800 		/*
   6801 		 * Load the DMA map.  If this fails, the packet either
   6802 		 * didn't fit in the allotted number of segments, or we
   6803 		 * were short on resources.  For the too-many-segments
   6804 		 * case, we simply report an error and drop the packet,
   6805 		 * since we can't sanely copy a jumbo packet to a single
   6806 		 * buffer.
   6807 		 */
   6808 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6809 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6810 		if (error) {
   6811 			if (error == EFBIG) {
   6812 				WM_Q_EVCNT_INCR(txq, txdrop);
   6813 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6814 				    "DMA segments, dropping...\n",
   6815 				    device_xname(sc->sc_dev));
   6816 				wm_dump_mbuf_chain(sc, m0);
   6817 				m_freem(m0);
   6818 				continue;
   6819 			}
   6820 			/*  Short on resources, just stop for now. */
   6821 			DPRINTF(WM_DEBUG_TX,
   6822 			    ("%s: TX: dmamap load failed: %d\n",
   6823 			    device_xname(sc->sc_dev), error));
   6824 			break;
   6825 		}
   6826 
   6827 		segs_needed = dmamap->dm_nsegs;
   6828 		if (use_tso) {
   6829 			/* For sentinel descriptor; see below. */
   6830 			segs_needed++;
   6831 		}
   6832 
   6833 		/*
   6834 		 * Ensure we have enough descriptors free to describe
   6835 		 * the packet.  Note, we always reserve one descriptor
   6836 		 * at the end of the ring due to the semantics of the
   6837 		 * TDT register, plus one more in the event we need
   6838 		 * to load offload context.
   6839 		 */
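         		/*
         		 * E.g. if txq_free is 10, a packet needing 9 segments
         		 * is deferred, since 9 > 10 - 2.
         		 */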
   6840 		if (segs_needed > txq->txq_free - 2) {
   6841 			/*
   6842 			 * Not enough free descriptors to transmit this
   6843 			 * packet.  We haven't committed anything yet,
   6844 			 * so just unload the DMA map, put the packet
   6845 			 * pack on the queue, and punt.  Notify the upper
    6846 			 * back on the queue, and punt.  Notify the upper
   6847 			 */
   6848 			DPRINTF(WM_DEBUG_TX,
   6849 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6850 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6851 			    segs_needed, txq->txq_free - 1));
   6852 			if (!is_transmit)
   6853 				ifp->if_flags |= IFF_OACTIVE;
   6854 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6855 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6856 			WM_Q_EVCNT_INCR(txq, txdstall);
   6857 			break;
   6858 		}
   6859 
   6860 		/*
   6861 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6862 		 * once we know we can transmit the packet, since we
   6863 		 * do some internal FIFO space accounting here.
   6864 		 */
   6865 		if (sc->sc_type == WM_T_82547 &&
   6866 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6867 			DPRINTF(WM_DEBUG_TX,
   6868 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6869 			    device_xname(sc->sc_dev)));
   6870 			if (!is_transmit)
   6871 				ifp->if_flags |= IFF_OACTIVE;
   6872 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6873 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6874 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6875 			break;
   6876 		}
   6877 
   6878 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6879 
   6880 		DPRINTF(WM_DEBUG_TX,
   6881 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6882 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6883 
   6884 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6885 
   6886 		/*
   6887 		 * Store a pointer to the packet so that we can free it
   6888 		 * later.
   6889 		 *
   6890 		 * Initially, we consider the number of descriptors the
   6891 		 * packet uses the number of DMA segments.  This may be
    6892 		 * packet uses to be the number of DMA segments.  This may be
   6893 		 * is used to set the checksum context).
   6894 		 */
   6895 		txs->txs_mbuf = m0;
   6896 		txs->txs_firstdesc = txq->txq_next;
   6897 		txs->txs_ndesc = segs_needed;
   6898 
   6899 		/* Set up offload parameters for this packet. */
   6900 		if (m0->m_pkthdr.csum_flags &
   6901 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6902 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6903 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6904 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6905 					  &cksumfields) != 0) {
   6906 				/* Error message already displayed. */
   6907 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6908 				continue;
   6909 			}
   6910 		} else {
   6911 			cksumcmd = 0;
   6912 			cksumfields = 0;
   6913 		}
   6914 
   6915 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6916 
   6917 		/* Sync the DMA map. */
   6918 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6919 		    BUS_DMASYNC_PREWRITE);
   6920 
   6921 		/* Initialize the transmit descriptor. */
   6922 		for (nexttx = txq->txq_next, seg = 0;
   6923 		     seg < dmamap->dm_nsegs; seg++) {
   6924 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6925 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6926 			     seglen != 0;
   6927 			     curaddr += curlen, seglen -= curlen,
   6928 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6929 				curlen = seglen;
   6930 
   6931 				/*
   6932 				 * So says the Linux driver:
   6933 				 * Work around for premature descriptor
   6934 				 * write-backs in TSO mode.  Append a
   6935 				 * 4-byte sentinel descriptor.
   6936 				 */
   6937 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6938 				    curlen > 8)
   6939 					curlen -= 4;
   6940 
   6941 				wm_set_dma_addr(
   6942 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6943 				txq->txq_descs[nexttx].wtx_cmdlen
   6944 				    = htole32(cksumcmd | curlen);
   6945 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6946 				    = 0;
   6947 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6948 				    = cksumfields;
   6949 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6950 				lasttx = nexttx;
   6951 
   6952 				DPRINTF(WM_DEBUG_TX,
   6953 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6954 				     "len %#04zx\n",
   6955 				    device_xname(sc->sc_dev), nexttx,
   6956 				    (uint64_t)curaddr, curlen));
   6957 			}
   6958 		}
   6959 
   6960 		KASSERT(lasttx != -1);
   6961 
   6962 		/*
   6963 		 * Set up the command byte on the last descriptor of
   6964 		 * the packet.  If we're in the interrupt delay window,
   6965 		 * delay the interrupt.
   6966 		 */
   6967 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6968 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6969 
   6970 		/*
   6971 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6972 		 * up the descriptor to encapsulate the packet for us.
   6973 		 *
   6974 		 * This is only valid on the last descriptor of the packet.
   6975 		 */
   6976 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6977 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6978 			    htole32(WTX_CMD_VLE);
   6979 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6980 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6981 		}
   6982 
   6983 		txs->txs_lastdesc = lasttx;
   6984 
   6985 		DPRINTF(WM_DEBUG_TX,
   6986 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6987 		    device_xname(sc->sc_dev),
   6988 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6989 
   6990 		/* Sync the descriptors we're using. */
   6991 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6992 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6993 
   6994 		/* Give the packet to the chip. */
   6995 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6996 
   6997 		DPRINTF(WM_DEBUG_TX,
   6998 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6999 
   7000 		DPRINTF(WM_DEBUG_TX,
   7001 		    ("%s: TX: finished transmitting packet, job %d\n",
   7002 		    device_xname(sc->sc_dev), txq->txq_snext));
   7003 
   7004 		/* Advance the tx pointer. */
   7005 		txq->txq_free -= txs->txs_ndesc;
   7006 		txq->txq_next = nexttx;
   7007 
   7008 		txq->txq_sfree--;
   7009 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7010 
   7011 		/* Pass the packet to any BPF listeners. */
   7012 		bpf_mtap(ifp, m0);
   7013 	}
   7014 
   7015 	if (m0 != NULL) {
   7016 		if (!is_transmit)
   7017 			ifp->if_flags |= IFF_OACTIVE;
   7018 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7019 		WM_Q_EVCNT_INCR(txq, txdrop);
   7020 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7021 			__func__));
   7022 		m_freem(m0);
   7023 	}
   7024 
   7025 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7026 		/* No more slots; notify upper layer. */
   7027 		if (!is_transmit)
   7028 			ifp->if_flags |= IFF_OACTIVE;
   7029 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7030 	}
   7031 
   7032 	if (txq->txq_free != ofree) {
   7033 		/* Set a watchdog timer in case the chip flakes out. */
   7034 		ifp->if_timer = 5;
   7035 	}
   7036 }
   7037 
   7038 /*
   7039  * wm_nq_tx_offload:
   7040  *
   7041  *	Set up TCP/IP checksumming parameters for the
   7042  *	specified packet, for NEWQUEUE devices
   7043  */
   7044 static int
   7045 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7046     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7047 {
   7048 	struct mbuf *m0 = txs->txs_mbuf;
   7049 	struct m_tag *mtag;
   7050 	uint32_t vl_len, mssidx, cmdc;
   7051 	struct ether_header *eh;
   7052 	int offset, iphl;
   7053 
   7054 	/*
   7055 	 * XXX It would be nice if the mbuf pkthdr had offset
   7056 	 * fields for the protocol headers.
   7057 	 */
   7058 	*cmdlenp = 0;
   7059 	*fieldsp = 0;
   7060 
   7061 	eh = mtod(m0, struct ether_header *);
   7062 	switch (htons(eh->ether_type)) {
   7063 	case ETHERTYPE_IP:
   7064 	case ETHERTYPE_IPV6:
   7065 		offset = ETHER_HDR_LEN;
   7066 		break;
   7067 
   7068 	case ETHERTYPE_VLAN:
   7069 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7070 		break;
   7071 
   7072 	default:
   7073 		/* Don't support this protocol or encapsulation. */
   7074 		*do_csum = false;
   7075 		return 0;
   7076 	}
   7077 	*do_csum = true;
   7078 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7079 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7080 
   7081 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7082 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7083 
   7084 	if ((m0->m_pkthdr.csum_flags &
   7085 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7086 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7087 	} else {
   7088 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7089 	}
   7090 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7091 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7092 
   7093 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7094 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7095 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7096 		*cmdlenp |= NQTX_CMD_VLE;
   7097 	}
   7098 
   7099 	mssidx = 0;
   7100 
   7101 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7102 		int hlen = offset + iphl;
   7103 		int tcp_hlen;
   7104 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7105 
   7106 		if (__predict_false(m0->m_len <
   7107 				    (hlen + sizeof(struct tcphdr)))) {
   7108 			/*
   7109 			 * TCP/IP headers are not in the first mbuf; we need
   7110 			 * to do this the slow and painful way.  Let's just
   7111 			 * hope this doesn't happen very often.
   7112 			 */
   7113 			struct tcphdr th;
   7114 
   7115 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7116 
   7117 			m_copydata(m0, hlen, sizeof(th), &th);
   7118 			if (v4) {
   7119 				struct ip ip;
   7120 
   7121 				m_copydata(m0, offset, sizeof(ip), &ip);
   7122 				ip.ip_len = 0;
   7123 				m_copyback(m0,
   7124 				    offset + offsetof(struct ip, ip_len),
   7125 				    sizeof(ip.ip_len), &ip.ip_len);
   7126 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7127 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7128 			} else {
   7129 				struct ip6_hdr ip6;
   7130 
   7131 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7132 				ip6.ip6_plen = 0;
   7133 				m_copyback(m0,
   7134 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7135 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7136 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7137 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7138 			}
   7139 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7140 			    sizeof(th.th_sum), &th.th_sum);
   7141 
   7142 			tcp_hlen = th.th_off << 2;
   7143 		} else {
   7144 			/*
   7145 			 * TCP/IP headers are in the first mbuf; we can do
   7146 			 * this the easy way.
   7147 			 */
   7148 			struct tcphdr *th;
   7149 
   7150 			if (v4) {
   7151 				struct ip *ip =
   7152 				    (void *)(mtod(m0, char *) + offset);
   7153 				th = (void *)(mtod(m0, char *) + hlen);
   7154 
   7155 				ip->ip_len = 0;
   7156 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7157 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7158 			} else {
   7159 				struct ip6_hdr *ip6 =
   7160 				    (void *)(mtod(m0, char *) + offset);
   7161 				th = (void *)(mtod(m0, char *) + hlen);
   7162 
   7163 				ip6->ip6_plen = 0;
   7164 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7165 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7166 			}
   7167 			tcp_hlen = th->th_off << 2;
   7168 		}
   7169 		hlen += tcp_hlen;
   7170 		*cmdlenp |= NQTX_CMD_TSE;
   7171 
   7172 		if (v4) {
   7173 			WM_Q_EVCNT_INCR(txq, txtso);
   7174 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7175 		} else {
   7176 			WM_Q_EVCNT_INCR(txq, txtso6);
   7177 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7178 		}
   7179 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7180 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7181 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7182 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7183 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7184 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7185 	} else {
   7186 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7187 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7188 	}
   7189 
   7190 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7191 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7192 		cmdc |= NQTXC_CMD_IP4;
   7193 	}
   7194 
   7195 	if (m0->m_pkthdr.csum_flags &
   7196 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7197 		WM_Q_EVCNT_INCR(txq, txtusum);
   7198 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7199 			cmdc |= NQTXC_CMD_TCP;
   7200 		} else {
   7201 			cmdc |= NQTXC_CMD_UDP;
   7202 		}
   7203 		cmdc |= NQTXC_CMD_IP4;
   7204 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7205 	}
   7206 	if (m0->m_pkthdr.csum_flags &
   7207 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7208 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7209 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7210 			cmdc |= NQTXC_CMD_TCP;
   7211 		} else {
   7212 			cmdc |= NQTXC_CMD_UDP;
   7213 		}
   7214 		cmdc |= NQTXC_CMD_IP6;
   7215 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7216 	}
   7217 
   7218 	/*
    7219 	 * We don't have to write a context descriptor for every packet on
    7220 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7221 	 * I210 and I211. It is enough to write it once per Tx queue for
    7222 	 * these controllers.
    7223 	 * Writing a context descriptor for every packet adds overhead,
    7224 	 * but it does not cause problems.
   7225 	 */
   7226 	/* Fill in the context descriptor. */
   7227 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7228 	    htole32(vl_len);
   7229 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7230 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7231 	    htole32(cmdc);
   7232 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7233 	    htole32(mssidx);
   7234 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7235 	DPRINTF(WM_DEBUG_TX,
   7236 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7237 	    txq->txq_next, 0, vl_len));
   7238 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7239 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7240 	txs->txs_ndesc++;
   7241 	return 0;
   7242 }
   7243 
   7244 /*
   7245  * wm_nq_start:		[ifnet interface function]
   7246  *
   7247  *	Start packet transmission on the interface for NEWQUEUE devices
   7248  */
   7249 static void
   7250 wm_nq_start(struct ifnet *ifp)
   7251 {
   7252 	struct wm_softc *sc = ifp->if_softc;
   7253 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7254 
   7255 #ifdef WM_MPSAFE
   7256 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7257 #endif
   7258 	/*
   7259 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7260 	 */
   7261 
   7262 	mutex_enter(txq->txq_lock);
   7263 	if (!txq->txq_stopping)
   7264 		wm_nq_start_locked(ifp);
   7265 	mutex_exit(txq->txq_lock);
   7266 }
   7267 
   7268 static void
   7269 wm_nq_start_locked(struct ifnet *ifp)
   7270 {
   7271 	struct wm_softc *sc = ifp->if_softc;
   7272 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7273 
   7274 	wm_nq_send_common_locked(ifp, txq, false);
   7275 }
   7276 
   7277 static int
   7278 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7279 {
   7280 	int qid;
   7281 	struct wm_softc *sc = ifp->if_softc;
   7282 	struct wm_txqueue *txq;
   7283 
   7284 	qid = wm_select_txqueue(ifp, m);
   7285 	txq = &sc->sc_queue[qid].wmq_txq;
   7286 
   7287 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7288 		m_freem(m);
   7289 		WM_Q_EVCNT_INCR(txq, txdrop);
   7290 		return ENOBUFS;
   7291 	}
   7292 
   7293 	/*
   7294 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7295 	 */
   7296 	ifp->if_obytes += m->m_pkthdr.len;
   7297 	if (m->m_flags & M_MCAST)
   7298 		ifp->if_omcasts++;
   7299 
   7300 	/*
    7301 	 * There are two situations in which this mutex_tryenter() can
    7302 	 * fail at run time:
    7303 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7304 	 *     (2) contention with the deferred if_start softint
    7305 	 *         (wm_handle_queue())
    7306 	 * In either case, the last packet enqueued to txq->txq_interq is
    7307 	 * dequeued later by wm_deferred_start_locked(), so it does not get
    7308 	 * stuck.
   7309 	 */
   7310 	if (mutex_tryenter(txq->txq_lock)) {
   7311 		if (!txq->txq_stopping)
   7312 			wm_nq_transmit_locked(ifp, txq);
   7313 		mutex_exit(txq->txq_lock);
   7314 	}
   7315 
   7316 	return 0;
   7317 }
   7318 
   7319 static void
   7320 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7321 {
   7322 
   7323 	wm_nq_send_common_locked(ifp, txq, true);
   7324 }
   7325 
   7326 static void
   7327 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7328     bool is_transmit)
   7329 {
   7330 	struct wm_softc *sc = ifp->if_softc;
   7331 	struct mbuf *m0;
   7332 	struct m_tag *mtag;
   7333 	struct wm_txsoft *txs;
   7334 	bus_dmamap_t dmamap;
   7335 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7336 	bool do_csum, sent;
   7337 
   7338 	KASSERT(mutex_owned(txq->txq_lock));
   7339 
   7340 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7341 		return;
   7342 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7343 		return;
   7344 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7345 		return;
   7346 
   7347 	sent = false;
   7348 
   7349 	/*
   7350 	 * Loop through the send queue, setting up transmit descriptors
   7351 	 * until we drain the queue, or use up all available transmit
   7352 	 * descriptors.
   7353 	 */
   7354 	for (;;) {
   7355 		m0 = NULL;
   7356 
   7357 		/* Get a work queue entry. */
   7358 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7359 			wm_txeof(sc, txq);
   7360 			if (txq->txq_sfree == 0) {
   7361 				DPRINTF(WM_DEBUG_TX,
   7362 				    ("%s: TX: no free job descriptors\n",
   7363 					device_xname(sc->sc_dev)));
   7364 				WM_Q_EVCNT_INCR(txq, txsstall);
   7365 				break;
   7366 			}
   7367 		}
   7368 
   7369 		/* Grab a packet off the queue. */
   7370 		if (is_transmit)
   7371 			m0 = pcq_get(txq->txq_interq);
   7372 		else
   7373 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7374 		if (m0 == NULL)
   7375 			break;
   7376 
   7377 		DPRINTF(WM_DEBUG_TX,
   7378 		    ("%s: TX: have packet to transmit: %p\n",
   7379 		    device_xname(sc->sc_dev), m0));
   7380 
   7381 		txs = &txq->txq_soft[txq->txq_snext];
   7382 		dmamap = txs->txs_dmamap;
   7383 
   7384 		/*
   7385 		 * Load the DMA map.  If this fails, the packet either
   7386 		 * didn't fit in the allotted number of segments, or we
   7387 		 * were short on resources.  For the too-many-segments
   7388 		 * case, we simply report an error and drop the packet,
   7389 		 * since we can't sanely copy a jumbo packet to a single
   7390 		 * buffer.
   7391 		 */
   7392 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7393 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7394 		if (error) {
   7395 			if (error == EFBIG) {
   7396 				WM_Q_EVCNT_INCR(txq, txdrop);
   7397 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7398 				    "DMA segments, dropping...\n",
   7399 				    device_xname(sc->sc_dev));
   7400 				wm_dump_mbuf_chain(sc, m0);
   7401 				m_freem(m0);
   7402 				continue;
   7403 			}
   7404 			/* Short on resources, just stop for now. */
   7405 			DPRINTF(WM_DEBUG_TX,
   7406 			    ("%s: TX: dmamap load failed: %d\n",
   7407 			    device_xname(sc->sc_dev), error));
   7408 			break;
   7409 		}
   7410 
   7411 		segs_needed = dmamap->dm_nsegs;
   7412 
   7413 		/*
   7414 		 * Ensure we have enough descriptors free to describe
   7415 		 * the packet.  Note, we always reserve one descriptor
   7416 		 * at the end of the ring due to the semantics of the
   7417 		 * TDT register, plus one more in the event we need
   7418 		 * to load offload context.
   7419 		 */
   7420 		if (segs_needed > txq->txq_free - 2) {
   7421 			/*
   7422 			 * Not enough free descriptors to transmit this
   7423 			 * packet.  We haven't committed anything yet,
   7424 			 * so just unload the DMA map, put the packet
    7425 			 * back on the queue, and punt.  Notify the upper
   7426 			 * layer that there are no more slots left.
   7427 			 */
   7428 			DPRINTF(WM_DEBUG_TX,
   7429 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7430 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7431 			    segs_needed, txq->txq_free - 1));
   7432 			if (!is_transmit)
   7433 				ifp->if_flags |= IFF_OACTIVE;
   7434 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7435 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7436 			WM_Q_EVCNT_INCR(txq, txdstall);
   7437 			break;
   7438 		}
   7439 
   7440 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7441 
   7442 		DPRINTF(WM_DEBUG_TX,
   7443 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7444 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7445 
   7446 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7447 
   7448 		/*
   7449 		 * Store a pointer to the packet so that we can free it
   7450 		 * later.
   7451 		 *
   7452 		 * Initially, we consider the number of descriptors the
    7453 		 * packet uses to be the number of DMA segments.  This may be
   7454 		 * incremented by 1 if we do checksum offload (a descriptor
   7455 		 * is used to set the checksum context).
   7456 		 */
   7457 		txs->txs_mbuf = m0;
   7458 		txs->txs_firstdesc = txq->txq_next;
   7459 		txs->txs_ndesc = segs_needed;
   7460 
   7461 		/* Set up offload parameters for this packet. */
   7462 		uint32_t cmdlen, fields, dcmdlen;
   7463 		if (m0->m_pkthdr.csum_flags &
   7464 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7465 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7466 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7467 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7468 			    &do_csum) != 0) {
   7469 				/* Error message already displayed. */
   7470 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7471 				continue;
   7472 			}
   7473 		} else {
   7474 			do_csum = false;
   7475 			cmdlen = 0;
   7476 			fields = 0;
   7477 		}
   7478 
   7479 		/* Sync the DMA map. */
   7480 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7481 		    BUS_DMASYNC_PREWRITE);
   7482 
   7483 		/* Initialize the first transmit descriptor. */
   7484 		nexttx = txq->txq_next;
   7485 		if (!do_csum) {
   7486 			/* setup a legacy descriptor */
   7487 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7488 			    dmamap->dm_segs[0].ds_addr);
   7489 			txq->txq_descs[nexttx].wtx_cmdlen =
   7490 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7491 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7492 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7493 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7494 			    NULL) {
   7495 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7496 				    htole32(WTX_CMD_VLE);
   7497 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7498 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7499 			} else {
   7500 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7501 			}
   7502 			dcmdlen = 0;
   7503 		} else {
   7504 			/* setup an advanced data descriptor */
   7505 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7506 			    htole64(dmamap->dm_segs[0].ds_addr);
   7507 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7508 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7509 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7510 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7511 			    htole32(fields);
   7512 			DPRINTF(WM_DEBUG_TX,
   7513 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7514 			    device_xname(sc->sc_dev), nexttx,
   7515 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7516 			DPRINTF(WM_DEBUG_TX,
   7517 			    ("\t 0x%08x%08x\n", fields,
   7518 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7519 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7520 		}
   7521 
   7522 		lasttx = nexttx;
   7523 		nexttx = WM_NEXTTX(txq, nexttx);
   7524 		/*
    7525 		 * Fill in the next descriptors.  The legacy and advanced
    7526 		 * formats are the same from here on.
   7527 		 */
   7528 		for (seg = 1; seg < dmamap->dm_nsegs;
   7529 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7530 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7531 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7532 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7533 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7534 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7535 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7536 			lasttx = nexttx;
   7537 
   7538 			DPRINTF(WM_DEBUG_TX,
   7539 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7540 			     "len %#04zx\n",
   7541 			    device_xname(sc->sc_dev), nexttx,
   7542 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7543 			    dmamap->dm_segs[seg].ds_len));
   7544 		}
   7545 
   7546 		KASSERT(lasttx != -1);
   7547 
   7548 		/*
   7549 		 * Set up the command byte on the last descriptor of
   7550 		 * the packet.  If we're in the interrupt delay window,
   7551 		 * delay the interrupt.
   7552 		 */
   7553 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7554 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7555 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7556 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7557 
   7558 		txs->txs_lastdesc = lasttx;
   7559 
   7560 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7561 		    device_xname(sc->sc_dev),
   7562 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7563 
   7564 		/* Sync the descriptors we're using. */
   7565 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7566 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7567 
   7568 		/* Give the packet to the chip. */
   7569 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7570 		sent = true;
   7571 
   7572 		DPRINTF(WM_DEBUG_TX,
   7573 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7574 
   7575 		DPRINTF(WM_DEBUG_TX,
   7576 		    ("%s: TX: finished transmitting packet, job %d\n",
   7577 		    device_xname(sc->sc_dev), txq->txq_snext));
   7578 
   7579 		/* Advance the tx pointer. */
   7580 		txq->txq_free -= txs->txs_ndesc;
   7581 		txq->txq_next = nexttx;
   7582 
   7583 		txq->txq_sfree--;
   7584 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7585 
   7586 		/* Pass the packet to any BPF listeners. */
   7587 		bpf_mtap(ifp, m0);
   7588 	}
   7589 
   7590 	if (m0 != NULL) {
   7591 		if (!is_transmit)
   7592 			ifp->if_flags |= IFF_OACTIVE;
   7593 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7594 		WM_Q_EVCNT_INCR(txq, txdrop);
   7595 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7596 			__func__));
   7597 		m_freem(m0);
   7598 	}
   7599 
   7600 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7601 		/* No more slots; notify upper layer. */
   7602 		if (!is_transmit)
   7603 			ifp->if_flags |= IFF_OACTIVE;
   7604 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7605 	}
   7606 
   7607 	if (sent) {
   7608 		/* Set a watchdog timer in case the chip flakes out. */
   7609 		ifp->if_timer = 5;
   7610 	}
   7611 }
   7612 
   7613 static void
   7614 wm_deferred_start_locked(struct wm_txqueue *txq)
   7615 {
   7616 	struct wm_softc *sc = txq->txq_sc;
   7617 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7618 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7619 	int qid = wmq->wmq_id;
   7620 
   7621 	KASSERT(mutex_owned(txq->txq_lock));
   7622 
    7623 	/* The caller holds txq_lock and is responsible for releasing it. */
    7624 	if (txq->txq_stopping)
    7625 		return;
   7627 
   7628 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7629 		/* XXX Needed for ALTQ or single-CPU systems. */
   7630 		if (qid == 0)
   7631 			wm_nq_start_locked(ifp);
   7632 		wm_nq_transmit_locked(ifp, txq);
   7633 	} else {
    7634 		/* XXX Needed for ALTQ or single-CPU systems. */
   7635 		if (qid == 0)
   7636 			wm_start_locked(ifp);
   7637 		wm_transmit_locked(ifp, txq);
   7638 	}
   7639 }
   7640 
   7641 /* Interrupt */
   7642 
   7643 /*
   7644  * wm_txeof:
   7645  *
   7646  *	Helper; handle transmit interrupts.
   7647  */
   7648 static int
   7649 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7650 {
   7651 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7652 	struct wm_txsoft *txs;
   7653 	bool processed = false;
   7654 	int count = 0;
   7655 	int i;
   7656 	uint8_t status;
   7657 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7658 
   7659 	KASSERT(mutex_owned(txq->txq_lock));
   7660 
   7661 	if (txq->txq_stopping)
   7662 		return 0;
   7663 
   7664 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7665 	/* For ALTQ and legacy (non-multiqueue) Ethernet controllers. */
   7666 	if (wmq->wmq_id == 0)
   7667 		ifp->if_flags &= ~IFF_OACTIVE;
   7668 
   7669 	/*
   7670 	 * Go through the Tx list and free mbufs for those
   7671 	 * frames which have been transmitted.
   7672 	 */
   7673 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7674 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7675 		txs = &txq->txq_soft[i];
   7676 
   7677 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7678 			device_xname(sc->sc_dev), i));
   7679 
   7680 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7681 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7682 
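         		/*
         		 * The DD (descriptor done) bit is written back by the
         		 * hardware when a descriptor completes; we request that
         		 * write-back only on the last descriptor of each packet
         		 * (WTX_CMD_RS, set in the send paths above), so checking
         		 * txs_lastdesc is sufficient.
         		 */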
   7683 		status =
   7684 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7685 		if ((status & WTX_ST_DD) == 0) {
   7686 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7687 			    BUS_DMASYNC_PREREAD);
   7688 			break;
   7689 		}
   7690 
   7691 		processed = true;
   7692 		count++;
   7693 		DPRINTF(WM_DEBUG_TX,
   7694 		    ("%s: TX: job %d done: descs %d..%d\n",
   7695 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7696 		    txs->txs_lastdesc));
   7697 
   7698 		/*
   7699 		 * XXX We should probably be using the statistics
   7700 		 * XXX registers, but I don't know if they exist
   7701 		 * XXX on chips before the i82544.
   7702 		 */
   7703 
   7704 #ifdef WM_EVENT_COUNTERS
   7705 		if (status & WTX_ST_TU)
   7706 			WM_Q_EVCNT_INCR(txq, tu);
   7707 #endif /* WM_EVENT_COUNTERS */
   7708 
   7709 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7710 			ifp->if_oerrors++;
   7711 			if (status & WTX_ST_LC)
   7712 				log(LOG_WARNING, "%s: late collision\n",
   7713 				    device_xname(sc->sc_dev));
   7714 			else if (status & WTX_ST_EC) {
   7715 				ifp->if_collisions += 16;
   7716 				log(LOG_WARNING, "%s: excessive collisions\n",
   7717 				    device_xname(sc->sc_dev));
   7718 			}
   7719 		} else
   7720 			ifp->if_opackets++;
   7721 
   7722 		txq->txq_packets++;
   7723 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7724 
   7725 		txq->txq_free += txs->txs_ndesc;
   7726 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7727 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7728 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7729 		m_freem(txs->txs_mbuf);
   7730 		txs->txs_mbuf = NULL;
   7731 	}
   7732 
   7733 	/* Update the dirty transmit buffer pointer. */
   7734 	txq->txq_sdirty = i;
   7735 	DPRINTF(WM_DEBUG_TX,
   7736 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7737 
   7738 	if (count != 0)
   7739 		rnd_add_uint32(&sc->rnd_source, count);
   7740 
   7741 	/*
   7742 	 * If there are no more pending transmissions, cancel the watchdog
   7743 	 * timer.
   7744 	 */
   7745 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7746 		ifp->if_timer = 0;
   7747 
   7748 	return processed;
   7749 }
   7750 
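         /*
          * The wm_rxdesc_get_*() helpers below hide the three Rx descriptor
          * layouts this driver handles: the legacy format (wrx_*), the 82574
          * extended format (erx_*) and the 82575+ "advanced" format (nqrx_*),
          * so that their callers can stay format-agnostic.
          */
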
   7751 static inline uint32_t
   7752 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7753 {
   7754 	struct wm_softc *sc = rxq->rxq_sc;
   7755 
   7756 	if (sc->sc_type == WM_T_82574)
   7757 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7758 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7759 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7760 	else
   7761 		return rxq->rxq_descs[idx].wrx_status;
   7762 }
   7763 
   7764 static inline uint32_t
   7765 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7766 {
   7767 	struct wm_softc *sc = rxq->rxq_sc;
   7768 
   7769 	if (sc->sc_type == WM_T_82574)
   7770 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7771 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7772 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7773 	else
   7774 		return rxq->rxq_descs[idx].wrx_errors;
   7775 }
   7776 
   7777 static inline uint16_t
   7778 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7779 {
   7780 	struct wm_softc *sc = rxq->rxq_sc;
   7781 
   7782 	if (sc->sc_type == WM_T_82574)
   7783 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7784 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7785 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7786 	else
   7787 		return rxq->rxq_descs[idx].wrx_special;
   7788 }
   7789 
   7790 static inline int
   7791 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7792 {
   7793 	struct wm_softc *sc = rxq->rxq_sc;
   7794 
   7795 	if (sc->sc_type == WM_T_82574)
   7796 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7797 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7798 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7799 	else
   7800 		return rxq->rxq_descs[idx].wrx_len;
   7801 }
   7802 
   7803 #ifdef WM_DEBUG
   7804 static inline uint32_t
   7805 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7806 {
   7807 	struct wm_softc *sc = rxq->rxq_sc;
   7808 
   7809 	if (sc->sc_type == WM_T_82574)
   7810 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7811 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7812 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7813 	else
   7814 		return 0;
   7815 }
   7816 
   7817 static inline uint8_t
   7818 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7819 {
   7820 	struct wm_softc *sc = rxq->rxq_sc;
   7821 
   7822 	if (sc->sc_type == WM_T_82574)
   7823 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7824 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7825 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7826 	else
   7827 		return 0;
   7828 }
   7829 #endif /* WM_DEBUG */
   7830 
   7831 static inline bool
   7832 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7833     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7834 {
   7835 
   7836 	if (sc->sc_type == WM_T_82574)
   7837 		return (status & ext_bit) != 0;
   7838 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7839 		return (status & nq_bit) != 0;
   7840 	else
   7841 		return (status & legacy_bit) != 0;
   7842 }
   7843 
   7844 static inline bool
   7845 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7846     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7847 {
   7848 
   7849 	if (sc->sc_type == WM_T_82574)
   7850 		return (error & ext_bit) != 0;
   7851 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7852 		return (error & nq_bit) != 0;
   7853 	else
   7854 		return (error & legacy_bit) != 0;
   7855 }
   7856 
   7857 static inline bool
   7858 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7859 {
   7860 
   7861 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7862 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7863 		return true;
   7864 	else
   7865 		return false;
   7866 }
   7867 
   7868 static inline bool
   7869 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7870 {
   7871 	struct wm_softc *sc = rxq->rxq_sc;
   7872 
   7873 	/* XXXX missing error bit for newqueue? */
   7874 	if (wm_rxdesc_is_set_error(sc, errors,
   7875 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7876 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7877 		NQRXC_ERROR_RXE)) {
   7878 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7879 			log(LOG_WARNING, "%s: symbol error\n",
   7880 			    device_xname(sc->sc_dev));
   7881 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7882 			log(LOG_WARNING, "%s: receive sequence error\n",
   7883 			    device_xname(sc->sc_dev));
   7884 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7885 			log(LOG_WARNING, "%s: CRC error\n",
   7886 			    device_xname(sc->sc_dev));
   7887 		return true;
   7888 	}
   7889 
   7890 	return false;
   7891 }
   7892 
   7893 static inline bool
   7894 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7895 {
   7896 	struct wm_softc *sc = rxq->rxq_sc;
   7897 
   7898 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7899 		NQRXC_STATUS_DD)) {
   7900 		/* We have processed all of the receive descriptors. */
   7901 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7902 		return false;
   7903 	}
   7904 
   7905 	return true;
   7906 }
   7907 
   7908 static inline bool
   7909 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7910     struct mbuf *m)
   7911 {
   7912 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7913 
   7914 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7915 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7916 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7917 	}
   7918 
   7919 	return true;
   7920 }
   7921 
   7922 static inline void
   7923 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7924     uint32_t errors, struct mbuf *m)
   7925 {
   7926 	struct wm_softc *sc = rxq->rxq_sc;
   7927 
   7928 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7929 		if (wm_rxdesc_is_set_status(sc, status,
   7930 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7931 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7932 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7933 			if (wm_rxdesc_is_set_error(sc, errors,
   7934 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7935 				m->m_pkthdr.csum_flags |=
   7936 					M_CSUM_IPv4_BAD;
   7937 		}
   7938 		if (wm_rxdesc_is_set_status(sc, status,
   7939 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7940 			/*
   7941 			 * Note: we don't know if this was TCP or UDP,
   7942 			 * so we just set both bits, and expect the
   7943 			 * upper layers to deal.
   7944 			 */
   7945 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7946 			m->m_pkthdr.csum_flags |=
   7947 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7948 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7949 			if (wm_rxdesc_is_set_error(sc, errors,
   7950 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7951 				m->m_pkthdr.csum_flags |=
   7952 					M_CSUM_TCP_UDP_BAD;
   7953 		}
   7954 	}
   7955 }
   7956 
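/*
 * The flags set above are advisory: the hardware verified the
 * checksum, and the stack trusts M_CSUM_IPv4/M_CSUM_TCPv4/... unless
 * the matching *_BAD bit is also present.  A sketch of the consumer
 * side follows; the function is hypothetical (the real checks live in
 * the IP and TCP/UDP input paths) and only illustrates how the flag
 * pairs are meant to be read.
 */
#if 0
static bool
example_rx_csum_ok(const struct mbuf *m)
{
	const int flags = m->m_pkthdr.csum_flags;

	if ((flags & M_CSUM_IPv4) != 0 && (flags & M_CSUM_IPv4_BAD) != 0)
		return false;	/* hardware flagged the IP header checksum */
	if ((flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4 |
	    M_CSUM_TCPv6 | M_CSUM_UDPv6)) != 0 &&
	    (flags & M_CSUM_TCP_UDP_BAD) != 0)
		return false;	/* hardware flagged the TCP/UDP checksum */
	return true;
}
#endif
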
   7957 /*
   7958  * wm_rxeof:
   7959  *
   7960  *	Helper; handle receive interrupts.
   7961  */
   7962 static void
   7963 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7964 {
   7965 	struct wm_softc *sc = rxq->rxq_sc;
   7966 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7967 	struct wm_rxsoft *rxs;
   7968 	struct mbuf *m;
   7969 	int i, len;
   7970 	int count = 0;
   7971 	uint32_t status, errors;
   7972 	uint16_t vlantag;
   7973 
   7974 	KASSERT(mutex_owned(rxq->rxq_lock));
   7975 
   7976 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7977 		if (limit-- == 0) {
   7978 			rxq->rxq_ptr = i;
   7979 			break;
   7980 		}
   7981 
   7982 		rxs = &rxq->rxq_soft[i];
   7983 
   7984 		DPRINTF(WM_DEBUG_RX,
   7985 		    ("%s: RX: checking descriptor %d\n",
   7986 		    device_xname(sc->sc_dev), i));
    7987 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7988 
   7989 		status = wm_rxdesc_get_status(rxq, i);
   7990 		errors = wm_rxdesc_get_errors(rxq, i);
   7991 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7992 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7993 #ifdef WM_DEBUG
   7994 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7995 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7996 #endif
   7997 
   7998 		if (!wm_rxdesc_dd(rxq, i, status)) {
    7999 			/*
    8000 			 * Update the receive pointer while holding rxq_lock
    8001 			 * so it stays consistent with the packet counters.
    8002 			 */
   8003 			rxq->rxq_ptr = i;
   8004 			break;
   8005 		}
   8006 
   8007 		count++;
   8008 		if (__predict_false(rxq->rxq_discard)) {
   8009 			DPRINTF(WM_DEBUG_RX,
   8010 			    ("%s: RX: discarding contents of descriptor %d\n",
   8011 			    device_xname(sc->sc_dev), i));
   8012 			wm_init_rxdesc(rxq, i);
   8013 			if (wm_rxdesc_is_eop(rxq, status)) {
   8014 				/* Reset our state. */
   8015 				DPRINTF(WM_DEBUG_RX,
   8016 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8017 				    device_xname(sc->sc_dev)));
   8018 				rxq->rxq_discard = 0;
   8019 			}
   8020 			continue;
   8021 		}
   8022 
   8023 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8024 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8025 
   8026 		m = rxs->rxs_mbuf;
   8027 
   8028 		/*
   8029 		 * Add a new receive buffer to the ring, unless of
   8030 		 * course the length is zero. Treat the latter as a
   8031 		 * failed mapping.
   8032 		 */
   8033 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8034 			/*
   8035 			 * Failed, throw away what we've done so
   8036 			 * far, and discard the rest of the packet.
   8037 			 */
   8038 			ifp->if_ierrors++;
   8039 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8040 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8041 			wm_init_rxdesc(rxq, i);
   8042 			if (!wm_rxdesc_is_eop(rxq, status))
   8043 				rxq->rxq_discard = 1;
   8044 			if (rxq->rxq_head != NULL)
   8045 				m_freem(rxq->rxq_head);
   8046 			WM_RXCHAIN_RESET(rxq);
   8047 			DPRINTF(WM_DEBUG_RX,
   8048 			    ("%s: RX: Rx buffer allocation failed, "
   8049 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8050 			    rxq->rxq_discard ? " (discard)" : ""));
   8051 			continue;
   8052 		}
   8053 
   8054 		m->m_len = len;
   8055 		rxq->rxq_len += len;
   8056 		DPRINTF(WM_DEBUG_RX,
   8057 		    ("%s: RX: buffer at %p len %d\n",
   8058 		    device_xname(sc->sc_dev), m->m_data, len));
   8059 
   8060 		/* If this is not the end of the packet, keep looking. */
   8061 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8062 			WM_RXCHAIN_LINK(rxq, m);
   8063 			DPRINTF(WM_DEBUG_RX,
   8064 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8065 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8066 			continue;
   8067 		}
   8068 
    8069 		/*
    8070 		 * Okay, we have the entire packet now.  Except on the
    8071 		 * I350 and I21[01], the chip is configured to include
    8072 		 * the FCS (not all chips can be configured to strip
    8073 		 * it), so we need to trim it.  We may also need to
    8074 		 * adjust the length of the previous mbuf in the chain
    8075 		 * if the current mbuf is too short.  Due to an erratum,
    8076 		 * the RCTL_SECRC bit in the RCTL register is always set
    8077 		 * on the I350, so we don't trim the FCS there.
    8078 		 */
   8079 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8080 		    && (sc->sc_type != WM_T_I210)
   8081 		    && (sc->sc_type != WM_T_I211)) {
   8082 			if (m->m_len < ETHER_CRC_LEN) {
   8083 				rxq->rxq_tail->m_len
   8084 				    -= (ETHER_CRC_LEN - m->m_len);
   8085 				m->m_len = 0;
   8086 			} else
   8087 				m->m_len -= ETHER_CRC_LEN;
   8088 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8089 		} else
   8090 			len = rxq->rxq_len;
   8091 
   8092 		WM_RXCHAIN_LINK(rxq, m);
   8093 
   8094 		*rxq->rxq_tailp = NULL;
   8095 		m = rxq->rxq_head;
   8096 
   8097 		WM_RXCHAIN_RESET(rxq);
   8098 
   8099 		DPRINTF(WM_DEBUG_RX,
   8100 		    ("%s: RX: have entire packet, len -> %d\n",
   8101 		    device_xname(sc->sc_dev), len));
   8102 
   8103 		/* If an error occurred, update stats and drop the packet. */
   8104 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8105 			m_freem(m);
   8106 			continue;
   8107 		}
   8108 
   8109 		/* No errors.  Receive the packet. */
   8110 		m_set_rcvif(m, ifp);
   8111 		m->m_pkthdr.len = len;
    8112 		/*
    8113 		 * TODO
    8114 		 * We should save the rsshash and rsstype in this mbuf.
    8115 		 */
   8116 		DPRINTF(WM_DEBUG_RX,
   8117 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8118 			device_xname(sc->sc_dev), rsstype, rsshash));
   8119 
   8120 		/*
   8121 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8122 		 * for us.  Associate the tag with the packet.
   8123 		 */
   8124 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8125 			continue;
   8126 
   8127 		/* Set up checksum info for this packet. */
   8128 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8129 		/*
    8130 		 * Update the receive pointer while holding rxq_lock so it
    8131 		 * stays consistent with the packet and byte counters.
    8132 		 */
   8133 		rxq->rxq_ptr = i;
   8134 		rxq->rxq_packets++;
   8135 		rxq->rxq_bytes += len;
   8136 		mutex_exit(rxq->rxq_lock);
   8137 
   8138 		/* Pass it on. */
   8139 		if_percpuq_enqueue(sc->sc_ipq, m);
   8140 
   8141 		mutex_enter(rxq->rxq_lock);
   8142 
   8143 		if (rxq->rxq_stopping)
   8144 			break;
   8145 	}
   8146 
   8147 	if (count != 0)
   8148 		rnd_add_uint32(&sc->rnd_source, count);
   8149 
   8150 	DPRINTF(WM_DEBUG_RX,
   8151 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8152 }
   8153 
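/*
 * wm_rxeof() above follows the usual descriptor-ring contract: the
 * hardware sets DD (descriptor done) after writing a buffer, the
 * driver consumes entries until DD is clear or `limit' expires, and
 * rxq_ptr always names the next descriptor to examine.  Reduced to
 * its skeleton (hypothetical helper, illustrative only; error and
 * EOP handling omitted):
 */
#if 0
static void
example_rx_ring_consume(struct wm_rxqueue *rxq, u_int limit)
{
	uint32_t status;
	int i;

	for (i = rxq->rxq_ptr; limit-- != 0; i = WM_NEXTRX(i)) {
		status = wm_rxdesc_get_status(rxq, i);
		if (!wm_rxdesc_dd(rxq, i, status))
			break;		/* hardware hasn't filled it yet */
		/* ... consume descriptor i, refill its buffer ... */
	}
	rxq->rxq_ptr = i;		/* resume here on the next call */
}
#endif
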
   8154 /*
   8155  * wm_linkintr_gmii:
   8156  *
   8157  *	Helper; handle link interrupts for GMII.
   8158  */
   8159 static void
   8160 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8161 {
   8162 
   8163 	KASSERT(WM_CORE_LOCKED(sc));
   8164 
   8165 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8166 		__func__));
   8167 
   8168 	if (icr & ICR_LSC) {
   8169 		uint32_t reg;
   8170 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8171 
   8172 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8173 			wm_gig_downshift_workaround_ich8lan(sc);
   8174 
   8175 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8176 			device_xname(sc->sc_dev)));
   8177 		mii_pollstat(&sc->sc_mii);
   8178 		if (sc->sc_type == WM_T_82543) {
   8179 			int miistatus, active;
   8180 
   8181 			/*
   8182 			 * With 82543, we need to force speed and
   8183 			 * duplex on the MAC equal to what the PHY
   8184 			 * speed and duplex configuration is.
   8185 			 */
   8186 			miistatus = sc->sc_mii.mii_media_status;
   8187 
   8188 			if (miistatus & IFM_ACTIVE) {
   8189 				active = sc->sc_mii.mii_media_active;
   8190 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8191 				switch (IFM_SUBTYPE(active)) {
   8192 				case IFM_10_T:
   8193 					sc->sc_ctrl |= CTRL_SPEED_10;
   8194 					break;
   8195 				case IFM_100_TX:
   8196 					sc->sc_ctrl |= CTRL_SPEED_100;
   8197 					break;
   8198 				case IFM_1000_T:
   8199 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8200 					break;
   8201 				default:
    8202 					/*
    8203 					 * Fiber?
    8204 					 * Should not enter here.
    8205 					 */
   8206 					printf("unknown media (%x)\n", active);
   8207 					break;
   8208 				}
   8209 				if (active & IFM_FDX)
   8210 					sc->sc_ctrl |= CTRL_FD;
   8211 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8212 			}
   8213 		} else if ((sc->sc_type == WM_T_ICH8)
   8214 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8215 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8216 		} else if (sc->sc_type == WM_T_PCH) {
   8217 			wm_k1_gig_workaround_hv(sc,
   8218 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8219 		}
   8220 
   8221 		if ((sc->sc_phytype == WMPHY_82578)
   8222 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8223 			== IFM_1000_T)) {
   8224 
   8225 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8226 				delay(200*1000); /* XXX too big */
   8227 
   8228 				/* Link stall fix for link up */
   8229 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8230 				    HV_MUX_DATA_CTRL,
   8231 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8232 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8233 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8234 				    HV_MUX_DATA_CTRL,
   8235 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8236 			}
   8237 		}
   8238 		/*
   8239 		 * I217 Packet Loss issue:
   8240 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8241 		 * on power up.
   8242 		 * Set the Beacon Duration for I217 to 8 usec
   8243 		 */
   8244 		if ((sc->sc_type == WM_T_PCH_LPT)
   8245 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8246 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8247 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8248 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8249 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8250 		}
   8251 
   8252 		/* XXX Work-around I218 hang issue */
   8253 		/* e1000_k1_workaround_lpt_lp() */
   8254 
   8255 		if ((sc->sc_type == WM_T_PCH_LPT)
   8256 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8257 			/*
   8258 			 * Set platform power management values for Latency
   8259 			 * Tolerance Reporting (LTR)
   8260 			 */
   8261 			wm_platform_pm_pch_lpt(sc,
   8262 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8263 				    != 0));
   8264 		}
   8265 
   8266 		/* FEXTNVM6 K1-off workaround */
   8267 		if (sc->sc_type == WM_T_PCH_SPT) {
   8268 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8269 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8270 			    & FEXTNVM6_K1_OFF_ENABLE)
   8271 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8272 			else
   8273 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8274 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8275 		}
   8276 	} else if (icr & ICR_RXSEQ) {
    8277 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8278 			device_xname(sc->sc_dev)));
   8279 	}
   8280 }
   8281 
   8282 /*
   8283  * wm_linkintr_tbi:
   8284  *
   8285  *	Helper; handle link interrupts for TBI mode.
   8286  */
   8287 static void
   8288 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8289 {
   8290 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8291 	uint32_t status;
   8292 
   8293 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8294 		__func__));
   8295 
   8296 	status = CSR_READ(sc, WMREG_STATUS);
   8297 	if (icr & ICR_LSC) {
   8298 		if (status & STATUS_LU) {
   8299 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8300 			    device_xname(sc->sc_dev),
   8301 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8302 			/*
   8303 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8304 			 * so we should update sc->sc_ctrl
   8305 			 */
   8306 
   8307 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8308 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8309 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8310 			if (status & STATUS_FD)
   8311 				sc->sc_tctl |=
   8312 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8313 			else
   8314 				sc->sc_tctl |=
   8315 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8316 			if (sc->sc_ctrl & CTRL_TFCE)
   8317 				sc->sc_fcrtl |= FCRTL_XONE;
   8318 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8319 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8320 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8321 				      sc->sc_fcrtl);
   8322 			sc->sc_tbi_linkup = 1;
   8323 			if_link_state_change(ifp, LINK_STATE_UP);
   8324 		} else {
   8325 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8326 			    device_xname(sc->sc_dev)));
   8327 			sc->sc_tbi_linkup = 0;
   8328 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8329 		}
   8330 		/* Update LED */
   8331 		wm_tbi_serdes_set_linkled(sc);
   8332 	} else if (icr & ICR_RXSEQ) {
   8333 		DPRINTF(WM_DEBUG_LINK,
   8334 		    ("%s: LINK: Receive sequence error\n",
   8335 		    device_xname(sc->sc_dev)));
   8336 	}
   8337 }
   8338 
   8339 /*
   8340  * wm_linkintr_serdes:
   8341  *
    8342  *	Helper; handle link interrupts for SERDES mode.
   8343  */
   8344 static void
   8345 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8346 {
   8347 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8348 	struct mii_data *mii = &sc->sc_mii;
   8349 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8350 	uint32_t pcs_adv, pcs_lpab, reg;
   8351 
   8352 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8353 		__func__));
   8354 
   8355 	if (icr & ICR_LSC) {
   8356 		/* Check PCS */
   8357 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8358 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8359 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8360 				device_xname(sc->sc_dev)));
   8361 			mii->mii_media_status |= IFM_ACTIVE;
   8362 			sc->sc_tbi_linkup = 1;
   8363 			if_link_state_change(ifp, LINK_STATE_UP);
   8364 		} else {
   8365 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8366 				device_xname(sc->sc_dev)));
   8367 			mii->mii_media_status |= IFM_NONE;
   8368 			sc->sc_tbi_linkup = 0;
   8369 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8370 			wm_tbi_serdes_set_linkled(sc);
   8371 			return;
   8372 		}
   8373 		mii->mii_media_active |= IFM_1000_SX;
   8374 		if ((reg & PCS_LSTS_FDX) != 0)
   8375 			mii->mii_media_active |= IFM_FDX;
   8376 		else
   8377 			mii->mii_media_active |= IFM_HDX;
   8378 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8379 			/* Check flow */
   8380 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8381 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8382 				DPRINTF(WM_DEBUG_LINK,
   8383 				    ("XXX LINKOK but not ACOMP\n"));
   8384 				return;
   8385 			}
   8386 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8387 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8388 			DPRINTF(WM_DEBUG_LINK,
   8389 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8390 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8391 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8392 				mii->mii_media_active |= IFM_FLOW
   8393 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8394 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8395 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8396 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8397 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8398 				mii->mii_media_active |= IFM_FLOW
   8399 				    | IFM_ETH_TXPAUSE;
   8400 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8401 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8402 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8403 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8404 				mii->mii_media_active |= IFM_FLOW
   8405 				    | IFM_ETH_RXPAUSE;
   8406 		}
   8407 		/* Update LED */
   8408 		wm_tbi_serdes_set_linkled(sc);
   8409 	} else {
   8410 		DPRINTF(WM_DEBUG_LINK,
   8411 		    ("%s: LINK: Receive sequence error\n",
   8412 		    device_xname(sc->sc_dev)));
   8413 	}
   8414 }
   8415 
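/*
 * The pause resolution above implements IEEE 802.3 Annex 28B: if both
 * sides advertise symmetric pause, flow control runs in both
 * directions; otherwise an asymmetric advertisement paired with the
 * partner's symmetric+asymmetric one yields a single direction.  The
 * same rules as a hypothetical standalone helper (illustrative only):
 */
#if 0
static int
example_resolve_pause(uint32_t pcs_adv, uint32_t pcs_lpab)
{

	if ((pcs_adv & TXCW_SYM_PAUSE) && (pcs_lpab & TXCW_SYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
	    && (pcs_adv & TXCW_ASYM_PAUSE)
	    && (pcs_lpab & TXCW_SYM_PAUSE)
	    && (pcs_lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_TXPAUSE;
	if ((pcs_adv & TXCW_SYM_PAUSE)
	    && (pcs_adv & TXCW_ASYM_PAUSE)
	    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
	    && (pcs_lpab & TXCW_ASYM_PAUSE))
		return IFM_FLOW | IFM_ETH_RXPAUSE;
	return 0;				/* no pause negotiated */
}
#endif
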
   8416 /*
   8417  * wm_linkintr:
   8418  *
   8419  *	Helper; handle link interrupts.
   8420  */
   8421 static void
   8422 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8423 {
   8424 
   8425 	KASSERT(WM_CORE_LOCKED(sc));
   8426 
   8427 	if (sc->sc_flags & WM_F_HAS_MII)
   8428 		wm_linkintr_gmii(sc, icr);
   8429 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8430 	    && (sc->sc_type >= WM_T_82575))
   8431 		wm_linkintr_serdes(sc, icr);
   8432 	else
   8433 		wm_linkintr_tbi(sc, icr);
   8434 }
   8435 
   8436 /*
   8437  * wm_intr_legacy:
   8438  *
   8439  *	Interrupt service routine for INTx and MSI.
   8440  */
   8441 static int
   8442 wm_intr_legacy(void *arg)
   8443 {
   8444 	struct wm_softc *sc = arg;
   8445 	struct wm_queue *wmq = &sc->sc_queue[0];
   8446 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8447 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8448 	uint32_t icr, rndval = 0;
   8449 	int handled = 0;
   8450 
   8451 	DPRINTF(WM_DEBUG_TX,
   8452 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8453 	while (1 /* CONSTCOND */) {
   8454 		icr = CSR_READ(sc, WMREG_ICR);
   8455 		if ((icr & sc->sc_icr) == 0)
   8456 			break;
   8457 		if (rndval == 0)
   8458 			rndval = icr;
   8459 
   8460 		mutex_enter(rxq->rxq_lock);
   8461 
   8462 		if (rxq->rxq_stopping) {
   8463 			mutex_exit(rxq->rxq_lock);
   8464 			break;
   8465 		}
   8466 
   8467 		handled = 1;
   8468 
   8469 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8470 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8471 			DPRINTF(WM_DEBUG_RX,
   8472 			    ("%s: RX: got Rx intr 0x%08x\n",
   8473 			    device_xname(sc->sc_dev),
   8474 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8475 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8476 		}
   8477 #endif
   8478 		wm_rxeof(rxq, UINT_MAX);
   8479 
   8480 		mutex_exit(rxq->rxq_lock);
   8481 		mutex_enter(txq->txq_lock);
   8482 
   8483 		if (txq->txq_stopping) {
   8484 			mutex_exit(txq->txq_lock);
   8485 			break;
   8486 		}
   8487 
   8488 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8489 		if (icr & ICR_TXDW) {
   8490 			DPRINTF(WM_DEBUG_TX,
   8491 			    ("%s: TX: got TXDW interrupt\n",
   8492 			    device_xname(sc->sc_dev)));
   8493 			WM_Q_EVCNT_INCR(txq, txdw);
   8494 		}
   8495 #endif
   8496 		wm_txeof(sc, txq);
   8497 
   8498 		mutex_exit(txq->txq_lock);
   8499 		WM_CORE_LOCK(sc);
   8500 
   8501 		if (sc->sc_core_stopping) {
   8502 			WM_CORE_UNLOCK(sc);
   8503 			break;
   8504 		}
   8505 
   8506 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8507 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8508 			wm_linkintr(sc, icr);
   8509 		}
   8510 
   8511 		WM_CORE_UNLOCK(sc);
   8512 
   8513 		if (icr & ICR_RXO) {
   8514 #if defined(WM_DEBUG)
   8515 			log(LOG_WARNING, "%s: Receive overrun\n",
   8516 			    device_xname(sc->sc_dev));
   8517 #endif /* defined(WM_DEBUG) */
   8518 		}
   8519 	}
   8520 
   8521 	rnd_add_uint32(&sc->rnd_source, rndval);
   8522 
   8523 	if (handled) {
   8524 		/* Try to get more packets going. */
   8525 		softint_schedule(wmq->wmq_si);
   8526 	}
   8527 
   8528 	return handled;
   8529 }
   8530 
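/*
 * Reading ICR clears it, which is what lets the loop above work: each
 * iteration reads (and thereby acknowledges) the pending causes, and
 * the handler exits once none of the causes it enabled (sc_icr)
 * remain.  The read-to-clear idiom in isolation (hypothetical helper,
 * illustrative only):
 */
#if 0
static void
example_icr_drain(struct wm_softc *sc)
{
	uint32_t icr;

	for (;;) {
		icr = CSR_READ(sc, WMREG_ICR);	/* the read clears ICR */
		if ((icr & sc->sc_icr) == 0)
			break;			/* nothing left for us */
		/* ... service the causes recorded in icr ... */
	}
}
#endif
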
   8531 static inline void
   8532 wm_txrxintr_disable(struct wm_queue *wmq)
   8533 {
   8534 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8535 
   8536 	if (sc->sc_type == WM_T_82574)
   8537 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8538 	else if (sc->sc_type == WM_T_82575)
   8539 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8540 	else
   8541 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8542 }
   8543 
   8544 static inline void
   8545 wm_txrxintr_enable(struct wm_queue *wmq)
   8546 {
   8547 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8548 
   8549 	wm_itrs_calculate(sc, wmq);
   8550 
   8551 	if (sc->sc_type == WM_T_82574)
   8552 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8553 	else if (sc->sc_type == WM_T_82575)
   8554 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8555 	else
   8556 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8557 }
   8558 
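/*
 * Three masking schemes meet in the two functions above: the 82574
 * keeps per-queue causes in the ordinary IMS/IMC registers, the 82575
 * has dedicated EITR_{TX,RX}_QUEUE bits in EIMS/EIMC, and later chips
 * simply map one EIMS/EIMC bit to each MSI-X vector.  The mask
 * computation on its own (hypothetical helper, illustrative only):
 */
#if 0
static uint32_t
example_queue_intr_mask(struct wm_softc *sc, struct wm_queue *wmq)
{

	if (sc->sc_type == WM_T_82574)		/* goes to IMS/IMC */
		return ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id);
	if (sc->sc_type == WM_T_82575)		/* goes to EIMS/EIMC */
		return EITR_TX_QUEUE(wmq->wmq_id)
		    | EITR_RX_QUEUE(wmq->wmq_id);
	return 1 << wmq->wmq_intr_idx;		/* one bit per vector */
}
#endif
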
   8559 static int
   8560 wm_txrxintr_msix(void *arg)
   8561 {
   8562 	struct wm_queue *wmq = arg;
   8563 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8564 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8565 	struct wm_softc *sc = txq->txq_sc;
   8566 	u_int limit = sc->sc_rx_intr_process_limit;
   8567 
   8568 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8569 
   8570 	DPRINTF(WM_DEBUG_TX,
   8571 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8572 
   8573 	wm_txrxintr_disable(wmq);
   8574 
   8575 	mutex_enter(txq->txq_lock);
   8576 
   8577 	if (txq->txq_stopping) {
   8578 		mutex_exit(txq->txq_lock);
   8579 		return 0;
   8580 	}
   8581 
   8582 	WM_Q_EVCNT_INCR(txq, txdw);
   8583 	wm_txeof(sc, txq);
    8584 	/* The deferred start is done in wm_handle_queue(). */
   8585 	mutex_exit(txq->txq_lock);
   8586 
   8587 	DPRINTF(WM_DEBUG_RX,
   8588 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8589 	mutex_enter(rxq->rxq_lock);
   8590 
   8591 	if (rxq->rxq_stopping) {
   8592 		mutex_exit(rxq->rxq_lock);
   8593 		return 0;
   8594 	}
   8595 
   8596 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8597 	wm_rxeof(rxq, limit);
   8598 	mutex_exit(rxq->rxq_lock);
   8599 
   8600 	wm_itrs_writereg(sc, wmq);
   8601 
   8602 	softint_schedule(wmq->wmq_si);
   8603 
   8604 	return 1;
   8605 }
   8606 
   8607 static void
   8608 wm_handle_queue(void *arg)
   8609 {
   8610 	struct wm_queue *wmq = arg;
   8611 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8612 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8613 	struct wm_softc *sc = txq->txq_sc;
   8614 	u_int limit = sc->sc_rx_process_limit;
   8615 
   8616 	mutex_enter(txq->txq_lock);
   8617 	if (txq->txq_stopping) {
   8618 		mutex_exit(txq->txq_lock);
   8619 		return;
   8620 	}
   8621 	wm_txeof(sc, txq);
   8622 	wm_deferred_start_locked(txq);
   8623 	mutex_exit(txq->txq_lock);
   8624 
   8625 	mutex_enter(rxq->rxq_lock);
   8626 	if (rxq->rxq_stopping) {
   8627 		mutex_exit(rxq->rxq_lock);
   8628 		return;
   8629 	}
   8630 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8631 	wm_rxeof(rxq, limit);
   8632 	mutex_exit(rxq->rxq_lock);
   8633 
   8634 	wm_txrxintr_enable(wmq);
   8635 }
   8636 
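/*
 * wm_txrxintr_msix() and wm_handle_queue() together form a top
 * half/bottom half split: the hard interrupt masks the queue's
 * vector, does a bounded amount of work, and defers the rest to a
 * softint, which finishes and unmasks.  Masking while the softint
 * runs keeps the hardware from re-raising the vector until the
 * driver is ready for more work.  The control flow in outline
 * (illustrative only):
 */
#if 0
	/* top half: wm_txrxintr_msix() */
	wm_txrxintr_disable(wmq);	/* mask this queue's vector */
	/* ... bounded tx/rx processing under the queue locks ... */
	softint_schedule(wmq->wmq_si);	/* hand off the remainder */

	/* bottom half: wm_handle_queue() */
	/* ... tx completion, deferred start, more rx processing ... */
	wm_txrxintr_enable(wmq);	/* unmask the vector again */
#endif
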
   8637 /*
   8638  * wm_linkintr_msix:
   8639  *
   8640  *	Interrupt service routine for link status change for MSI-X.
   8641  */
   8642 static int
   8643 wm_linkintr_msix(void *arg)
   8644 {
   8645 	struct wm_softc *sc = arg;
   8646 	uint32_t reg;
   8647 
   8648 	DPRINTF(WM_DEBUG_LINK,
   8649 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8650 
   8651 	reg = CSR_READ(sc, WMREG_ICR);
   8652 	WM_CORE_LOCK(sc);
   8653 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8654 		goto out;
   8655 
   8656 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8657 	wm_linkintr(sc, ICR_LSC);
   8658 
   8659 out:
   8660 	WM_CORE_UNLOCK(sc);
   8661 
   8662 	if (sc->sc_type == WM_T_82574)
   8663 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8664 	else if (sc->sc_type == WM_T_82575)
   8665 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8666 	else
   8667 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8668 
   8669 	return 1;
   8670 }
   8671 
   8672 /*
   8673  * Media related.
   8674  * GMII, SGMII, TBI (and SERDES)
   8675  */
   8676 
   8677 /* Common */
   8678 
   8679 /*
   8680  * wm_tbi_serdes_set_linkled:
   8681  *
   8682  *	Update the link LED on TBI and SERDES devices.
   8683  */
   8684 static void
   8685 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8686 {
   8687 
   8688 	if (sc->sc_tbi_linkup)
   8689 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8690 	else
   8691 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8692 
    8693 	/* On 82540 and newer devices the LED pin is active-low */
   8694 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8695 
   8696 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8697 }
   8698 
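/*
 * The conditional XOR above handles LED polarity without branching on
 * the pin value: the bit is first computed for "link up", then
 * inverted on parts where the pin is active-low.  Spelled out
 * (illustrative only):
 */
#if 0
	if (sc->sc_type >= WM_T_82540)
		sc->sc_ctrl ^= CTRL_SWDPIN(0);	/* active-low: flip the bit */
#endif
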
   8699 /* GMII related */
   8700 
   8701 /*
   8702  * wm_gmii_reset:
   8703  *
   8704  *	Reset the PHY.
   8705  */
   8706 static void
   8707 wm_gmii_reset(struct wm_softc *sc)
   8708 {
   8709 	uint32_t reg;
   8710 	int rv;
   8711 
   8712 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8713 		device_xname(sc->sc_dev), __func__));
   8714 
   8715 	rv = sc->phy.acquire(sc);
   8716 	if (rv != 0) {
   8717 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8718 		    __func__);
   8719 		return;
   8720 	}
   8721 
   8722 	switch (sc->sc_type) {
   8723 	case WM_T_82542_2_0:
   8724 	case WM_T_82542_2_1:
   8725 		/* null */
   8726 		break;
   8727 	case WM_T_82543:
   8728 		/*
   8729 		 * With 82543, we need to force speed and duplex on the MAC
   8730 		 * equal to what the PHY speed and duplex configuration is.
   8731 		 * In addition, we need to perform a hardware reset on the PHY
   8732 		 * to take it out of reset.
   8733 		 */
   8734 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8735 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8736 
   8737 		/* The PHY reset pin is active-low. */
   8738 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8739 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8740 		    CTRL_EXT_SWDPIN(4));
   8741 		reg |= CTRL_EXT_SWDPIO(4);
   8742 
   8743 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8744 		CSR_WRITE_FLUSH(sc);
   8745 		delay(10*1000);
   8746 
   8747 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8748 		CSR_WRITE_FLUSH(sc);
   8749 		delay(150);
   8750 #if 0
   8751 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8752 #endif
   8753 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8754 		break;
   8755 	case WM_T_82544:	/* reset 10000us */
   8756 	case WM_T_82540:
   8757 	case WM_T_82545:
   8758 	case WM_T_82545_3:
   8759 	case WM_T_82546:
   8760 	case WM_T_82546_3:
   8761 	case WM_T_82541:
   8762 	case WM_T_82541_2:
   8763 	case WM_T_82547:
   8764 	case WM_T_82547_2:
   8765 	case WM_T_82571:	/* reset 100us */
   8766 	case WM_T_82572:
   8767 	case WM_T_82573:
   8768 	case WM_T_82574:
   8769 	case WM_T_82575:
   8770 	case WM_T_82576:
   8771 	case WM_T_82580:
   8772 	case WM_T_I350:
   8773 	case WM_T_I354:
   8774 	case WM_T_I210:
   8775 	case WM_T_I211:
   8776 	case WM_T_82583:
   8777 	case WM_T_80003:
   8778 		/* generic reset */
   8779 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8780 		CSR_WRITE_FLUSH(sc);
   8781 		delay(20000);
   8782 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8783 		CSR_WRITE_FLUSH(sc);
   8784 		delay(20000);
   8785 
   8786 		if ((sc->sc_type == WM_T_82541)
   8787 		    || (sc->sc_type == WM_T_82541_2)
   8788 		    || (sc->sc_type == WM_T_82547)
   8789 		    || (sc->sc_type == WM_T_82547_2)) {
    8790 			/* Workarounds for IGP are done in igp_reset() */
   8791 			/* XXX add code to set LED after phy reset */
   8792 		}
   8793 		break;
   8794 	case WM_T_ICH8:
   8795 	case WM_T_ICH9:
   8796 	case WM_T_ICH10:
   8797 	case WM_T_PCH:
   8798 	case WM_T_PCH2:
   8799 	case WM_T_PCH_LPT:
   8800 	case WM_T_PCH_SPT:
   8801 		/* generic reset */
   8802 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8803 		CSR_WRITE_FLUSH(sc);
   8804 		delay(100);
   8805 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8806 		CSR_WRITE_FLUSH(sc);
   8807 		delay(150);
   8808 		break;
   8809 	default:
   8810 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8811 		    __func__);
   8812 		break;
   8813 	}
   8814 
   8815 	sc->phy.release(sc);
   8816 
   8817 	/* get_cfg_done */
   8818 	wm_get_cfg_done(sc);
   8819 
   8820 	/* extra setup */
   8821 	switch (sc->sc_type) {
   8822 	case WM_T_82542_2_0:
   8823 	case WM_T_82542_2_1:
   8824 	case WM_T_82543:
   8825 	case WM_T_82544:
   8826 	case WM_T_82540:
   8827 	case WM_T_82545:
   8828 	case WM_T_82545_3:
   8829 	case WM_T_82546:
   8830 	case WM_T_82546_3:
   8831 	case WM_T_82541_2:
   8832 	case WM_T_82547_2:
   8833 	case WM_T_82571:
   8834 	case WM_T_82572:
   8835 	case WM_T_82573:
   8836 	case WM_T_82575:
   8837 	case WM_T_82576:
   8838 	case WM_T_82580:
   8839 	case WM_T_I350:
   8840 	case WM_T_I354:
   8841 	case WM_T_I210:
   8842 	case WM_T_I211:
   8843 	case WM_T_80003:
   8844 		/* null */
   8845 		break;
   8846 	case WM_T_82574:
   8847 	case WM_T_82583:
   8848 		wm_lplu_d0_disable(sc);
   8849 		break;
   8850 	case WM_T_82541:
   8851 	case WM_T_82547:
    8852 		/* XXX Actively configure the LED after PHY reset */
   8853 		break;
   8854 	case WM_T_ICH8:
   8855 	case WM_T_ICH9:
   8856 	case WM_T_ICH10:
   8857 	case WM_T_PCH:
   8858 	case WM_T_PCH2:
   8859 	case WM_T_PCH_LPT:
   8860 	case WM_T_PCH_SPT:
    8861 		/* Allow time for h/w to get to a quiescent state after reset */
   8862 		delay(10*1000);
   8863 
   8864 		if (sc->sc_type == WM_T_PCH)
   8865 			wm_hv_phy_workaround_ich8lan(sc);
   8866 
   8867 		if (sc->sc_type == WM_T_PCH2)
   8868 			wm_lv_phy_workaround_ich8lan(sc);
   8869 
   8870 		/* Clear the host wakeup bit after lcd reset */
   8871 		if (sc->sc_type >= WM_T_PCH) {
   8872 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8873 			    BM_PORT_GEN_CFG);
   8874 			reg &= ~BM_WUC_HOST_WU_BIT;
   8875 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8876 			    BM_PORT_GEN_CFG, reg);
   8877 		}
   8878 
   8879 		/*
    8880 		 * XXX Configure the LCD with the extended configuration region
   8881 		 * in NVM
   8882 		 */
   8883 
   8884 		/* Disable D0 LPLU. */
   8885 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8886 			wm_lplu_d0_disable_pch(sc);
   8887 		else
   8888 			wm_lplu_d0_disable(sc);	/* ICH* */
   8889 		break;
   8890 	default:
   8891 		panic("%s: unknown type\n", __func__);
   8892 		break;
   8893 	}
   8894 }
   8895 
    8896 /*
    8897  * Set up sc_phytype and mii_{read|write}reg.
    8898  *
    8899  *  To identify the PHY type, the correct read/write functions must be
    8900  * selected, and to select them we need the PCI ID or MAC type without
    8901  * accessing any PHY registers.
    8902  *
    8903  *  On the first call of this function, the PHY ID is not yet known, so
    8904  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8905  * so the result might be incorrect.
    8906  *
    8907  *  On the second call, the PHY OUI and model are used to identify the
    8908  * PHY type. This might still not be perfect because of entries missing
    8909  * from the comparison, but it is better than the first call.
    8910  *
    8911  *  If the newly detected result differs from the previous assumption,
    8912  * a diagnostic message is printed.
    8913  */
   8914 static void
   8915 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8916     uint16_t phy_model)
   8917 {
   8918 	device_t dev = sc->sc_dev;
   8919 	struct mii_data *mii = &sc->sc_mii;
   8920 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8921 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8922 	mii_readreg_t new_readreg;
   8923 	mii_writereg_t new_writereg;
   8924 
   8925 	if (mii->mii_readreg == NULL) {
   8926 		/*
   8927 		 *  This is the first call of this function. For ICH and PCH
   8928 		 * variants, it's difficult to determine the PHY access method
   8929 		 * by sc_type, so use the PCI product ID for some devices.
   8930 		 */
   8931 
   8932 		switch (sc->sc_pcidevid) {
   8933 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8934 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8935 			/* 82577 */
   8936 			new_phytype = WMPHY_82577;
   8937 			break;
   8938 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8939 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8940 			/* 82578 */
   8941 			new_phytype = WMPHY_82578;
   8942 			break;
   8943 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8944 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8945 			/* 82579 */
   8946 			new_phytype = WMPHY_82579;
   8947 			break;
   8948 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8949 		case PCI_PRODUCT_INTEL_82801I_BM:
   8950 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8951 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8952 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8953 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8954 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8955 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8956 			/* ICH8, 9, 10 with 82567 */
   8957 			new_phytype = WMPHY_BM;
   8958 			break;
   8959 		default:
   8960 			break;
   8961 		}
   8962 	} else {
   8963 		/* It's not the first call. Use PHY OUI and model */
   8964 		switch (phy_oui) {
   8965 		case MII_OUI_ATHEROS: /* XXX ??? */
   8966 			switch (phy_model) {
   8967 			case 0x0004: /* XXX */
   8968 				new_phytype = WMPHY_82578;
   8969 				break;
   8970 			default:
   8971 				break;
   8972 			}
   8973 			break;
   8974 		case MII_OUI_xxMARVELL:
   8975 			switch (phy_model) {
   8976 			case MII_MODEL_xxMARVELL_I210:
   8977 				new_phytype = WMPHY_I210;
   8978 				break;
   8979 			case MII_MODEL_xxMARVELL_E1011:
   8980 			case MII_MODEL_xxMARVELL_E1000_3:
   8981 			case MII_MODEL_xxMARVELL_E1000_5:
   8982 			case MII_MODEL_xxMARVELL_E1112:
   8983 				new_phytype = WMPHY_M88;
   8984 				break;
   8985 			case MII_MODEL_xxMARVELL_E1149:
   8986 				new_phytype = WMPHY_BM;
   8987 				break;
   8988 			case MII_MODEL_xxMARVELL_E1111:
   8989 			case MII_MODEL_xxMARVELL_I347:
   8990 			case MII_MODEL_xxMARVELL_E1512:
   8991 			case MII_MODEL_xxMARVELL_E1340M:
   8992 			case MII_MODEL_xxMARVELL_E1543:
   8993 				new_phytype = WMPHY_M88;
   8994 				break;
   8995 			case MII_MODEL_xxMARVELL_I82563:
   8996 				new_phytype = WMPHY_GG82563;
   8997 				break;
   8998 			default:
   8999 				break;
   9000 			}
   9001 			break;
   9002 		case MII_OUI_INTEL:
   9003 			switch (phy_model) {
   9004 			case MII_MODEL_INTEL_I82577:
   9005 				new_phytype = WMPHY_82577;
   9006 				break;
   9007 			case MII_MODEL_INTEL_I82579:
   9008 				new_phytype = WMPHY_82579;
   9009 				break;
   9010 			case MII_MODEL_INTEL_I217:
   9011 				new_phytype = WMPHY_I217;
   9012 				break;
   9013 			case MII_MODEL_INTEL_I82580:
   9014 			case MII_MODEL_INTEL_I350:
   9015 				new_phytype = WMPHY_82580;
   9016 				break;
   9017 			default:
   9018 				break;
   9019 			}
   9020 			break;
   9021 		case MII_OUI_yyINTEL:
   9022 			switch (phy_model) {
   9023 			case MII_MODEL_yyINTEL_I82562G:
   9024 			case MII_MODEL_yyINTEL_I82562EM:
   9025 			case MII_MODEL_yyINTEL_I82562ET:
   9026 				new_phytype = WMPHY_IFE;
   9027 				break;
   9028 			case MII_MODEL_yyINTEL_IGP01E1000:
   9029 				new_phytype = WMPHY_IGP;
   9030 				break;
   9031 			case MII_MODEL_yyINTEL_I82566:
   9032 				new_phytype = WMPHY_IGP_3;
   9033 				break;
   9034 			default:
   9035 				break;
   9036 			}
   9037 			break;
   9038 		default:
   9039 			break;
   9040 		}
   9041 		if (new_phytype == WMPHY_UNKNOWN)
   9042 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9043 			    __func__);
   9044 
   9045 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9046 		    && (sc->sc_phytype != new_phytype)) {
    9047 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9048 			    "was incorrect. PHY type from PHY ID = %u\n",
   9049 			    sc->sc_phytype, new_phytype);
   9050 		}
   9051 	}
   9052 
   9053 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9054 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9055 		/* SGMII */
   9056 		new_readreg = wm_sgmii_readreg;
   9057 		new_writereg = wm_sgmii_writereg;
    9058 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9059 		/* BM2 (phyaddr == 1) */
   9060 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9061 		    && (new_phytype != WMPHY_BM)
   9062 		    && (new_phytype != WMPHY_UNKNOWN))
   9063 			doubt_phytype = new_phytype;
   9064 		new_phytype = WMPHY_BM;
   9065 		new_readreg = wm_gmii_bm_readreg;
   9066 		new_writereg = wm_gmii_bm_writereg;
   9067 	} else if (sc->sc_type >= WM_T_PCH) {
   9068 		/* All PCH* use _hv_ */
   9069 		new_readreg = wm_gmii_hv_readreg;
   9070 		new_writereg = wm_gmii_hv_writereg;
   9071 	} else if (sc->sc_type >= WM_T_ICH8) {
   9072 		/* non-82567 ICH8, 9 and 10 */
   9073 		new_readreg = wm_gmii_i82544_readreg;
   9074 		new_writereg = wm_gmii_i82544_writereg;
   9075 	} else if (sc->sc_type >= WM_T_80003) {
   9076 		/* 80003 */
   9077 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9078 		    && (new_phytype != WMPHY_GG82563)
   9079 		    && (new_phytype != WMPHY_UNKNOWN))
   9080 			doubt_phytype = new_phytype;
   9081 		new_phytype = WMPHY_GG82563;
   9082 		new_readreg = wm_gmii_i80003_readreg;
   9083 		new_writereg = wm_gmii_i80003_writereg;
   9084 	} else if (sc->sc_type >= WM_T_I210) {
   9085 		/* I210 and I211 */
   9086 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9087 		    && (new_phytype != WMPHY_I210)
   9088 		    && (new_phytype != WMPHY_UNKNOWN))
   9089 			doubt_phytype = new_phytype;
   9090 		new_phytype = WMPHY_I210;
   9091 		new_readreg = wm_gmii_gs40g_readreg;
   9092 		new_writereg = wm_gmii_gs40g_writereg;
   9093 	} else if (sc->sc_type >= WM_T_82580) {
   9094 		/* 82580, I350 and I354 */
   9095 		new_readreg = wm_gmii_82580_readreg;
   9096 		new_writereg = wm_gmii_82580_writereg;
   9097 	} else if (sc->sc_type >= WM_T_82544) {
    9098 		/* 82544, 8254[01567], 8257[1234] and 82583 */
   9099 		new_readreg = wm_gmii_i82544_readreg;
   9100 		new_writereg = wm_gmii_i82544_writereg;
   9101 	} else {
   9102 		new_readreg = wm_gmii_i82543_readreg;
   9103 		new_writereg = wm_gmii_i82543_writereg;
   9104 	}
   9105 
   9106 	if (new_phytype == WMPHY_BM) {
   9107 		/* All BM use _bm_ */
   9108 		new_readreg = wm_gmii_bm_readreg;
   9109 		new_writereg = wm_gmii_bm_writereg;
   9110 	}
   9111 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9112 		/* All PCH* use _hv_ */
   9113 		new_readreg = wm_gmii_hv_readreg;
   9114 		new_writereg = wm_gmii_hv_writereg;
   9115 	}
   9116 
   9117 	/* Diag output */
   9118 	if (doubt_phytype != WMPHY_UNKNOWN)
   9119 		aprint_error_dev(dev, "Assumed new PHY type was "
   9120 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9121 		    new_phytype);
   9122 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9123 	    && (sc->sc_phytype != new_phytype))
    9124 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9125 		    "was incorrect. New PHY type = %u\n",
   9126 		    sc->sc_phytype, new_phytype);
   9127 
   9128 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9129 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9130 
   9131 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9132 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9133 		    "function was incorrect.\n");
   9134 
   9135 	/* Update now */
   9136 	sc->sc_phytype = new_phytype;
   9137 	mii->mii_readreg = new_readreg;
   9138 	mii->mii_writereg = new_writereg;
   9139 }
   9140 
   9141 /*
   9142  * wm_get_phy_id_82575:
   9143  *
   9144  * Return PHY ID. Return -1 if it failed.
   9145  */
   9146 static int
   9147 wm_get_phy_id_82575(struct wm_softc *sc)
   9148 {
   9149 	uint32_t reg;
   9150 	int phyid = -1;
   9151 
   9152 	/* XXX */
   9153 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9154 		return -1;
   9155 
   9156 	if (wm_sgmii_uses_mdio(sc)) {
   9157 		switch (sc->sc_type) {
   9158 		case WM_T_82575:
   9159 		case WM_T_82576:
   9160 			reg = CSR_READ(sc, WMREG_MDIC);
   9161 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9162 			break;
   9163 		case WM_T_82580:
   9164 		case WM_T_I350:
   9165 		case WM_T_I354:
   9166 		case WM_T_I210:
   9167 		case WM_T_I211:
   9168 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9169 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9170 			break;
   9171 		default:
   9172 			return -1;
   9173 		}
   9174 	}
   9175 
   9176 	return phyid;
   9177 }
   9178 
   9179 
   9180 /*
   9181  * wm_gmii_mediainit:
   9182  *
   9183  *	Initialize media for use on 1000BASE-T devices.
   9184  */
   9185 static void
   9186 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9187 {
   9188 	device_t dev = sc->sc_dev;
   9189 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9190 	struct mii_data *mii = &sc->sc_mii;
   9191 	uint32_t reg;
   9192 
   9193 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9194 		device_xname(sc->sc_dev), __func__));
   9195 
   9196 	/* We have GMII. */
   9197 	sc->sc_flags |= WM_F_HAS_MII;
   9198 
   9199 	if (sc->sc_type == WM_T_80003)
   9200 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9201 	else
   9202 		sc->sc_tipg = TIPG_1000T_DFLT;
   9203 
   9204 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9205 	if ((sc->sc_type == WM_T_82580)
   9206 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9207 	    || (sc->sc_type == WM_T_I211)) {
   9208 		reg = CSR_READ(sc, WMREG_PHPM);
   9209 		reg &= ~PHPM_GO_LINK_D;
   9210 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9211 	}
   9212 
   9213 	/*
   9214 	 * Let the chip set speed/duplex on its own based on
   9215 	 * signals from the PHY.
   9216 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9217 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9218 	 */
   9219 	sc->sc_ctrl |= CTRL_SLU;
   9220 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9221 
   9222 	/* Initialize our media structures and probe the GMII. */
   9223 	mii->mii_ifp = ifp;
   9224 
   9225 	/*
    9226 	 * The first call of wm_gmii_setup_phytype. The result might be
   9227 	 * incorrect.
   9228 	 */
   9229 	wm_gmii_setup_phytype(sc, 0, 0);
   9230 
   9231 	mii->mii_statchg = wm_gmii_statchg;
   9232 
   9233 	/* get PHY control from SMBus to PCIe */
   9234 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9235 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9236 		wm_smbustopci(sc);
   9237 
   9238 	wm_gmii_reset(sc);
   9239 
   9240 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9241 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9242 	    wm_gmii_mediastatus);
   9243 
   9244 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9245 	    || (sc->sc_type == WM_T_82580)
   9246 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9247 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9248 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9249 			/* Attach only one port */
   9250 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9251 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9252 		} else {
   9253 			int i, id;
   9254 			uint32_t ctrl_ext;
   9255 
   9256 			id = wm_get_phy_id_82575(sc);
   9257 			if (id != -1) {
   9258 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9259 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9260 			}
   9261 			if ((id == -1)
   9262 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9263 				/* Power on the SGMII PHY if it is disabled */
   9264 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9265 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9266 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9267 				CSR_WRITE_FLUSH(sc);
   9268 				delay(300*1000); /* XXX too long */
   9269 
   9270 				/* from 1 to 8 */
   9271 				for (i = 1; i < 8; i++)
   9272 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9273 					    0xffffffff, i, MII_OFFSET_ANY,
   9274 					    MIIF_DOPAUSE);
   9275 
    9276 				/* Restore the previous SFP cage power state */
   9277 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9278 			}
   9279 		}
   9280 	} else {
   9281 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9282 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9283 	}
   9284 
   9285 	/*
   9286 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9287 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9288 	 */
   9289 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9290 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9291 		wm_set_mdio_slow_mode_hv(sc);
   9292 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9293 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9294 	}
   9295 
   9296 	/*
   9297 	 * (For ICH8 variants)
   9298 	 * If PHY detection failed, use BM's r/w function and retry.
   9299 	 */
   9300 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9301 		/* if failed, retry with *_bm_* */
   9302 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9303 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9304 		    sc->sc_phytype);
   9305 		sc->sc_phytype = WMPHY_BM;
   9306 		mii->mii_readreg = wm_gmii_bm_readreg;
   9307 		mii->mii_writereg = wm_gmii_bm_writereg;
   9308 
   9309 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9310 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9311 	}
   9312 
   9313 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9314 		/* No PHY was found */
   9315 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9316 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9317 		sc->sc_phytype = WMPHY_NONE;
   9318 	} else {
   9319 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9320 
   9321 		/*
   9322 		 * PHY Found! Check PHY type again by the second call of
    9323 		 * wm_gmii_setup_phytype.
   9324 		 */
   9325 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9326 		    child->mii_mpd_model);
   9327 
   9328 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9329 	}
   9330 }
   9331 
   9332 /*
   9333  * wm_gmii_mediachange:	[ifmedia interface function]
   9334  *
   9335  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9336  */
   9337 static int
   9338 wm_gmii_mediachange(struct ifnet *ifp)
   9339 {
   9340 	struct wm_softc *sc = ifp->if_softc;
   9341 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9342 	int rc;
   9343 
   9344 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9345 		device_xname(sc->sc_dev), __func__));
   9346 	if ((ifp->if_flags & IFF_UP) == 0)
   9347 		return 0;
   9348 
   9349 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9350 	sc->sc_ctrl |= CTRL_SLU;
   9351 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9352 	    || (sc->sc_type > WM_T_82543)) {
   9353 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9354 	} else {
   9355 		sc->sc_ctrl &= ~CTRL_ASDE;
   9356 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9357 		if (ife->ifm_media & IFM_FDX)
   9358 			sc->sc_ctrl |= CTRL_FD;
   9359 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9360 		case IFM_10_T:
   9361 			sc->sc_ctrl |= CTRL_SPEED_10;
   9362 			break;
   9363 		case IFM_100_TX:
   9364 			sc->sc_ctrl |= CTRL_SPEED_100;
   9365 			break;
   9366 		case IFM_1000_T:
   9367 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9368 			break;
   9369 		default:
   9370 			panic("wm_gmii_mediachange: bad media 0x%x",
   9371 			    ife->ifm_media);
   9372 		}
   9373 	}
   9374 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9375 	if (sc->sc_type <= WM_T_82543)
   9376 		wm_gmii_reset(sc);
   9377 
   9378 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9379 		return 0;
   9380 	return rc;
   9381 }
   9382 
   9383 /*
   9384  * wm_gmii_mediastatus:	[ifmedia interface function]
   9385  *
   9386  *	Get the current interface media status on a 1000BASE-T device.
   9387  */
   9388 static void
   9389 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9390 {
   9391 	struct wm_softc *sc = ifp->if_softc;
   9392 
   9393 	ether_mediastatus(ifp, ifmr);
   9394 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9395 	    | sc->sc_flowflags;
   9396 }
   9397 
   9398 #define	MDI_IO		CTRL_SWDPIN(2)
   9399 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9400 #define	MDI_CLK		CTRL_SWDPIN(3)
   9401 
   9402 static void
   9403 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9404 {
   9405 	uint32_t i, v;
   9406 
   9407 	v = CSR_READ(sc, WMREG_CTRL);
   9408 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9409 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9410 
   9411 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9412 		if (data & i)
   9413 			v |= MDI_IO;
   9414 		else
   9415 			v &= ~MDI_IO;
   9416 		CSR_WRITE(sc, WMREG_CTRL, v);
   9417 		CSR_WRITE_FLUSH(sc);
   9418 		delay(10);
   9419 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9420 		CSR_WRITE_FLUSH(sc);
   9421 		delay(10);
   9422 		CSR_WRITE(sc, WMREG_CTRL, v);
   9423 		CSR_WRITE_FLUSH(sc);
   9424 		delay(10);
   9425 	}
   9426 }
   9427 
   9428 static uint32_t
   9429 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9430 {
   9431 	uint32_t v, i, data = 0;
   9432 
   9433 	v = CSR_READ(sc, WMREG_CTRL);
   9434 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9435 	v |= CTRL_SWDPIO(3);
   9436 
   9437 	CSR_WRITE(sc, WMREG_CTRL, v);
   9438 	CSR_WRITE_FLUSH(sc);
   9439 	delay(10);
   9440 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9441 	CSR_WRITE_FLUSH(sc);
   9442 	delay(10);
   9443 	CSR_WRITE(sc, WMREG_CTRL, v);
   9444 	CSR_WRITE_FLUSH(sc);
   9445 	delay(10);
   9446 
   9447 	for (i = 0; i < 16; i++) {
   9448 		data <<= 1;
   9449 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9450 		CSR_WRITE_FLUSH(sc);
   9451 		delay(10);
   9452 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9453 			data |= 1;
   9454 		CSR_WRITE(sc, WMREG_CTRL, v);
   9455 		CSR_WRITE_FLUSH(sc);
   9456 		delay(10);
   9457 	}
   9458 
   9459 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9460 	CSR_WRITE_FLUSH(sc);
   9461 	delay(10);
   9462 	CSR_WRITE(sc, WMREG_CTRL, v);
   9463 	CSR_WRITE_FLUSH(sc);
   9464 	delay(10);
   9465 
   9466 	return data;
   9467 }
   9468 
   9469 #undef MDI_IO
   9470 #undef MDI_DIR
   9471 #undef MDI_CLK
   9472 
   9473 /*
   9474  * wm_gmii_i82543_readreg:	[mii interface function]
   9475  *
   9476  *	Read a PHY register on the GMII (i82543 version).
   9477  */
   9478 static int
   9479 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9480 {
   9481 	struct wm_softc *sc = device_private(self);
   9482 	int rv;
   9483 
   9484 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9485 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9486 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9487 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9488 
   9489 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9490 	    device_xname(sc->sc_dev), phy, reg, rv));
   9491 
   9492 	return rv;
   9493 }
   9494 
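/*
 * The bit-banged sequence above is an IEEE 802.3 clause 22 management
 * frame: 32 preamble ones, then ST, OP, PHYAD and REGAD packed into
 * 14 bits, after which the PHY drives the turnaround and the 16 data
 * bits that wm_i82543_mii_recvbits() samples.  The field packing,
 * made explicit (hypothetical helper, illustrative only):
 */
#if 0
static uint32_t
example_mii_read_frame(int phy, int reg)
{

	/* Sent MSB-first as 14 bits by wm_i82543_mii_sendbits(). */
	return (MII_COMMAND_START << 12)	/* ST, 2 bits */
	    | (MII_COMMAND_READ << 10)		/* OP, 2 bits (read) */
	    | (phy << 5)			/* PHYAD, 5 bits */
	    | reg;				/* REGAD, 5 bits */
}
#endif
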
   9495 /*
   9496  * wm_gmii_i82543_writereg:	[mii interface function]
   9497  *
   9498  *	Write a PHY register on the GMII (i82543 version).
   9499  */
   9500 static void
   9501 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9502 {
   9503 	struct wm_softc *sc = device_private(self);
   9504 
   9505 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9506 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9507 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9508 	    (MII_COMMAND_START << 30), 32);
   9509 }
   9510 
   9511 /*
   9512  * wm_gmii_mdic_readreg:	[mii interface function]
   9513  *
   9514  *	Read a PHY register on the GMII.
   9515  */
   9516 static int
   9517 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9518 {
   9519 	struct wm_softc *sc = device_private(self);
   9520 	uint32_t mdic = 0;
   9521 	int i, rv;
   9522 
   9523 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9524 	    MDIC_REGADD(reg));
   9525 
   9526 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9527 		mdic = CSR_READ(sc, WMREG_MDIC);
   9528 		if (mdic & MDIC_READY)
   9529 			break;
   9530 		delay(50);
   9531 	}
   9532 
   9533 	if ((mdic & MDIC_READY) == 0) {
   9534 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9535 		    device_xname(sc->sc_dev), phy, reg);
   9536 		rv = 0;
   9537 	} else if (mdic & MDIC_E) {
   9538 #if 0 /* This is normal if no PHY is present. */
   9539 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9540 		    device_xname(sc->sc_dev), phy, reg);
   9541 #endif
   9542 		rv = 0;
   9543 	} else {
   9544 		rv = MDIC_DATA(mdic);
   9545 		if (rv == 0xffff)
   9546 			rv = 0;
   9547 	}
   9548 
   9549 	return rv;
   9550 }
   9551 
   9552 /*
   9553  * wm_gmii_mdic_writereg:	[mii interface function]
   9554  *
   9555  *	Write a PHY register on the GMII.
   9556  */
   9557 static void
   9558 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9559 {
   9560 	struct wm_softc *sc = device_private(self);
   9561 	uint32_t mdic = 0;
   9562 	int i;
   9563 
   9564 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9565 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9566 
   9567 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9568 		mdic = CSR_READ(sc, WMREG_MDIC);
   9569 		if (mdic & MDIC_READY)
   9570 			break;
   9571 		delay(50);
   9572 	}
   9573 
   9574 	if ((mdic & MDIC_READY) == 0)
   9575 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9576 		    device_xname(sc->sc_dev), phy, reg);
   9577 	else if (mdic & MDIC_E)
   9578 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9579 		    device_xname(sc->sc_dev), phy, reg);
   9580 }
   9581 
   9582 /*
   9583  * wm_gmii_i82544_readreg:	[mii interface function]
   9584  *
   9585  *	Read a PHY register on the GMII.
   9586  */
   9587 static int
   9588 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9589 {
   9590 	struct wm_softc *sc = device_private(self);
   9591 	int rv;
   9592 
   9593 	if (sc->phy.acquire(sc)) {
   9594 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9595 		    __func__);
   9596 		return 0;
   9597 	}
   9598 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9599 	sc->phy.release(sc);
   9600 
   9601 	return rv;
   9602 }
   9603 
   9604 /*
   9605  * wm_gmii_i82544_writereg:	[mii interface function]
   9606  *
   9607  *	Write a PHY register on the GMII.
   9608  */
   9609 static void
   9610 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9611 {
   9612 	struct wm_softc *sc = device_private(self);
   9613 
    9614 	if (sc->phy.acquire(sc)) {
    9615 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9616 		    __func__);
         		return;
    9617 	}
   9618 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9619 	sc->phy.release(sc);
   9620 }
   9621 
   9622 /*
   9623  * wm_gmii_i80003_readreg:	[mii interface function]
   9624  *
    9625  *	Read a PHY register on the kumeran bus.
    9626  * This could be handled by the PHY layer if we didn't have to lock the
    9627  * resource ...
   9628  */
   9629 static int
   9630 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9631 {
   9632 	struct wm_softc *sc = device_private(self);
   9633 	int rv;
   9634 
   9635 	if (phy != 1) /* only one PHY on kumeran bus */
   9636 		return 0;
   9637 
   9638 	if (sc->phy.acquire(sc)) {
   9639 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9640 		    __func__);
   9641 		return 0;
   9642 	}
   9643 
   9644 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9645 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9646 		    reg >> GG82563_PAGE_SHIFT);
   9647 	} else {
   9648 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9649 		    reg >> GG82563_PAGE_SHIFT);
   9650 	}
    9651 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9652 	delay(200);
   9653 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9654 	delay(200);
   9655 	sc->phy.release(sc);
   9656 
   9657 	return rv;
   9658 }
   9659 
   9660 /*
   9661  * wm_gmii_i80003_writereg:	[mii interface function]
   9662  *
    9663  *	Write a PHY register on the kumeran bus.
    9664  * This could be handled by the PHY layer if we didn't have to lock the
    9665  * resource ...
   9666  */
   9667 static void
   9668 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9669 {
   9670 	struct wm_softc *sc = device_private(self);
   9671 
   9672 	if (phy != 1) /* only one PHY on kumeran bus */
   9673 		return;
   9674 
   9675 	if (sc->phy.acquire(sc)) {
   9676 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9677 		    __func__);
   9678 		return;
   9679 	}
   9680 
   9681 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9682 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9683 		    reg >> GG82563_PAGE_SHIFT);
   9684 	} else {
   9685 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9686 		    reg >> GG82563_PAGE_SHIFT);
   9687 	}
    9688 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9689 	delay(200);
   9690 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9691 	delay(200);
   9692 
   9693 	sc->phy.release(sc);
   9694 }
   9695 
   9696 /*
   9697  * wm_gmii_bm_readreg:	[mii interface function]
   9698  *
    9699  *	Read a PHY register on the BM PHY.
    9700  * This could be handled by the PHY layer if we didn't have to lock the
    9701  * resource ...
   9702  */
   9703 static int
   9704 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9705 {
   9706 	struct wm_softc *sc = device_private(self);
   9707 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9708 	uint16_t val;
   9709 	int rv;
   9710 
   9711 	if (sc->phy.acquire(sc)) {
   9712 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9713 		    __func__);
   9714 		return 0;
   9715 	}
   9716 
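         	/*
         	 * On BM PHYs (other than 82574/82583), registers on pages 768
         	 * and above, register 31, and register 25 of page 0 are only
         	 * reachable at PHY address 1, so remap the address here.
         	 */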
   9717 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9718 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9719 		    || (reg == 31)) ? 1 : phy;
   9720 	/* Page 800 works differently than the rest so it has its own func */
   9721 	if (page == BM_WUC_PAGE) {
   9722 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9723 		rv = val;
   9724 		goto release;
   9725 	}
   9726 
   9727 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9728 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9729 		    && (sc->sc_type != WM_T_82583))
   9730 			wm_gmii_mdic_writereg(self, phy,
   9731 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9732 		else
   9733 			wm_gmii_mdic_writereg(self, phy,
   9734 			    BME1000_PHY_PAGE_SELECT, page);
   9735 	}
   9736 
   9737 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9738 
   9739 release:
   9740 	sc->phy.release(sc);
   9741 	return rv;
   9742 }
   9743 
   9744 /*
   9745  * wm_gmii_bm_writereg:	[mii interface function]
   9746  *
    9747  *	Write a PHY register on the BM PHY.
    9748  * This could be handled by the PHY layer if we didn't have to lock the
    9749  * resource ...
   9750  */
   9751 static void
   9752 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9753 {
   9754 	struct wm_softc *sc = device_private(self);
   9755 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9756 
   9757 	if (sc->phy.acquire(sc)) {
   9758 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9759 		    __func__);
   9760 		return;
   9761 	}
   9762 
   9763 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9764 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9765 		    || (reg == 31)) ? 1 : phy;
   9766 	/* Page 800 works differently than the rest so it has its own func */
   9767 	if (page == BM_WUC_PAGE) {
   9768 		uint16_t tmp;
   9769 
   9770 		tmp = val;
   9771 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9772 		goto release;
   9773 	}
   9774 
   9775 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9776 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9777 		    && (sc->sc_type != WM_T_82583))
   9778 			wm_gmii_mdic_writereg(self, phy,
   9779 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9780 		else
   9781 			wm_gmii_mdic_writereg(self, phy,
   9782 			    BME1000_PHY_PAGE_SELECT, page);
   9783 	}
   9784 
   9785 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9786 
   9787 release:
   9788 	sc->phy.release(sc);
   9789 }
   9790 
   9791 static void
   9792 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9793 {
   9794 	struct wm_softc *sc = device_private(self);
   9795 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9796 	uint16_t wuce, reg;
   9797 
   9798 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9799 		device_xname(sc->sc_dev), __func__));
   9800 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9801 	if (sc->sc_type == WM_T_PCH) {
    9802 		/* XXX The e1000 driver does nothing here... why? */
   9803 	}
   9804 
   9805 	/*
   9806 	 * 1) Enable PHY wakeup register first.
   9807 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9808 	 */
   9809 
   9810 	/* Set page 769 */
   9811 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9812 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9813 
   9814 	/* Read WUCE and save it */
   9815 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9816 
   9817 	reg = wuce | BM_WUC_ENABLE_BIT;
   9818 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9819 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9820 
   9821 	/* Select page 800 */
   9822 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9823 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9824 
   9825 	/*
   9826 	 * 2) Access PHY wakeup register.
   9827 	 * See e1000_access_phy_wakeup_reg_bm.
   9828 	 */
   9829 
   9830 	/* Write page 800 */
   9831 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9832 
   9833 	if (rd)
   9834 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9835 	else
   9836 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9837 
   9838 	/*
   9839 	 * 3) Disable PHY wakeup register.
   9840 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9841 	 */
   9842 	/* Set page 769 */
   9843 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9844 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9845 
   9846 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9847 }
   9848 
   9849 /*
   9850  * wm_gmii_hv_readreg:	[mii interface function]
   9851  *
    9852  *	Read a PHY register on the HV (PCH) PHY.
    9853  * This could be handled by the PHY layer if we didn't have to lock the
    9854  * resource ...
   9855  */
   9856 static int
   9857 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9858 {
   9859 	struct wm_softc *sc = device_private(self);
   9860 	int rv;
   9861 
   9862 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9863 		device_xname(sc->sc_dev), __func__));
   9864 	if (sc->phy.acquire(sc)) {
   9865 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9866 		    __func__);
   9867 		return 0;
   9868 	}
   9869 
   9870 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9871 	sc->phy.release(sc);
   9872 	return rv;
   9873 }
   9874 
   9875 static int
   9876 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9877 {
   9878 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9879 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9880 	uint16_t val;
   9881 	int rv;
   9882 
   9883 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9884 
   9885 	/* Page 800 works differently than the rest so it has its own func */
   9886 	if (page == BM_WUC_PAGE) {
   9887 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9888 		return val;
   9889 	}
   9890 
   9891 	/*
   9892 	 * Lower than page 768 works differently than the rest so it has its
   9893 	 * own func
   9894 	 */
   9895 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9896 		printf("gmii_hv_readreg!!!\n");
   9897 		return 0;
   9898 	}
   9899 
   9900 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9901 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9902 		    page << BME1000_PAGE_SHIFT);
   9903 	}
   9904 
   9905 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9906 	return rv;
   9907 }
   9908 
   9909 /*
   9910  * wm_gmii_hv_writereg:	[mii interface function]
   9911  *
    9912  *	Write a PHY register on the HV (PCH) PHY.
    9913  * This could be handled by the PHY layer if we didn't have to lock the
    9914  * resource ...
   9915  */
   9916 static void
   9917 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9918 {
   9919 	struct wm_softc *sc = device_private(self);
   9920 
   9921 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9922 		device_xname(sc->sc_dev), __func__));
   9923 
   9924 	if (sc->phy.acquire(sc)) {
   9925 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9926 		    __func__);
   9927 		return;
   9928 	}
   9929 
   9930 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9931 	sc->phy.release(sc);
   9932 }
   9933 
   9934 static void
   9935 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9936 {
   9937 	struct wm_softc *sc = device_private(self);
   9938 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9939 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9940 
   9941 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9942 
   9943 	/* Page 800 works differently than the rest so it has its own func */
   9944 	if (page == BM_WUC_PAGE) {
   9945 		uint16_t tmp;
   9946 
   9947 		tmp = val;
   9948 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9949 		return;
   9950 	}
   9951 
   9952 	/*
   9953 	 * Lower than page 768 works differently than the rest so it has its
   9954 	 * own func
   9955 	 */
   9956 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9957 		printf("gmii_hv_writereg!!!\n");
   9958 		return;
   9959 	}
   9960 
   9961 	{
   9962 		/*
   9963 		 * XXX Workaround MDIO accesses being disabled after entering
   9964 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9965 		 * register is set)
   9966 		 */
   9967 		if (sc->sc_phytype == WMPHY_82578) {
   9968 			struct mii_softc *child;
   9969 
   9970 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9971 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9972 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9973 			    && ((val & (1 << 11)) != 0)) {
   9974 				printf("XXX need workaround\n");
   9975 			}
   9976 		}
   9977 
   9978 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9979 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9980 			    page << BME1000_PAGE_SHIFT);
   9981 		}
   9982 	}
   9983 
   9984 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9985 }
   9986 
   9987 /*
   9988  * wm_gmii_82580_readreg:	[mii interface function]
   9989  *
   9990  *	Read a PHY register on the 82580 and I350.
   9991  * This could be handled by the PHY layer if we didn't have to lock the
    9992  * resource ...
   9993  */
   9994 static int
   9995 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9996 {
   9997 	struct wm_softc *sc = device_private(self);
   9998 	int rv;
   9999 
   10000 	if (sc->phy.acquire(sc) != 0) {
   10001 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10002 		    __func__);
   10003 		return 0;
   10004 	}
   10005 
   10006 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10007 
   10008 	sc->phy.release(sc);
   10009 	return rv;
   10010 }
   10011 
   10012 /*
   10013  * wm_gmii_82580_writereg:	[mii interface function]
   10014  *
   10015  *	Write a PHY register on the 82580 and I350.
   10016  * This could be handled by the PHY layer if we didn't have to lock the
    10017  * resource ...
   10018  */
   10019 static void
   10020 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10021 {
   10022 	struct wm_softc *sc = device_private(self);
   10023 
   10024 	if (sc->phy.acquire(sc) != 0) {
   10025 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10026 		    __func__);
   10027 		return;
   10028 	}
   10029 
   10030 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10031 
   10032 	sc->phy.release(sc);
   10033 }
   10034 
   10035 /*
   10036  * wm_gmii_gs40g_readreg:	[mii interface function]
   10037  *
    10038  *	Read a PHY register on the I210 and I211.
    10039  * This could be handled by the PHY layer if we didn't have to lock the
    10040  * resource ...
   10041  */
   10042 static int
   10043 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10044 {
   10045 	struct wm_softc *sc = device_private(self);
   10046 	int page, offset;
   10047 	int rv;
   10048 
   10049 	/* Acquire semaphore */
   10050 	if (sc->phy.acquire(sc)) {
   10051 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10052 		    __func__);
   10053 		return 0;
   10054 	}
   10055 
   10056 	/* Page select */
   10057 	page = reg >> GS40G_PAGE_SHIFT;
   10058 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10059 
   10060 	/* Read reg */
   10061 	offset = reg & GS40G_OFFSET_MASK;
   10062 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10063 
   10064 	sc->phy.release(sc);
   10065 	return rv;
   10066 }
   10067 
   10068 /*
   10069  * wm_gmii_gs40g_writereg:	[mii interface function]
   10070  *
   10071  *	Write a PHY register on the I210 and I211.
   10072  * This could be handled by the PHY layer if we didn't have to lock the
    10073  * resource ...
   10074  */
   10075 static void
   10076 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10077 {
   10078 	struct wm_softc *sc = device_private(self);
   10079 	int page, offset;
   10080 
   10081 	/* Acquire semaphore */
   10082 	if (sc->phy.acquire(sc)) {
   10083 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10084 		    __func__);
   10085 		return;
   10086 	}
   10087 
   10088 	/* Page select */
   10089 	page = reg >> GS40G_PAGE_SHIFT;
   10090 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10091 
   10092 	/* Write reg */
   10093 	offset = reg & GS40G_OFFSET_MASK;
   10094 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10095 
   10096 	/* Release semaphore */
   10097 	sc->phy.release(sc);
   10098 }
   10099 
   10100 /*
   10101  * wm_gmii_statchg:	[mii interface function]
   10102  *
   10103  *	Callback from MII layer when media changes.
   10104  */
   10105 static void
   10106 wm_gmii_statchg(struct ifnet *ifp)
   10107 {
   10108 	struct wm_softc *sc = ifp->if_softc;
   10109 	struct mii_data *mii = &sc->sc_mii;
   10110 
   10111 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10112 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10113 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10114 
   10115 	/*
   10116 	 * Get flow control negotiation result.
   10117 	 */
   10118 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10119 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10120 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10121 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10122 	}
   10123 
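          	/*
          	 * Apply the negotiated pause flags to the MAC: TX pause sets
          	 * CTRL_TFCE and arms XON (FCRTL_XONE); RX pause sets
          	 * CTRL_RFCE.
          	 */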
   10124 	if (sc->sc_flowflags & IFM_FLOW) {
   10125 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10126 			sc->sc_ctrl |= CTRL_TFCE;
   10127 			sc->sc_fcrtl |= FCRTL_XONE;
   10128 		}
   10129 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10130 			sc->sc_ctrl |= CTRL_RFCE;
   10131 	}
   10132 
   10133 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10134 		DPRINTF(WM_DEBUG_LINK,
   10135 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10136 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10137 	} else {
   10138 		DPRINTF(WM_DEBUG_LINK,
   10139 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10140 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10141 	}
   10142 
   10143 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10144 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10145 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10146 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10147 	if (sc->sc_type == WM_T_80003) {
   10148 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10149 		case IFM_1000_T:
   10150 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10151 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10152 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10153 			break;
   10154 		default:
   10155 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10156 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10157 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10158 			break;
   10159 		}
   10160 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10161 	}
   10162 }
   10163 
   10164 /* kumeran related (80003, ICH* and PCH*) */
   10165 
   10166 /*
   10167  * wm_kmrn_readreg:
   10168  *
   10169  *	Read a kumeran register
   10170  */
   10171 static int
   10172 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10173 {
   10174 	int rv;
   10175 
   10176 	if (sc->sc_type == WM_T_80003)
   10177 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10178 	else
   10179 		rv = sc->phy.acquire(sc);
   10180 	if (rv != 0) {
   10181 		aprint_error_dev(sc->sc_dev,
   10182 		    "%s: failed to get semaphore\n", __func__);
   10183 		return 0;
   10184 	}
   10185 
   10186 	rv = wm_kmrn_readreg_locked(sc, reg);
   10187 
   10188 	if (sc->sc_type == WM_T_80003)
   10189 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10190 	else
   10191 		sc->phy.release(sc);
   10192 
   10193 	return rv;
   10194 }
   10195 
   10196 static int
   10197 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10198 {
   10199 	int rv;
   10200 
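          	/*
          	 * Kumeran registers are reached through the single KUMCTRLSTA
          	 * register: write the offset together with the read-enable
          	 * (REN) bit, wait 2us, then read the 16-bit result back from
          	 * the same register.
          	 */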
   10201 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10202 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10203 	    KUMCTRLSTA_REN);
   10204 	CSR_WRITE_FLUSH(sc);
   10205 	delay(2);
   10206 
   10207 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10208 
   10209 	return rv;
   10210 }
   10211 
   10212 /*
   10213  * wm_kmrn_writereg:
   10214  *
   10215  *	Write a kumeran register
   10216  */
   10217 static void
   10218 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10219 {
   10220 	int rv;
   10221 
   10222 	if (sc->sc_type == WM_T_80003)
   10223 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10224 	else
   10225 		rv = sc->phy.acquire(sc);
   10226 	if (rv != 0) {
   10227 		aprint_error_dev(sc->sc_dev,
   10228 		    "%s: failed to get semaphore\n", __func__);
   10229 		return;
   10230 	}
   10231 
   10232 	wm_kmrn_writereg_locked(sc, reg, val);
   10233 
   10234 	if (sc->sc_type == WM_T_80003)
   10235 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10236 	else
   10237 		sc->phy.release(sc);
   10238 }
   10239 
   10240 static void
   10241 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10242 {
   10243 
   10244 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10245 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10246 	    (val & KUMCTRLSTA_MASK));
   10247 }
   10248 
   10249 /* SGMII related */
   10250 
   10251 /*
   10252  * wm_sgmii_uses_mdio
   10253  *
   10254  * Check whether the transaction is to the internal PHY or the external
   10255  * MDIO interface. Return true if it's MDIO.
   10256  */
   10257 static bool
   10258 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10259 {
   10260 	uint32_t reg;
   10261 	bool ismdio = false;
   10262 
   10263 	switch (sc->sc_type) {
   10264 	case WM_T_82575:
   10265 	case WM_T_82576:
   10266 		reg = CSR_READ(sc, WMREG_MDIC);
   10267 		ismdio = ((reg & MDIC_DEST) != 0);
   10268 		break;
   10269 	case WM_T_82580:
   10270 	case WM_T_I350:
   10271 	case WM_T_I354:
   10272 	case WM_T_I210:
   10273 	case WM_T_I211:
   10274 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10275 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10276 		break;
   10277 	default:
   10278 		break;
   10279 	}
   10280 
   10281 	return ismdio;
   10282 }
   10283 
   10284 /*
   10285  * wm_sgmii_readreg:	[mii interface function]
   10286  *
    10287  *	Read a PHY register on the SGMII.
    10288  * This could be handled by the PHY layer if we didn't have to lock the
    10289  * resource ...
   10290  */
   10291 static int
   10292 wm_sgmii_readreg(device_t self, int phy, int reg)
   10293 {
   10294 	struct wm_softc *sc = device_private(self);
   10295 	uint32_t i2ccmd;
   10296 	int i, rv;
   10297 
   10298 	if (sc->phy.acquire(sc)) {
   10299 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10300 		    __func__);
   10301 		return 0;
   10302 	}
   10303 
   10304 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10305 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10306 	    | I2CCMD_OPCODE_READ;
   10307 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10308 
   10309 	/* Poll the ready bit */
   10310 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10311 		delay(50);
   10312 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10313 		if (i2ccmd & I2CCMD_READY)
   10314 			break;
   10315 	}
   10316 	if ((i2ccmd & I2CCMD_READY) == 0)
   10317 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10318 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10319 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10320 
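          	/* Swap the data bytes back from I2C order */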
   10321 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10322 
   10323 	sc->phy.release(sc);
   10324 	return rv;
   10325 }
   10326 
   10327 /*
   10328  * wm_sgmii_writereg:	[mii interface function]
   10329  *
   10330  *	Write a PHY register on the SGMII.
   10331  * This could be handled by the PHY layer if we didn't have to lock the
    10332  * resource ...
   10333  */
   10334 static void
   10335 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10336 {
   10337 	struct wm_softc *sc = device_private(self);
   10338 	uint32_t i2ccmd;
   10339 	int i;
   10340 	int val_swapped;
   10341 
   10342 	if (sc->phy.acquire(sc) != 0) {
   10343 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10344 		    __func__);
   10345 		return;
   10346 	}
   10347 	/* Swap the data bytes for the I2C interface */
   10348 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10349 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10350 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10351 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10352 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10353 
   10354 	/* Poll the ready bit */
   10355 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10356 		delay(50);
   10357 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10358 		if (i2ccmd & I2CCMD_READY)
   10359 			break;
   10360 	}
   10361 	if ((i2ccmd & I2CCMD_READY) == 0)
   10362 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10363 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10364 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10365 
   10366 	sc->phy.release(sc);
   10367 }
   10368 
   10369 /* TBI related */
   10370 
   10371 /*
   10372  * wm_tbi_mediainit:
   10373  *
   10374  *	Initialize media for use on 1000BASE-X devices.
   10375  */
   10376 static void
   10377 wm_tbi_mediainit(struct wm_softc *sc)
   10378 {
   10379 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10380 	const char *sep = "";
   10381 
   10382 	if (sc->sc_type < WM_T_82543)
   10383 		sc->sc_tipg = TIPG_WM_DFLT;
   10384 	else
   10385 		sc->sc_tipg = TIPG_LG_DFLT;
   10386 
   10387 	sc->sc_tbi_serdes_anegticks = 5;
   10388 
   10389 	/* Initialize our media structures */
   10390 	sc->sc_mii.mii_ifp = ifp;
   10391 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10392 
   10393 	if ((sc->sc_type >= WM_T_82575)
   10394 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10395 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10396 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10397 	else
   10398 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10399 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10400 
   10401 	/*
   10402 	 * SWD Pins:
   10403 	 *
   10404 	 *	0 = Link LED (output)
   10405 	 *	1 = Loss Of Signal (input)
   10406 	 */
   10407 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10408 
   10409 	/* XXX Perhaps this is only for TBI */
   10410 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10411 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10412 
   10413 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10414 		sc->sc_ctrl &= ~CTRL_LRST;
   10415 
   10416 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10417 
   10418 #define	ADD(ss, mm, dd)							\
   10419 do {									\
   10420 	aprint_normal("%s%s", sep, ss);					\
   10421 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10422 	sep = ", ";							\
   10423 } while (/*CONSTCOND*/0)
   10424 
   10425 	aprint_normal_dev(sc->sc_dev, "");
   10426 
   10427 	if (sc->sc_type == WM_T_I354) {
   10428 		uint32_t status;
   10429 
   10430 		status = CSR_READ(sc, WMREG_STATUS);
   10431 		if (((status & STATUS_2P5_SKU) != 0)
   10432 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10433 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10434 		} else
   10435 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10436 	} else if (sc->sc_type == WM_T_82545) {
   10437 		/* Only 82545 is LX (XXX except SFP) */
   10438 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10439 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10440 	} else {
   10441 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10442 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10443 	}
   10444 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10445 	aprint_normal("\n");
   10446 
   10447 #undef ADD
   10448 
   10449 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10450 }
   10451 
   10452 /*
   10453  * wm_tbi_mediachange:	[ifmedia interface function]
   10454  *
   10455  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10456  */
   10457 static int
   10458 wm_tbi_mediachange(struct ifnet *ifp)
   10459 {
   10460 	struct wm_softc *sc = ifp->if_softc;
   10461 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10462 	uint32_t status;
   10463 	int i;
   10464 
   10465 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10466 		/* XXX need some work for >= 82571 and < 82575 */
   10467 		if (sc->sc_type < WM_T_82575)
   10468 			return 0;
   10469 	}
   10470 
   10471 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10472 	    || (sc->sc_type >= WM_T_82575))
   10473 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10474 
   10475 	sc->sc_ctrl &= ~CTRL_LRST;
   10476 	sc->sc_txcw = TXCW_ANE;
   10477 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10478 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10479 	else if (ife->ifm_media & IFM_FDX)
   10480 		sc->sc_txcw |= TXCW_FD;
   10481 	else
   10482 		sc->sc_txcw |= TXCW_HD;
   10483 
   10484 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10485 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10486 
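          	/*
          	 * TXCW is the 1000BASE-X autonegotiation advertisement; the
          	 * duplex abilities and pause bits set above are what gets
          	 * sent to the link partner once CTRL_LRST is cleared.
          	 */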
   10487 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10488 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10489 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10490 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10491 	CSR_WRITE_FLUSH(sc);
   10492 	delay(1000);
   10493 
   10494 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10495 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10496 
   10497 	/*
    10498 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
    10499 	 * set if the optics detect a signal, 0 if they don't.
   10500 	 */
   10501 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10502 		/* Have signal; wait for the link to come up. */
   10503 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10504 			delay(10000);
   10505 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10506 				break;
   10507 		}
   10508 
   10509 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10510 			    device_xname(sc->sc_dev),i));
   10511 
   10512 		status = CSR_READ(sc, WMREG_STATUS);
   10513 		DPRINTF(WM_DEBUG_LINK,
   10514 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10515 			device_xname(sc->sc_dev),status, STATUS_LU));
   10516 		if (status & STATUS_LU) {
   10517 			/* Link is up. */
   10518 			DPRINTF(WM_DEBUG_LINK,
   10519 			    ("%s: LINK: set media -> link up %s\n",
   10520 			    device_xname(sc->sc_dev),
   10521 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10522 
   10523 			/*
   10524 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10525 			 * so we should update sc->sc_ctrl
   10526 			 */
   10527 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10528 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10529 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10530 			if (status & STATUS_FD)
   10531 				sc->sc_tctl |=
   10532 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10533 			else
   10534 				sc->sc_tctl |=
   10535 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10536 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10537 				sc->sc_fcrtl |= FCRTL_XONE;
   10538 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10539 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10540 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10541 				      sc->sc_fcrtl);
   10542 			sc->sc_tbi_linkup = 1;
   10543 		} else {
   10544 			if (i == WM_LINKUP_TIMEOUT)
   10545 				wm_check_for_link(sc);
   10546 			/* Link is down. */
   10547 			DPRINTF(WM_DEBUG_LINK,
   10548 			    ("%s: LINK: set media -> link down\n",
   10549 			    device_xname(sc->sc_dev)));
   10550 			sc->sc_tbi_linkup = 0;
   10551 		}
   10552 	} else {
   10553 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10554 		    device_xname(sc->sc_dev)));
   10555 		sc->sc_tbi_linkup = 0;
   10556 	}
   10557 
   10558 	wm_tbi_serdes_set_linkled(sc);
   10559 
   10560 	return 0;
   10561 }
   10562 
   10563 /*
   10564  * wm_tbi_mediastatus:	[ifmedia interface function]
   10565  *
   10566  *	Get the current interface media status on a 1000BASE-X device.
   10567  */
   10568 static void
   10569 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10570 {
   10571 	struct wm_softc *sc = ifp->if_softc;
   10572 	uint32_t ctrl, status;
   10573 
   10574 	ifmr->ifm_status = IFM_AVALID;
   10575 	ifmr->ifm_active = IFM_ETHER;
   10576 
   10577 	status = CSR_READ(sc, WMREG_STATUS);
   10578 	if ((status & STATUS_LU) == 0) {
   10579 		ifmr->ifm_active |= IFM_NONE;
   10580 		return;
   10581 	}
   10582 
   10583 	ifmr->ifm_status |= IFM_ACTIVE;
   10584 	/* Only 82545 is LX */
   10585 	if (sc->sc_type == WM_T_82545)
   10586 		ifmr->ifm_active |= IFM_1000_LX;
   10587 	else
   10588 		ifmr->ifm_active |= IFM_1000_SX;
   10589 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10590 		ifmr->ifm_active |= IFM_FDX;
   10591 	else
   10592 		ifmr->ifm_active |= IFM_HDX;
   10593 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10594 	if (ctrl & CTRL_RFCE)
   10595 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10596 	if (ctrl & CTRL_TFCE)
   10597 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10598 }
   10599 
   10600 /* XXX TBI only */
   10601 static int
   10602 wm_check_for_link(struct wm_softc *sc)
   10603 {
   10604 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10605 	uint32_t rxcw;
   10606 	uint32_t ctrl;
   10607 	uint32_t status;
   10608 	uint32_t sig;
   10609 
   10610 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10611 		/* XXX need some work for >= 82571 */
   10612 		if (sc->sc_type >= WM_T_82571) {
   10613 			sc->sc_tbi_linkup = 1;
   10614 			return 0;
   10615 		}
   10616 	}
   10617 
   10618 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10619 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10620 	status = CSR_READ(sc, WMREG_STATUS);
   10621 
   10622 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10623 
   10624 	DPRINTF(WM_DEBUG_LINK,
   10625 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10626 		device_xname(sc->sc_dev), __func__,
   10627 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10628 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10629 
   10630 	/*
   10631 	 * SWDPIN   LU RXCW
   10632 	 *      0    0    0
   10633 	 *      0    0    1	(should not happen)
   10634 	 *      0    1    0	(should not happen)
   10635 	 *      0    1    1	(should not happen)
   10636 	 *      1    0    0	Disable autonego and force linkup
   10637 	 *      1    0    1	got /C/ but not linkup yet
   10638 	 *      1    1    0	(linkup)
   10639 	 *      1    1    1	If IFM_AUTO, back to autonego
   10640 	 *
   10641 	 */
   10642 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10643 	    && ((status & STATUS_LU) == 0)
   10644 	    && ((rxcw & RXCW_C) == 0)) {
   10645 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10646 			__func__));
   10647 		sc->sc_tbi_linkup = 0;
   10648 		/* Disable auto-negotiation in the TXCW register */
   10649 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10650 
   10651 		/*
   10652 		 * Force link-up and also force full-duplex.
   10653 		 *
    10654 		 * NOTE: the hardware has already updated TFCE and RFCE in CTRL,
   10655 		 * so we should update sc->sc_ctrl
   10656 		 */
   10657 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10658 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10659 	} else if (((status & STATUS_LU) != 0)
   10660 	    && ((rxcw & RXCW_C) != 0)
   10661 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10662 		sc->sc_tbi_linkup = 1;
   10663 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10664 			__func__));
   10665 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10666 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10667 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10668 	    && ((rxcw & RXCW_C) != 0)) {
   10669 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10670 	} else {
   10671 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10672 			status));
   10673 	}
   10674 
   10675 	return 0;
   10676 }
   10677 
   10678 /*
   10679  * wm_tbi_tick:
   10680  *
   10681  *	Check the link on TBI devices.
   10682  *	This function acts as mii_tick().
   10683  */
   10684 static void
   10685 wm_tbi_tick(struct wm_softc *sc)
   10686 {
   10687 	struct mii_data *mii = &sc->sc_mii;
   10688 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10689 	uint32_t status;
   10690 
   10691 	KASSERT(WM_CORE_LOCKED(sc));
   10692 
   10693 	status = CSR_READ(sc, WMREG_STATUS);
   10694 
   10695 	/* XXX is this needed? */
   10696 	(void)CSR_READ(sc, WMREG_RXCW);
   10697 	(void)CSR_READ(sc, WMREG_CTRL);
   10698 
   10699 	/* set link status */
   10700 	if ((status & STATUS_LU) == 0) {
   10701 		DPRINTF(WM_DEBUG_LINK,
   10702 		    ("%s: LINK: checklink -> down\n",
   10703 			device_xname(sc->sc_dev)));
   10704 		sc->sc_tbi_linkup = 0;
   10705 	} else if (sc->sc_tbi_linkup == 0) {
   10706 		DPRINTF(WM_DEBUG_LINK,
   10707 		    ("%s: LINK: checklink -> up %s\n",
   10708 			device_xname(sc->sc_dev),
   10709 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10710 		sc->sc_tbi_linkup = 1;
   10711 		sc->sc_tbi_serdes_ticks = 0;
   10712 	}
   10713 
   10714 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10715 		goto setled;
   10716 
   10717 	if ((status & STATUS_LU) == 0) {
   10718 		sc->sc_tbi_linkup = 0;
   10719 		/* If the timer expired, retry autonegotiation */
   10720 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10721 		    && (++sc->sc_tbi_serdes_ticks
   10722 			>= sc->sc_tbi_serdes_anegticks)) {
   10723 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10724 			sc->sc_tbi_serdes_ticks = 0;
   10725 			/*
   10726 			 * Reset the link, and let autonegotiation do
   10727 			 * its thing
   10728 			 */
   10729 			sc->sc_ctrl |= CTRL_LRST;
   10730 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10731 			CSR_WRITE_FLUSH(sc);
   10732 			delay(1000);
   10733 			sc->sc_ctrl &= ~CTRL_LRST;
   10734 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10735 			CSR_WRITE_FLUSH(sc);
   10736 			delay(1000);
   10737 			CSR_WRITE(sc, WMREG_TXCW,
   10738 			    sc->sc_txcw & ~TXCW_ANE);
   10739 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10740 		}
   10741 	}
   10742 
   10743 setled:
   10744 	wm_tbi_serdes_set_linkled(sc);
   10745 }
   10746 
   10747 /* SERDES related */
   10748 static void
   10749 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10750 {
   10751 	uint32_t reg;
   10752 
   10753 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10754 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10755 		return;
   10756 
   10757 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10758 	reg |= PCS_CFG_PCS_EN;
   10759 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10760 
   10761 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10762 	reg &= ~CTRL_EXT_SWDPIN(3);
   10763 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10764 	CSR_WRITE_FLUSH(sc);
   10765 }
   10766 
   10767 static int
   10768 wm_serdes_mediachange(struct ifnet *ifp)
   10769 {
   10770 	struct wm_softc *sc = ifp->if_softc;
   10771 	bool pcs_autoneg = true; /* XXX */
   10772 	uint32_t ctrl_ext, pcs_lctl, reg;
   10773 
   10774 	/* XXX Currently, this function is not called on 8257[12] */
   10775 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10776 	    || (sc->sc_type >= WM_T_82575))
   10777 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10778 
   10779 	wm_serdes_power_up_link_82575(sc);
   10780 
   10781 	sc->sc_ctrl |= CTRL_SLU;
   10782 
   10783 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10784 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10785 
   10786 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10787 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10788 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10789 	case CTRL_EXT_LINK_MODE_SGMII:
   10790 		pcs_autoneg = true;
   10791 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10792 		break;
   10793 	case CTRL_EXT_LINK_MODE_1000KX:
   10794 		pcs_autoneg = false;
   10795 		/* FALLTHROUGH */
   10796 	default:
   10797 		if ((sc->sc_type == WM_T_82575)
   10798 		    || (sc->sc_type == WM_T_82576)) {
   10799 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10800 				pcs_autoneg = false;
   10801 		}
   10802 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10803 		    | CTRL_FRCFDX;
   10804 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10805 	}
   10806 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10807 
   10808 	if (pcs_autoneg) {
   10809 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10810 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10811 
   10812 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10813 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10814 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10815 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10816 	} else
   10817 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10818 
   10819 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10820 
   10822 	return 0;
   10823 }
   10824 
   10825 static void
   10826 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10827 {
   10828 	struct wm_softc *sc = ifp->if_softc;
   10829 	struct mii_data *mii = &sc->sc_mii;
   10830 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10831 	uint32_t pcs_adv, pcs_lpab, reg;
   10832 
   10833 	ifmr->ifm_status = IFM_AVALID;
   10834 	ifmr->ifm_active = IFM_ETHER;
   10835 
   10836 	/* Check PCS */
   10837 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10838 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10839 		ifmr->ifm_active |= IFM_NONE;
   10840 		sc->sc_tbi_linkup = 0;
   10841 		goto setled;
   10842 	}
   10843 
   10844 	sc->sc_tbi_linkup = 1;
   10845 	ifmr->ifm_status |= IFM_ACTIVE;
   10846 	if (sc->sc_type == WM_T_I354) {
   10847 		uint32_t status;
   10848 
   10849 		status = CSR_READ(sc, WMREG_STATUS);
   10850 		if (((status & STATUS_2P5_SKU) != 0)
   10851 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10852 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10853 		} else
   10854 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10855 	} else {
   10856 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10857 		case PCS_LSTS_SPEED_10:
   10858 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10859 			break;
   10860 		case PCS_LSTS_SPEED_100:
   10861 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10862 			break;
   10863 		case PCS_LSTS_SPEED_1000:
   10864 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10865 			break;
   10866 		default:
   10867 			device_printf(sc->sc_dev, "Unknown speed\n");
   10868 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10869 			break;
   10870 		}
   10871 	}
   10872 	if ((reg & PCS_LSTS_FDX) != 0)
   10873 		ifmr->ifm_active |= IFM_FDX;
   10874 	else
   10875 		ifmr->ifm_active |= IFM_HDX;
   10876 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10877 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10878 		/* Check flow */
   10879 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10880 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10881 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10882 			goto setled;
   10883 		}
   10884 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10885 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10886 		DPRINTF(WM_DEBUG_LINK,
   10887 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10888 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10889 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10890 			mii->mii_media_active |= IFM_FLOW
   10891 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10892 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10893 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10894 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10895 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10896 			mii->mii_media_active |= IFM_FLOW
   10897 			    | IFM_ETH_TXPAUSE;
   10898 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10899 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10900 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10901 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10902 			mii->mii_media_active |= IFM_FLOW
   10903 			    | IFM_ETH_RXPAUSE;
   10904 		}
   10905 	}
   10906 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10907 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10908 setled:
   10909 	wm_tbi_serdes_set_linkled(sc);
   10910 }
   10911 
   10912 /*
   10913  * wm_serdes_tick:
   10914  *
   10915  *	Check the link on serdes devices.
   10916  */
   10917 static void
   10918 wm_serdes_tick(struct wm_softc *sc)
   10919 {
   10920 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10921 	struct mii_data *mii = &sc->sc_mii;
   10922 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10923 	uint32_t reg;
   10924 
   10925 	KASSERT(WM_CORE_LOCKED(sc));
   10926 
   10927 	mii->mii_media_status = IFM_AVALID;
   10928 	mii->mii_media_active = IFM_ETHER;
   10929 
   10930 	/* Check PCS */
   10931 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10932 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10933 		mii->mii_media_status |= IFM_ACTIVE;
   10934 		sc->sc_tbi_linkup = 1;
   10935 		sc->sc_tbi_serdes_ticks = 0;
   10936 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10937 		if ((reg & PCS_LSTS_FDX) != 0)
   10938 			mii->mii_media_active |= IFM_FDX;
   10939 		else
   10940 			mii->mii_media_active |= IFM_HDX;
   10941 	} else {
   10942 		mii->mii_media_status |= IFM_NONE;
   10943 		sc->sc_tbi_linkup = 0;
   10944 		/* If the timer expired, retry autonegotiation */
   10945 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10946 		    && (++sc->sc_tbi_serdes_ticks
   10947 			>= sc->sc_tbi_serdes_anegticks)) {
   10948 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10949 			sc->sc_tbi_serdes_ticks = 0;
   10950 			/* XXX */
   10951 			wm_serdes_mediachange(ifp);
   10952 		}
   10953 	}
   10954 
   10955 	wm_tbi_serdes_set_linkled(sc);
   10956 }
   10957 
   10958 /* SFP related */
   10959 
   10960 static int
   10961 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10962 {
   10963 	uint32_t i2ccmd;
   10964 	int i;
   10965 
   10966 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10967 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10968 
   10969 	/* Poll the ready bit */
   10970 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10971 		delay(50);
   10972 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10973 		if (i2ccmd & I2CCMD_READY)
   10974 			break;
   10975 	}
   10976 	if ((i2ccmd & I2CCMD_READY) == 0)
   10977 		return -1;
   10978 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10979 		return -1;
   10980 
   10981 	*data = i2ccmd & 0x00ff;
   10982 
   10983 	return 0;
   10984 }
   10985 
   10986 static uint32_t
   10987 wm_sfp_get_media_type(struct wm_softc *sc)
   10988 {
   10989 	uint32_t ctrl_ext;
   10990 	uint8_t val = 0;
   10991 	int timeout = 3;
   10992 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10993 	int rv = -1;
   10994 
   10995 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10996 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10997 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10998 	CSR_WRITE_FLUSH(sc);
   10999 
   11000 	/* Read SFP module data */
   11001 	while (timeout) {
   11002 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11003 		if (rv == 0)
   11004 			break;
   11005 		delay(100*1000); /* XXX too big */
   11006 		timeout--;
   11007 	}
   11008 	if (rv != 0)
   11009 		goto out;
   11010 	switch (val) {
   11011 	case SFF_SFP_ID_SFF:
   11012 		aprint_normal_dev(sc->sc_dev,
   11013 		    "Module/Connector soldered to board\n");
   11014 		break;
   11015 	case SFF_SFP_ID_SFP:
   11016 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11017 		break;
   11018 	case SFF_SFP_ID_UNKNOWN:
   11019 		goto out;
   11020 	default:
   11021 		break;
   11022 	}
   11023 
   11024 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11025 	if (rv != 0) {
   11026 		goto out;
   11027 	}
   11028 
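          	/*
          	 * Map the SFF Ethernet compliance flags to a media type:
          	 * 1000BASE-SX/LX modules attach as SERDES; 1000BASE-T and
          	 * 100BASE-FX modules are driven through SGMII.
          	 */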
   11029 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11030 		mediatype = WM_MEDIATYPE_SERDES;
    11031 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11032 		sc->sc_flags |= WM_F_SGMII;
   11033 		mediatype = WM_MEDIATYPE_COPPER;
    11034 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11035 		sc->sc_flags |= WM_F_SGMII;
   11036 		mediatype = WM_MEDIATYPE_SERDES;
   11037 	}
   11038 
   11039 out:
   11040 	/* Restore I2C interface setting */
   11041 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11042 
   11043 	return mediatype;
   11044 }
   11045 
   11046 /*
   11047  * NVM related.
   11048  * Microwire, SPI (w/wo EERD) and Flash.
   11049  */
   11050 
   11051 /* Both spi and uwire */
   11052 
   11053 /*
   11054  * wm_eeprom_sendbits:
   11055  *
   11056  *	Send a series of bits to the EEPROM.
   11057  */
   11058 static void
   11059 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11060 {
   11061 	uint32_t reg;
   11062 	int x;
   11063 
   11064 	reg = CSR_READ(sc, WMREG_EECD);
   11065 
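          	/*
          	 * Clock each bit out MSB first: place it on EECD_DI, then
          	 * pulse EECD_SK high and low with 2us of settling time
          	 * around each edge.
          	 */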
   11066 	for (x = nbits; x > 0; x--) {
   11067 		if (bits & (1U << (x - 1)))
   11068 			reg |= EECD_DI;
   11069 		else
   11070 			reg &= ~EECD_DI;
   11071 		CSR_WRITE(sc, WMREG_EECD, reg);
   11072 		CSR_WRITE_FLUSH(sc);
   11073 		delay(2);
   11074 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11075 		CSR_WRITE_FLUSH(sc);
   11076 		delay(2);
   11077 		CSR_WRITE(sc, WMREG_EECD, reg);
   11078 		CSR_WRITE_FLUSH(sc);
   11079 		delay(2);
   11080 	}
   11081 }
   11082 
   11083 /*
   11084  * wm_eeprom_recvbits:
   11085  *
   11086  *	Receive a series of bits from the EEPROM.
   11087  */
   11088 static void
   11089 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11090 {
   11091 	uint32_t reg, val;
   11092 	int x;
   11093 
   11094 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11095 
   11096 	val = 0;
   11097 	for (x = nbits; x > 0; x--) {
   11098 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11099 		CSR_WRITE_FLUSH(sc);
   11100 		delay(2);
   11101 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11102 			val |= (1U << (x - 1));
   11103 		CSR_WRITE(sc, WMREG_EECD, reg);
   11104 		CSR_WRITE_FLUSH(sc);
   11105 		delay(2);
   11106 	}
   11107 	*valp = val;
   11108 }
   11109 
   11110 /* Microwire */
   11111 
   11112 /*
   11113  * wm_nvm_read_uwire:
   11114  *
   11115  *	Read a word from the EEPROM using the MicroWire protocol.
   11116  */
   11117 static int
   11118 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11119 {
   11120 	uint32_t reg, val;
   11121 	int i;
   11122 
   11123 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11124 		device_xname(sc->sc_dev), __func__));
   11125 
   11126 	for (i = 0; i < wordcnt; i++) {
   11127 		/* Clear SK and DI. */
   11128 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11129 		CSR_WRITE(sc, WMREG_EECD, reg);
   11130 
   11131 		/*
   11132 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11133 		 * and Xen.
   11134 		 *
   11135 		 * We use this workaround only for 82540 because qemu's
   11136 		 * e1000 act as 82540.
   11137 		 */
   11138 		if (sc->sc_type == WM_T_82540) {
   11139 			reg |= EECD_SK;
   11140 			CSR_WRITE(sc, WMREG_EECD, reg);
   11141 			reg &= ~EECD_SK;
   11142 			CSR_WRITE(sc, WMREG_EECD, reg);
   11143 			CSR_WRITE_FLUSH(sc);
   11144 			delay(2);
   11145 		}
   11146 		/* XXX: end of workaround */
   11147 
   11148 		/* Set CHIP SELECT. */
   11149 		reg |= EECD_CS;
   11150 		CSR_WRITE(sc, WMREG_EECD, reg);
   11151 		CSR_WRITE_FLUSH(sc);
   11152 		delay(2);
   11153 
   11154 		/* Shift in the READ command. */
   11155 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11156 
   11157 		/* Shift in address. */
   11158 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11159 
   11160 		/* Shift out the data. */
   11161 		wm_eeprom_recvbits(sc, &val, 16);
   11162 		data[i] = val & 0xffff;
   11163 
   11164 		/* Clear CHIP SELECT. */
   11165 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11166 		CSR_WRITE(sc, WMREG_EECD, reg);
   11167 		CSR_WRITE_FLUSH(sc);
   11168 		delay(2);
   11169 	}
   11170 
   11171 	return 0;
   11172 }
   11173 
   11174 /* SPI */
   11175 
   11176 /*
   11177  * Set SPI and FLASH related information from the EECD register.
   11178  * For 82541 and 82547, the word size is taken from EEPROM.
   11179  */
   11180 static int
   11181 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11182 {
   11183 	int size;
   11184 	uint32_t reg;
   11185 	uint16_t data;
   11186 
   11187 	reg = CSR_READ(sc, WMREG_EECD);
   11188 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11189 
   11190 	/* Read the size of NVM from EECD by default */
   11191 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11192 	switch (sc->sc_type) {
   11193 	case WM_T_82541:
   11194 	case WM_T_82541_2:
   11195 	case WM_T_82547:
   11196 	case WM_T_82547_2:
   11197 		/* Set dummy value to access EEPROM */
   11198 		sc->sc_nvm_wordsize = 64;
   11199 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11200 		reg = data;
   11201 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11202 		if (size == 0)
   11203 			size = 6; /* 64 word size */
   11204 		else
   11205 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11206 		break;
   11207 	case WM_T_80003:
   11208 	case WM_T_82571:
   11209 	case WM_T_82572:
   11210 	case WM_T_82573: /* SPI case */
   11211 	case WM_T_82574: /* SPI case */
   11212 	case WM_T_82583: /* SPI case */
   11213 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11214 		if (size > 14)
   11215 			size = 14;
   11216 		break;
   11217 	case WM_T_82575:
   11218 	case WM_T_82576:
   11219 	case WM_T_82580:
   11220 	case WM_T_I350:
   11221 	case WM_T_I354:
   11222 	case WM_T_I210:
   11223 	case WM_T_I211:
   11224 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11225 		if (size > 15)
   11226 			size = 15;
   11227 		break;
   11228 	default:
   11229 		aprint_error_dev(sc->sc_dev,
   11230 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11231 		return -1;
   11232 		break;
   11233 	}
   11234 
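          	/* The size field is an exponent, e.g. 6 -> 64 words */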
   11235 	sc->sc_nvm_wordsize = 1 << size;
   11236 
   11237 	return 0;
   11238 }
   11239 
   11240 /*
   11241  * wm_nvm_ready_spi:
   11242  *
   11243  *	Wait for a SPI EEPROM to be ready for commands.
   11244  */
   11245 static int
   11246 wm_nvm_ready_spi(struct wm_softc *sc)
   11247 {
   11248 	uint32_t val;
   11249 	int usec;
   11250 
   11251 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11252 		device_xname(sc->sc_dev), __func__));
   11253 
   11254 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11255 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11256 		wm_eeprom_recvbits(sc, &val, 8);
   11257 		if ((val & SPI_SR_RDY) == 0)
   11258 			break;
   11259 	}
   11260 	if (usec >= SPI_MAX_RETRIES) {
   11261 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11262 		return 1;
   11263 	}
   11264 	return 0;
   11265 }
   11266 
   11267 /*
   11268  * wm_nvm_read_spi:
   11269  *
    11270  *	Read a word from the EEPROM using the SPI protocol.
   11271  */
   11272 static int
   11273 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11274 {
   11275 	uint32_t reg, val;
   11276 	int i;
   11277 	uint8_t opc;
   11278 
   11279 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11280 		device_xname(sc->sc_dev), __func__));
   11281 
   11282 	/* Clear SK and CS. */
   11283 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11284 	CSR_WRITE(sc, WMREG_EECD, reg);
   11285 	CSR_WRITE_FLUSH(sc);
   11286 	delay(2);
   11287 
   11288 	if (wm_nvm_ready_spi(sc))
   11289 		return 1;
   11290 
   11291 	/* Toggle CS to flush commands. */
   11292 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11293 	CSR_WRITE_FLUSH(sc);
   11294 	delay(2);
   11295 	CSR_WRITE(sc, WMREG_EECD, reg);
   11296 	CSR_WRITE_FLUSH(sc);
   11297 	delay(2);
   11298 
   11299 	opc = SPI_OPC_READ;
   11300 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11301 		opc |= SPI_OPC_A8;
   11302 
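	/*
	 * Small-address (8-bit) parts encode the ninth address bit in the
	 * opcode (A8), and the EEPROM is byte addressed, so the word
	 * offset is shifted left by one to form the byte address.
	 */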
   11303 	wm_eeprom_sendbits(sc, opc, 8);
   11304 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11305 
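	/*
	 * wm_eeprom_recvbits shifts the stream in MSB first, so the byte
	 * read first ends up in the high half of val; swap the two bytes
	 * so data[i] holds the word in host order.
	 */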
   11306 	for (i = 0; i < wordcnt; i++) {
   11307 		wm_eeprom_recvbits(sc, &val, 16);
   11308 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11309 	}
   11310 
   11311 	/* Raise CS and clear SK. */
   11312 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11313 	CSR_WRITE(sc, WMREG_EECD, reg);
   11314 	CSR_WRITE_FLUSH(sc);
   11315 	delay(2);
   11316 
   11317 	return 0;
   11318 }
   11319 
    11320 /* NVM reads using the EERD register */
   11321 
   11322 static int
   11323 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11324 {
   11325 	uint32_t attempts = 100000;
   11326 	uint32_t i, reg = 0;
   11327 	int32_t done = -1;
   11328 
   11329 	for (i = 0; i < attempts; i++) {
   11330 		reg = CSR_READ(sc, rw);
   11331 
   11332 		if (reg & EERD_DONE) {
   11333 			done = 0;
   11334 			break;
   11335 		}
   11336 		delay(5);
   11337 	}
   11338 
   11339 	return done;
   11340 }
   11341 
   11342 static int
   11343 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11344     uint16_t *data)
   11345 {
   11346 	int i, eerd = 0;
   11347 	int error = 0;
   11348 
   11349 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11350 		device_xname(sc->sc_dev), __func__));
   11351 
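	/*
	 * EERD protocol: write the word address together with the START
	 * bit, poll until hardware sets DONE, then pull the result out
	 * of the data field of the same register.
	 */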
   11352 	for (i = 0; i < wordcnt; i++) {
   11353 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11354 
   11355 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11356 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11357 		if (error != 0)
   11358 			break;
   11359 
   11360 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11361 	}
   11362 
   11363 	return error;
   11364 }
   11365 
   11366 /* Flash */
   11367 
   11368 static int
   11369 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11370 {
   11371 	uint32_t eecd;
   11372 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11373 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11374 	uint8_t sig_byte = 0;
   11375 
   11376 	switch (sc->sc_type) {
   11377 	case WM_T_PCH_SPT:
   11378 		/*
   11379 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11380 		 * sector valid bits from the NVM.
   11381 		 */
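		/*
		 * Field values 2 and 3 select bank 0 and 1; values 0 and 1
		 * mean no valid bank is present.
		 */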
   11382 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11383 		if ((*bank == 0) || (*bank == 1)) {
   11384 			aprint_error_dev(sc->sc_dev,
   11385 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11386 				*bank);
   11387 			return -1;
   11388 		} else {
   11389 			*bank = *bank - 2;
   11390 			return 0;
   11391 		}
   11392 	case WM_T_ICH8:
   11393 	case WM_T_ICH9:
   11394 		eecd = CSR_READ(sc, WMREG_EECD);
   11395 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11396 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11397 			return 0;
   11398 		}
   11399 		/* FALLTHROUGH */
   11400 	default:
   11401 		/* Default to 0 */
   11402 		*bank = 0;
   11403 
   11404 		/* Check bank 0 */
   11405 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11406 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11407 			*bank = 0;
   11408 			return 0;
   11409 		}
   11410 
   11411 		/* Check bank 1 */
   11412 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11413 		    &sig_byte);
   11414 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11415 			*bank = 1;
   11416 			return 0;
   11417 		}
   11418 	}
   11419 
   11420 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11421 		device_xname(sc->sc_dev)));
   11422 	return -1;
   11423 }
   11424 
   11425 /******************************************************************************
   11426  * This function does initial flash setup so that a new read/write/erase cycle
   11427  * can be started.
   11428  *
   11429  * sc - The pointer to the hw structure
   11430  ****************************************************************************/
   11431 static int32_t
   11432 wm_ich8_cycle_init(struct wm_softc *sc)
   11433 {
   11434 	uint16_t hsfsts;
   11435 	int32_t error = 1;
   11436 	int32_t i     = 0;
   11437 
   11438 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11439 
    11440 	/* Check the Flash Descriptor Valid bit in the HW status register */
   11441 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11442 		return error;
   11443 	}
   11444 
    11445 	/* Clear FCERR in the HW status register by writing a 1 */
    11446 	/* Clear DAEL in the HW status register by writing a 1 */
   11447 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11448 
   11449 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11450 
   11451 	/*
    11452 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11453 	 * check against in order to start a new cycle, or the FDONE bit
    11454 	 * should be changed in the hardware so that it is 1 after hardware
    11455 	 * reset, which can then be used as an indication whether a cycle is
    11456 	 * in progress or has been completed.  We should also have some
    11457 	 * software semaphore mechanism to guard FDONE or the cycle-in-
    11458 	 * progress bit so that two threads' accesses to those bits can be
    11459 	 * serialized, or so that two threads don't start a cycle at once.
   11460 	 */
   11461 
   11462 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11463 		/*
   11464 		 * There is no cycle running at present, so we can start a
   11465 		 * cycle
   11466 		 */
   11467 
   11468 		/* Begin by setting Flash Cycle Done. */
   11469 		hsfsts |= HSFSTS_DONE;
   11470 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11471 		error = 0;
   11472 	} else {
   11473 		/*
    11474 		 * Otherwise poll for some time so the current cycle has a
    11475 		 * chance to end before giving up.
   11476 		 */
   11477 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11478 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11479 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11480 				error = 0;
   11481 				break;
   11482 			}
   11483 			delay(1);
   11484 		}
   11485 		if (error == 0) {
   11486 			/*
    11487 			 * The previous cycle finished before the timeout;
    11488 			 * now set the Flash Cycle Done bit.
   11489 			 */
   11490 			hsfsts |= HSFSTS_DONE;
   11491 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11492 		}
   11493 	}
   11494 	return error;
   11495 }
   11496 
   11497 /******************************************************************************
   11498  * This function starts a flash cycle and waits for its completion
   11499  *
   11500  * sc - The pointer to the hw structure
   11501  ****************************************************************************/
   11502 static int32_t
   11503 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11504 {
   11505 	uint16_t hsflctl;
   11506 	uint16_t hsfsts;
   11507 	int32_t error = 1;
   11508 	uint32_t i = 0;
   11509 
   11510 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11511 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11512 	hsflctl |= HSFCTL_GO;
   11513 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11514 
   11515 	/* Wait till FDONE bit is set to 1 */
   11516 	do {
   11517 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11518 		if (hsfsts & HSFSTS_DONE)
   11519 			break;
   11520 		delay(1);
   11521 		i++;
   11522 	} while (i < timeout);
    11523 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11524 		error = 0;
   11525 
   11526 	return error;
   11527 }
   11528 
   11529 /******************************************************************************
   11530  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11531  *
   11532  * sc - The pointer to the hw structure
   11533  * index - The index of the byte or word to read.
    11534  * size - Size of data to read: 1=byte, 2=word, 4=dword
   11535  * data - Pointer to the word to store the value read.
   11536  *****************************************************************************/
   11537 static int32_t
   11538 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11539     uint32_t size, uint32_t *data)
   11540 {
   11541 	uint16_t hsfsts;
   11542 	uint16_t hsflctl;
   11543 	uint32_t flash_linear_address;
   11544 	uint32_t flash_data = 0;
   11545 	int32_t error = 1;
   11546 	int32_t count = 0;
   11547 
    11548 	if (size < 1 || size > 4 || data == NULL ||
   11549 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11550 		return error;
   11551 
   11552 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11553 	    sc->sc_ich8_flash_base;
   11554 
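	/*
	 * Program and fire the flash cycle, retrying the whole
	 * init/program/fire sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT
	 * times if the hardware reports a cycle error (FCERR).
	 */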
   11555 	do {
   11556 		delay(1);
   11557 		/* Steps */
   11558 		error = wm_ich8_cycle_init(sc);
   11559 		if (error)
   11560 			break;
   11561 
   11562 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11563 		/* BCOUNT 0b/1b/11b selects a 1-, 2- or 4-byte transfer. */
   11564 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11565 		    & HSFCTL_BCOUNT_MASK;
   11566 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11567 		if (sc->sc_type == WM_T_PCH_SPT) {
   11568 			/*
    11569 			 * In SPT, this register is in the LAN memory space,
    11570 			 * not flash.  Therefore only 32-bit access is supported.
   11571 			 */
   11572 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11573 			    (uint32_t)hsflctl);
   11574 		} else
   11575 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11576 
   11577 		/*
   11578 		 * Write the last 24 bits of index into Flash Linear address
   11579 		 * field in Flash Address
   11580 		 */
    11581 		/* TODO: check the index against the size of the flash */
   11582 
   11583 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11584 
   11585 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11586 
   11587 		/*
    11588 		 * If FCERR is set, clear it and retry the whole sequence
    11589 		 * (up to ICH_FLASH_CYCLE_REPEAT_COUNT times); otherwise
    11590 		 * read in (shift in) the Flash Data0 register, least
    11591 		 * significant byte first.
   11592 		 */
   11593 		if (error == 0) {
   11594 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11595 			if (size == 1)
   11596 				*data = (uint8_t)(flash_data & 0x000000FF);
   11597 			else if (size == 2)
   11598 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11599 			else if (size == 4)
   11600 				*data = (uint32_t)flash_data;
   11601 			break;
   11602 		} else {
   11603 			/*
   11604 			 * If we've gotten here, then things are probably
   11605 			 * completely hosed, but if the error condition is
   11606 			 * detected, it won't hurt to give it another try...
   11607 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11608 			 */
   11609 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11610 			if (hsfsts & HSFSTS_ERR) {
   11611 				/* Repeat for some time before giving up. */
   11612 				continue;
   11613 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11614 				break;
   11615 		}
   11616 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11617 
   11618 	return error;
   11619 }
   11620 
   11621 /******************************************************************************
   11622  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11623  *
   11624  * sc - pointer to wm_hw structure
   11625  * index - The index of the byte to read.
   11626  * data - Pointer to a byte to store the value read.
   11627  *****************************************************************************/
   11628 static int32_t
   11629 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11630 {
   11631 	int32_t status;
   11632 	uint32_t word = 0;
   11633 
   11634 	status = wm_read_ich8_data(sc, index, 1, &word);
   11635 	if (status == 0)
   11636 		*data = (uint8_t)word;
   11637 	else
   11638 		*data = 0;
   11639 
   11640 	return status;
   11641 }
   11642 
   11643 /******************************************************************************
   11644  * Reads a word from the NVM using the ICH8 flash access registers.
   11645  *
   11646  * sc - pointer to wm_hw structure
   11647  * index - The starting byte index of the word to read.
   11648  * data - Pointer to a word to store the value read.
   11649  *****************************************************************************/
   11650 static int32_t
   11651 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11652 {
   11653 	int32_t status;
   11654 	uint32_t word = 0;
   11655 
   11656 	status = wm_read_ich8_data(sc, index, 2, &word);
   11657 	if (status == 0)
   11658 		*data = (uint16_t)word;
   11659 	else
   11660 		*data = 0;
   11661 
   11662 	return status;
   11663 }
   11664 
   11665 /******************************************************************************
   11666  * Reads a dword from the NVM using the ICH8 flash access registers.
   11667  *
   11668  * sc - pointer to wm_hw structure
   11669  * index - The starting byte index of the word to read.
   11670  * data - Pointer to a word to store the value read.
   11671  *****************************************************************************/
   11672 static int32_t
   11673 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11674 {
   11675 	int32_t status;
   11676 
   11677 	status = wm_read_ich8_data(sc, index, 4, data);
   11678 	return status;
   11679 }
   11680 
   11681 /******************************************************************************
   11682  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11683  * register.
   11684  *
   11685  * sc - Struct containing variables accessed by shared code
   11686  * offset - offset of word in the EEPROM to read
   11687  * data - word read from the EEPROM
   11688  * words - number of words to read
   11689  *****************************************************************************/
   11690 static int
   11691 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11692 {
   11693 	int32_t  error = 0;
   11694 	uint32_t flash_bank = 0;
   11695 	uint32_t act_offset = 0;
   11696 	uint32_t bank_offset = 0;
   11697 	uint16_t word = 0;
   11698 	uint16_t i = 0;
   11699 
   11700 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11701 		device_xname(sc->sc_dev), __func__));
   11702 
   11703 	/*
   11704 	 * We need to know which is the valid flash bank.  In the event
   11705 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11706 	 * managing flash_bank.  So it cannot be trusted and needs
   11707 	 * to be updated with each read.
   11708 	 */
   11709 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11710 	if (error) {
   11711 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11712 			device_xname(sc->sc_dev)));
   11713 		flash_bank = 0;
   11714 	}
   11715 
   11716 	/*
    11717 	 * If we're on bank 1, advance past bank 0: the bank size is in
    11718 	 * words while flash offsets are in bytes, hence the * 2 below.
   11719 	 */
   11720 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11721 
   11722 	error = wm_get_swfwhw_semaphore(sc);
   11723 	if (error) {
   11724 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11725 		    __func__);
   11726 		return error;
   11727 	}
   11728 
   11729 	for (i = 0; i < words; i++) {
   11730 		/* The NVM part needs a byte offset, hence * 2 */
   11731 		act_offset = bank_offset + ((offset + i) * 2);
   11732 		error = wm_read_ich8_word(sc, act_offset, &word);
   11733 		if (error) {
   11734 			aprint_error_dev(sc->sc_dev,
   11735 			    "%s: failed to read NVM\n", __func__);
   11736 			break;
   11737 		}
   11738 		data[i] = word;
   11739 	}
   11740 
   11741 	wm_put_swfwhw_semaphore(sc);
   11742 	return error;
   11743 }
   11744 
   11745 /******************************************************************************
   11746  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11747  * register.
   11748  *
   11749  * sc - Struct containing variables accessed by shared code
   11750  * offset - offset of word in the EEPROM to read
   11751  * data - word read from the EEPROM
   11752  * words - number of words to read
   11753  *****************************************************************************/
   11754 static int
   11755 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11756 {
   11757 	int32_t  error = 0;
   11758 	uint32_t flash_bank = 0;
   11759 	uint32_t act_offset = 0;
   11760 	uint32_t bank_offset = 0;
   11761 	uint32_t dword = 0;
   11762 	uint16_t i = 0;
   11763 
   11764 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11765 		device_xname(sc->sc_dev), __func__));
   11766 
   11767 	/*
   11768 	 * We need to know which is the valid flash bank.  In the event
   11769 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11770 	 * managing flash_bank.  So it cannot be trusted and needs
   11771 	 * to be updated with each read.
   11772 	 */
   11773 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11774 	if (error) {
   11775 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11776 			device_xname(sc->sc_dev)));
   11777 		flash_bank = 0;
   11778 	}
   11779 
   11780 	/*
    11781 	 * If we're on bank 1, advance past bank 0: the bank size is in
    11782 	 * words while flash offsets are in bytes, hence the * 2 below.
   11783 	 */
   11784 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11785 
   11786 	error = wm_get_swfwhw_semaphore(sc);
   11787 	if (error) {
   11788 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11789 		    __func__);
   11790 		return error;
   11791 	}
   11792 
   11793 	for (i = 0; i < words; i++) {
   11794 		/* The NVM part needs a byte offset, hence * 2 */
   11795 		act_offset = bank_offset + ((offset + i) * 2);
   11796 		/* but we must read dword aligned, so mask ... */
   11797 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11798 		if (error) {
   11799 			aprint_error_dev(sc->sc_dev,
   11800 			    "%s: failed to read NVM\n", __func__);
   11801 			break;
   11802 		}
   11803 		/* ... and pick out low or high word */
   11804 		if ((act_offset & 0x2) == 0)
   11805 			data[i] = (uint16_t)(dword & 0xFFFF);
   11806 		else
   11807 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11808 	}
   11809 
   11810 	wm_put_swfwhw_semaphore(sc);
   11811 	return error;
   11812 }
   11813 
   11814 /* iNVM */
   11815 
   11816 static int
   11817 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11818 {
    11819 	int32_t  rv = -1; /* "not found" unless a matching record is hit */
   11820 	uint32_t invm_dword;
   11821 	uint16_t i;
   11822 	uint8_t record_type, word_address;
   11823 
   11824 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11825 		device_xname(sc->sc_dev), __func__));
   11826 
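	/*
	 * The iNVM is a flat array of dwords.  Each record starts with a
	 * type field; autoload structures are skipped by advancing the
	 * index past their payload, and a word-autoload record matching
	 * the requested address carries the data we want.
	 */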
   11827 	for (i = 0; i < INVM_SIZE; i++) {
   11828 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11829 		/* Get record type */
   11830 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11831 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11832 			break;
   11833 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11834 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11835 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11836 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11837 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11838 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11839 			if (word_address == address) {
   11840 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11841 				rv = 0;
   11842 				break;
   11843 			}
   11844 		}
   11845 	}
   11846 
   11847 	return rv;
   11848 }
   11849 
   11850 static int
   11851 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11852 {
   11853 	int rv = 0;
   11854 	int i;
   11855 
   11856 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11857 		device_xname(sc->sc_dev), __func__));
   11858 
   11859 	for (i = 0; i < words; i++) {
   11860 		switch (offset + i) {
   11861 		case NVM_OFF_MACADDR:
   11862 		case NVM_OFF_MACADDR1:
   11863 		case NVM_OFF_MACADDR2:
   11864 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11865 			if (rv != 0) {
   11866 				data[i] = 0xffff;
   11867 				rv = -1;
   11868 			}
   11869 			break;
   11870 		case NVM_OFF_CFG2:
   11871 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11872 			if (rv != 0) {
   11873 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11874 				rv = 0;
   11875 			}
   11876 			break;
   11877 		case NVM_OFF_CFG4:
   11878 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11879 			if (rv != 0) {
   11880 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11881 				rv = 0;
   11882 			}
   11883 			break;
   11884 		case NVM_OFF_LED_1_CFG:
   11885 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11886 			if (rv != 0) {
   11887 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11888 				rv = 0;
   11889 			}
   11890 			break;
   11891 		case NVM_OFF_LED_0_2_CFG:
   11892 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11893 			if (rv != 0) {
   11894 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11895 				rv = 0;
   11896 			}
   11897 			break;
   11898 		case NVM_OFF_ID_LED_SETTINGS:
   11899 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11900 			if (rv != 0) {
   11901 				*data = ID_LED_RESERVED_FFFF;
   11902 				rv = 0;
   11903 			}
   11904 			break;
   11905 		default:
   11906 			DPRINTF(WM_DEBUG_NVM,
    11907 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    11908 			data[i] = NVM_RESERVED_WORD;
   11909 			break;
   11910 		}
   11911 	}
   11912 
   11913 	return rv;
   11914 }
   11915 
   11916 /* Lock, detecting NVM type, validate checksum, version and read */
   11917 
   11918 /*
   11919  * wm_nvm_acquire:
   11920  *
   11921  *	Perform the EEPROM handshake required on some chips.
   11922  */
   11923 static int
   11924 wm_nvm_acquire(struct wm_softc *sc)
   11925 {
   11926 	uint32_t reg;
   11927 	int x;
   11928 	int ret = 0;
   11929 
   11930 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11931 		device_xname(sc->sc_dev), __func__));
   11932 
   11933 	if (sc->sc_type >= WM_T_ICH8) {
   11934 		ret = wm_get_nvm_ich8lan(sc);
   11935 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11936 		ret = wm_get_swfwhw_semaphore(sc);
   11937 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11938 		/* This will also do wm_get_swsm_semaphore() if needed */
   11939 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11940 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11941 		ret = wm_get_swsm_semaphore(sc);
   11942 	}
   11943 
   11944 	if (ret) {
   11945 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11946 			__func__);
   11947 		return 1;
   11948 	}
   11949 
   11950 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11951 		reg = CSR_READ(sc, WMREG_EECD);
   11952 
   11953 		/* Request EEPROM access. */
   11954 		reg |= EECD_EE_REQ;
   11955 		CSR_WRITE(sc, WMREG_EECD, reg);
   11956 
   11957 		/* ..and wait for it to be granted. */
   11958 		for (x = 0; x < 1000; x++) {
   11959 			reg = CSR_READ(sc, WMREG_EECD);
   11960 			if (reg & EECD_EE_GNT)
   11961 				break;
   11962 			delay(5);
   11963 		}
   11964 		if ((reg & EECD_EE_GNT) == 0) {
   11965 			aprint_error_dev(sc->sc_dev,
   11966 			    "could not acquire EEPROM GNT\n");
   11967 			reg &= ~EECD_EE_REQ;
   11968 			CSR_WRITE(sc, WMREG_EECD, reg);
   11969 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11970 				wm_put_swfwhw_semaphore(sc);
   11971 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11972 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11973 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11974 				wm_put_swsm_semaphore(sc);
   11975 			return 1;
   11976 		}
   11977 	}
   11978 
   11979 	return 0;
   11980 }
   11981 
   11982 /*
   11983  * wm_nvm_release:
   11984  *
   11985  *	Release the EEPROM mutex.
   11986  */
   11987 static void
   11988 wm_nvm_release(struct wm_softc *sc)
   11989 {
   11990 	uint32_t reg;
   11991 
   11992 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11993 		device_xname(sc->sc_dev), __func__));
   11994 
   11995 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11996 		reg = CSR_READ(sc, WMREG_EECD);
   11997 		reg &= ~EECD_EE_REQ;
   11998 		CSR_WRITE(sc, WMREG_EECD, reg);
   11999 	}
   12000 
   12001 	if (sc->sc_type >= WM_T_ICH8) {
   12002 		wm_put_nvm_ich8lan(sc);
   12003 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12004 		wm_put_swfwhw_semaphore(sc);
   12005 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   12006 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12007 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12008 		wm_put_swsm_semaphore(sc);
   12009 }
   12010 
   12011 static int
   12012 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12013 {
   12014 	uint32_t eecd = 0;
   12015 
   12016 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12017 	    || sc->sc_type == WM_T_82583) {
   12018 		eecd = CSR_READ(sc, WMREG_EECD);
   12019 
   12020 		/* Isolate bits 15 & 16 */
   12021 		eecd = ((eecd >> 15) & 0x03);
   12022 
   12023 		/* If both bits are set, device is Flash type */
   12024 		if (eecd == 0x03)
   12025 			return 0;
   12026 	}
   12027 	return 1;
   12028 }
   12029 
   12030 static int
   12031 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12032 {
   12033 	uint32_t eec;
   12034 
   12035 	eec = CSR_READ(sc, WMREG_EEC);
   12036 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12037 		return 1;
   12038 
   12039 	return 0;
   12040 }
   12041 
   12042 /*
   12043  * wm_nvm_validate_checksum
   12044  *
   12045  * The checksum is defined as the sum of the first 64 (16 bit) words.
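 * The checksum word itself is included in the sum; a valid image
 * sums to NVM_CHECKSUM.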
   12046  */
   12047 static int
   12048 wm_nvm_validate_checksum(struct wm_softc *sc)
   12049 {
   12050 	uint16_t checksum;
   12051 	uint16_t eeprom_data;
   12052 #ifdef WM_DEBUG
   12053 	uint16_t csum_wordaddr, valid_checksum;
   12054 #endif
   12055 	int i;
   12056 
   12057 	checksum = 0;
   12058 
   12059 	/* Don't check for I211 */
   12060 	if (sc->sc_type == WM_T_I211)
   12061 		return 0;
   12062 
   12063 #ifdef WM_DEBUG
   12064 	if (sc->sc_type == WM_T_PCH_LPT) {
   12065 		csum_wordaddr = NVM_OFF_COMPAT;
   12066 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12067 	} else {
   12068 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12069 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12070 	}
   12071 
   12072 	/* Dump EEPROM image for debug */
   12073 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12074 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12075 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12076 		/* XXX PCH_SPT? */
   12077 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12078 		if ((eeprom_data & valid_checksum) == 0) {
   12079 			DPRINTF(WM_DEBUG_NVM,
    12080 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12081 				device_xname(sc->sc_dev), eeprom_data,
   12082 				    valid_checksum));
   12083 		}
   12084 	}
   12085 
   12086 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12087 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12088 		for (i = 0; i < NVM_SIZE; i++) {
   12089 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12090 				printf("XXXX ");
   12091 			else
   12092 				printf("%04hx ", eeprom_data);
   12093 			if (i % 8 == 7)
   12094 				printf("\n");
   12095 		}
   12096 	}
   12097 
   12098 #endif /* WM_DEBUG */
   12099 
   12100 	for (i = 0; i < NVM_SIZE; i++) {
   12101 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12102 			return 1;
   12103 		checksum += eeprom_data;
   12104 	}
   12105 
   12106 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12107 #ifdef WM_DEBUG
   12108 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12109 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12110 #endif
   12111 	}
   12112 
   12113 	return 0;
   12114 }
   12115 
   12116 static void
   12117 wm_nvm_version_invm(struct wm_softc *sc)
   12118 {
   12119 	uint32_t dword;
   12120 
   12121 	/*
    12122 	 * Linux's code to decode the version is very strange, so we don't
    12123 	 * follow that algorithm and just use word 61 as the document
    12124 	 * describes.  Perhaps it's not perfect though...
   12125 	 *
   12126 	 * Example:
   12127 	 *
   12128 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12129 	 */
   12130 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12131 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12132 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12133 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12134 }
   12135 
   12136 static void
   12137 wm_nvm_version(struct wm_softc *sc)
   12138 {
   12139 	uint16_t major, minor, build, patch;
   12140 	uint16_t uid0, uid1;
   12141 	uint16_t nvm_data;
   12142 	uint16_t off;
   12143 	bool check_version = false;
   12144 	bool check_optionrom = false;
   12145 	bool have_build = false;
   12146 
   12147 	/*
   12148 	 * Version format:
   12149 	 *
   12150 	 * XYYZ
   12151 	 * X0YZ
   12152 	 * X0YY
   12153 	 *
   12154 	 * Example:
   12155 	 *
   12156 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12157 	 *	82571	0x50a6	5.10.6?
   12158 	 *	82572	0x506a	5.6.10?
   12159 	 *	82572EI	0x5069	5.6.9?
   12160 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12161 	 *		0x2013	2.1.3?
    12162 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12163 	 */
   12164 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12165 	switch (sc->sc_type) {
   12166 	case WM_T_82571:
   12167 	case WM_T_82572:
   12168 	case WM_T_82574:
   12169 	case WM_T_82583:
   12170 		check_version = true;
   12171 		check_optionrom = true;
   12172 		have_build = true;
   12173 		break;
   12174 	case WM_T_82575:
   12175 	case WM_T_82576:
   12176 	case WM_T_82580:
   12177 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12178 			check_version = true;
   12179 		break;
   12180 	case WM_T_I211:
   12181 		wm_nvm_version_invm(sc);
   12182 		goto printver;
   12183 	case WM_T_I210:
   12184 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12185 			wm_nvm_version_invm(sc);
   12186 			goto printver;
   12187 		}
   12188 		/* FALLTHROUGH */
   12189 	case WM_T_I350:
   12190 	case WM_T_I354:
   12191 		check_version = true;
   12192 		check_optionrom = true;
   12193 		break;
   12194 	default:
   12195 		return;
   12196 	}
   12197 	if (check_version) {
   12198 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12199 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12200 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12201 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12202 			build = nvm_data & NVM_BUILD_MASK;
   12203 			have_build = true;
   12204 		} else
   12205 			minor = nvm_data & 0x00ff;
   12206 
    12207 		/* Convert BCD minor (e.g. 0x10) to decimal (10) */
   12208 		minor = (minor / 16) * 10 + (minor % 16);
   12209 		sc->sc_nvm_ver_major = major;
   12210 		sc->sc_nvm_ver_minor = minor;
   12211 
   12212 printver:
   12213 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12214 		    sc->sc_nvm_ver_minor);
   12215 		if (have_build) {
   12216 			sc->sc_nvm_ver_build = build;
   12217 			aprint_verbose(".%d", build);
   12218 		}
   12219 	}
   12220 	if (check_optionrom) {
   12221 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12222 		/* Option ROM Version */
   12223 		if ((off != 0x0000) && (off != 0xffff)) {
   12224 			off += NVM_COMBO_VER_OFF;
   12225 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12226 			wm_nvm_read(sc, off, 1, &uid0);
   12227 			if ((uid0 != 0) && (uid0 != 0xffff)
   12228 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12229 				/* 16bits */
   12230 				major = uid0 >> 8;
   12231 				build = (uid0 << 8) | (uid1 >> 8);
   12232 				patch = uid1 & 0x00ff;
   12233 				aprint_verbose(", option ROM Version %d.%d.%d",
   12234 				    major, build, patch);
   12235 			}
   12236 		}
   12237 	}
   12238 
   12239 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12240 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12241 }
   12242 
   12243 /*
   12244  * wm_nvm_read:
   12245  *
   12246  *	Read data from the serial EEPROM.
   12247  */
   12248 static int
   12249 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12250 {
   12251 	int rv;
   12252 
   12253 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12254 		device_xname(sc->sc_dev), __func__));
   12255 
   12256 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12257 		return 1;
   12258 
   12259 	if (wm_nvm_acquire(sc))
   12260 		return 1;
   12261 
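	/* Dispatch on the NVM media: ICH/SPT flash, iNVM, EERD, SPI or uwire. */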
   12262 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12263 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12264 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12265 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12266 	else if (sc->sc_type == WM_T_PCH_SPT)
   12267 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12268 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12269 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12270 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12271 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12272 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12273 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12274 	else
   12275 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12276 
   12277 	wm_nvm_release(sc);
   12278 	return rv;
   12279 }
   12280 
   12281 /*
   12282  * Hardware semaphores.
    12283  * Very complex...
   12284  */
   12285 
   12286 static int
   12287 wm_get_null(struct wm_softc *sc)
   12288 {
   12289 
   12290 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12291 		device_xname(sc->sc_dev), __func__));
   12292 	return 0;
   12293 }
   12294 
   12295 static void
   12296 wm_put_null(struct wm_softc *sc)
   12297 {
   12298 
   12299 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12300 		device_xname(sc->sc_dev), __func__));
   12301 	return;
   12302 }
   12303 
   12304 /*
   12305  * Get hardware semaphore.
   12306  * Same as e1000_get_hw_semaphore_generic()
   12307  */
   12308 static int
   12309 wm_get_swsm_semaphore(struct wm_softc *sc)
   12310 {
   12311 	int32_t timeout;
   12312 	uint32_t swsm;
   12313 
   12314 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12315 		device_xname(sc->sc_dev), __func__));
   12316 	KASSERT(sc->sc_nvm_wordsize > 0);
   12317 
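	/*
	 * The semaphore is taken in two steps: first wait for the
	 * software semaphore bit (SMBI) to be clear, then set the
	 * software/firmware semaphore bit (SWESMBI) and read it back;
	 * the write only sticks if firmware is not holding the lock.
	 */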
   12318 	/* Get the SW semaphore. */
   12319 	timeout = sc->sc_nvm_wordsize + 1;
   12320 	while (timeout) {
   12321 		swsm = CSR_READ(sc, WMREG_SWSM);
   12322 
   12323 		if ((swsm & SWSM_SMBI) == 0)
   12324 			break;
   12325 
   12326 		delay(50);
   12327 		timeout--;
   12328 	}
   12329 
   12330 	if (timeout == 0) {
   12331 		aprint_error_dev(sc->sc_dev,
   12332 		    "could not acquire SWSM SMBI\n");
   12333 		return 1;
   12334 	}
   12335 
   12336 	/* Get the FW semaphore. */
   12337 	timeout = sc->sc_nvm_wordsize + 1;
   12338 	while (timeout) {
   12339 		swsm = CSR_READ(sc, WMREG_SWSM);
   12340 		swsm |= SWSM_SWESMBI;
   12341 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12342 		/* If we managed to set the bit we got the semaphore. */
   12343 		swsm = CSR_READ(sc, WMREG_SWSM);
   12344 		if (swsm & SWSM_SWESMBI)
   12345 			break;
   12346 
   12347 		delay(50);
   12348 		timeout--;
   12349 	}
   12350 
   12351 	if (timeout == 0) {
   12352 		aprint_error_dev(sc->sc_dev,
   12353 		    "could not acquire SWSM SWESMBI\n");
   12354 		/* Release semaphores */
   12355 		wm_put_swsm_semaphore(sc);
   12356 		return 1;
   12357 	}
   12358 	return 0;
   12359 }
   12360 
   12361 /*
   12362  * Put hardware semaphore.
   12363  * Same as e1000_put_hw_semaphore_generic()
   12364  */
   12365 static void
   12366 wm_put_swsm_semaphore(struct wm_softc *sc)
   12367 {
   12368 	uint32_t swsm;
   12369 
   12370 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12371 		device_xname(sc->sc_dev), __func__));
   12372 
   12373 	swsm = CSR_READ(sc, WMREG_SWSM);
   12374 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12375 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12376 }
   12377 
   12378 /*
   12379  * Get SW/FW semaphore.
   12380  * Same as e1000_acquire_swfw_sync_82575().
   12381  */
   12382 static int
   12383 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12384 {
   12385 	uint32_t swfw_sync;
   12386 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12387 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12388 	int timeout = 200;
   12389 
   12390 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12391 		device_xname(sc->sc_dev), __func__));
   12392 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12393 
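	/*
	 * Each resource has a software bit and a firmware bit in
	 * SW_FW_SYNC; we may take the resource only when both the
	 * software and the firmware bit for it are clear.
	 */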
   12394 	for (timeout = 0; timeout < 200; timeout++) {
   12395 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12396 			if (wm_get_swsm_semaphore(sc)) {
   12397 				aprint_error_dev(sc->sc_dev,
   12398 				    "%s: failed to get semaphore\n",
   12399 				    __func__);
   12400 				return 1;
   12401 			}
   12402 		}
   12403 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12404 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12405 			swfw_sync |= swmask;
   12406 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12407 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12408 				wm_put_swsm_semaphore(sc);
   12409 			return 0;
   12410 		}
   12411 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12412 			wm_put_swsm_semaphore(sc);
   12413 		delay(5000);
   12414 	}
   12415 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12416 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12417 	return 1;
   12418 }
   12419 
   12420 static void
   12421 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12422 {
   12423 	uint32_t swfw_sync;
   12424 
   12425 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12426 		device_xname(sc->sc_dev), __func__));
   12427 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12428 
   12429 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12430 		while (wm_get_swsm_semaphore(sc) != 0)
   12431 			continue;
   12432 	}
   12433 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12434 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12435 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12436 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12437 		wm_put_swsm_semaphore(sc);
   12438 }
   12439 
   12440 static int
   12441 wm_get_phy_82575(struct wm_softc *sc)
   12442 {
   12443 
   12444 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12445 		device_xname(sc->sc_dev), __func__));
   12446 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12447 }
   12448 
   12449 static void
   12450 wm_put_phy_82575(struct wm_softc *sc)
   12451 {
   12452 
   12453 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12454 		device_xname(sc->sc_dev), __func__));
   12455 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12456 }
   12457 
   12458 static int
   12459 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12460 {
   12461 	uint32_t ext_ctrl;
   12462 	int timeout = 200;
   12463 
   12464 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12465 		device_xname(sc->sc_dev), __func__));
   12466 
   12467 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12468 	for (timeout = 0; timeout < 200; timeout++) {
   12469 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12470 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12471 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12472 
   12473 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12474 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12475 			return 0;
   12476 		delay(5000);
   12477 	}
   12478 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12479 	    device_xname(sc->sc_dev), ext_ctrl);
   12480 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12481 	return 1;
   12482 }
   12483 
   12484 static void
   12485 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12486 {
   12487 	uint32_t ext_ctrl;
   12488 
   12489 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12490 		device_xname(sc->sc_dev), __func__));
   12491 
   12492 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12493 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12494 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12495 
   12496 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12497 }
   12498 
   12499 static int
   12500 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12501 {
   12502 	uint32_t ext_ctrl;
   12503 	int timeout;
   12504 
   12505 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12506 		device_xname(sc->sc_dev), __func__));
   12507 	mutex_enter(sc->sc_ich_phymtx);
   12508 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12509 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12510 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12511 			break;
   12512 		delay(1000);
   12513 	}
   12514 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12515 		printf("%s: SW has already locked the resource\n",
   12516 		    device_xname(sc->sc_dev));
   12517 		goto out;
   12518 	}
   12519 
   12520 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12521 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12522 	for (timeout = 0; timeout < 1000; timeout++) {
   12523 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12524 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12525 			break;
   12526 		delay(1000);
   12527 	}
   12528 	if (timeout >= 1000) {
   12529 		printf("%s: failed to acquire semaphore\n",
   12530 		    device_xname(sc->sc_dev));
   12531 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12532 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12533 		goto out;
   12534 	}
   12535 	return 0;
   12536 
   12537 out:
   12538 	mutex_exit(sc->sc_ich_phymtx);
   12539 	return 1;
   12540 }
   12541 
   12542 static void
   12543 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12544 {
   12545 	uint32_t ext_ctrl;
   12546 
   12547 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12548 		device_xname(sc->sc_dev), __func__));
   12549 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12550 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12551 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12552 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12553 	} else {
   12554 		printf("%s: Semaphore unexpectedly released\n",
   12555 		    device_xname(sc->sc_dev));
   12556 	}
   12557 
   12558 	mutex_exit(sc->sc_ich_phymtx);
   12559 }
   12560 
   12561 static int
   12562 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12563 {
   12564 
   12565 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12566 		device_xname(sc->sc_dev), __func__));
   12567 	mutex_enter(sc->sc_ich_nvmmtx);
   12568 
   12569 	return 0;
   12570 }
   12571 
   12572 static void
   12573 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12574 {
   12575 
   12576 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12577 		device_xname(sc->sc_dev), __func__));
   12578 	mutex_exit(sc->sc_ich_nvmmtx);
   12579 }
   12580 
   12581 static int
   12582 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12583 {
   12584 	int i = 0;
   12585 	uint32_t reg;
   12586 
   12587 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12588 		device_xname(sc->sc_dev), __func__));
   12589 
   12590 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12591 	do {
   12592 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12593 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12594 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12595 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12596 			break;
   12597 		delay(2*1000);
   12598 		i++;
   12599 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12600 
   12601 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12602 		wm_put_hw_semaphore_82573(sc);
   12603 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12604 		    device_xname(sc->sc_dev));
   12605 		return -1;
   12606 	}
   12607 
   12608 	return 0;
   12609 }
   12610 
   12611 static void
   12612 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12613 {
   12614 	uint32_t reg;
   12615 
   12616 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12617 		device_xname(sc->sc_dev), __func__));
   12618 
   12619 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12620 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12621 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12622 }
   12623 
   12624 /*
   12625  * Management mode and power management related subroutines.
   12626  * BMC, AMT, suspend/resume and EEE.
   12627  */
   12628 
   12629 #ifdef WM_WOL
   12630 static int
   12631 wm_check_mng_mode(struct wm_softc *sc)
   12632 {
   12633 	int rv;
   12634 
   12635 	switch (sc->sc_type) {
   12636 	case WM_T_ICH8:
   12637 	case WM_T_ICH9:
   12638 	case WM_T_ICH10:
   12639 	case WM_T_PCH:
   12640 	case WM_T_PCH2:
   12641 	case WM_T_PCH_LPT:
   12642 	case WM_T_PCH_SPT:
   12643 		rv = wm_check_mng_mode_ich8lan(sc);
   12644 		break;
   12645 	case WM_T_82574:
   12646 	case WM_T_82583:
   12647 		rv = wm_check_mng_mode_82574(sc);
   12648 		break;
   12649 	case WM_T_82571:
   12650 	case WM_T_82572:
   12651 	case WM_T_82573:
   12652 	case WM_T_80003:
   12653 		rv = wm_check_mng_mode_generic(sc);
   12654 		break;
   12655 	default:
    12656 		/* nothing to do */
   12657 		rv = 0;
   12658 		break;
   12659 	}
   12660 
   12661 	return rv;
   12662 }
   12663 
   12664 static int
   12665 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12666 {
   12667 	uint32_t fwsm;
   12668 
   12669 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12670 
   12671 	if (((fwsm & FWSM_FW_VALID) != 0)
   12672 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12673 		return 1;
   12674 
   12675 	return 0;
   12676 }
   12677 
   12678 static int
   12679 wm_check_mng_mode_82574(struct wm_softc *sc)
   12680 {
   12681 	uint16_t data;
   12682 
   12683 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12684 
   12685 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12686 		return 1;
   12687 
   12688 	return 0;
   12689 }
   12690 
   12691 static int
   12692 wm_check_mng_mode_generic(struct wm_softc *sc)
   12693 {
   12694 	uint32_t fwsm;
   12695 
   12696 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12697 
   12698 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12699 		return 1;
   12700 
   12701 	return 0;
   12702 }
   12703 #endif /* WM_WOL */
   12704 
   12705 static int
   12706 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12707 {
   12708 	uint32_t manc, fwsm, factps;
   12709 
   12710 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12711 		return 0;
   12712 
   12713 	manc = CSR_READ(sc, WMREG_MANC);
   12714 
   12715 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12716 		device_xname(sc->sc_dev), manc));
   12717 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12718 		return 0;
   12719 
   12720 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12721 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12722 		factps = CSR_READ(sc, WMREG_FACTPS);
   12723 		if (((factps & FACTPS_MNGCG) == 0)
   12724 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12725 			return 1;
    12726 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12727 		uint16_t data;
   12728 
   12729 		factps = CSR_READ(sc, WMREG_FACTPS);
   12730 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12731 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12732 			device_xname(sc->sc_dev), factps, data));
   12733 		if (((factps & FACTPS_MNGCG) == 0)
   12734 		    && ((data & NVM_CFG2_MNGM_MASK)
   12735 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12736 			return 1;
   12737 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12738 	    && ((manc & MANC_ASF_EN) == 0))
   12739 		return 1;
   12740 
   12741 	return 0;
   12742 }
   12743 
   12744 static bool
   12745 wm_phy_resetisblocked(struct wm_softc *sc)
   12746 {
   12747 	bool blocked = false;
   12748 	uint32_t reg;
   12749 	int i = 0;
   12750 
   12751 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12752 		device_xname(sc->sc_dev), __func__));
   12753 
   12754 	switch (sc->sc_type) {
   12755 	case WM_T_ICH8:
   12756 	case WM_T_ICH9:
   12757 	case WM_T_ICH10:
   12758 	case WM_T_PCH:
   12759 	case WM_T_PCH2:
   12760 	case WM_T_PCH_LPT:
   12761 	case WM_T_PCH_SPT:
   12762 		do {
   12763 			reg = CSR_READ(sc, WMREG_FWSM);
   12764 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12765 				blocked = true;
   12766 				delay(10*1000);
   12767 				continue;
   12768 			}
   12769 			blocked = false;
   12770 		} while (blocked && (i++ < 30));
   12771 		return blocked;
   12772 		break;
   12773 	case WM_T_82571:
   12774 	case WM_T_82572:
   12775 	case WM_T_82573:
   12776 	case WM_T_82574:
   12777 	case WM_T_82583:
   12778 	case WM_T_80003:
   12779 		reg = CSR_READ(sc, WMREG_MANC);
   12780 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12781 			return true;
   12782 		else
   12783 			return false;
   12784 		break;
   12785 	default:
   12786 		/* no problem */
   12787 		break;
   12788 	}
   12789 
   12790 	return false;
   12791 }
   12792 
   12793 static void
   12794 wm_get_hw_control(struct wm_softc *sc)
   12795 {
   12796 	uint32_t reg;
   12797 
   12798 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12799 		device_xname(sc->sc_dev), __func__));
   12800 
   12801 	if (sc->sc_type == WM_T_82573) {
   12802 		reg = CSR_READ(sc, WMREG_SWSM);
   12803 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12804 	} else if (sc->sc_type >= WM_T_82571) {
   12805 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12806 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12807 	}
   12808 }
   12809 
   12810 static void
   12811 wm_release_hw_control(struct wm_softc *sc)
   12812 {
   12813 	uint32_t reg;
   12814 
   12815 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12816 		device_xname(sc->sc_dev), __func__));
   12817 
   12818 	if (sc->sc_type == WM_T_82573) {
   12819 		reg = CSR_READ(sc, WMREG_SWSM);
   12820 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12821 	} else if (sc->sc_type >= WM_T_82571) {
   12822 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12823 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12824 	}
   12825 }
   12826 
   12827 static void
   12828 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12829 {
   12830 	uint32_t reg;
   12831 
   12832 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12833 		device_xname(sc->sc_dev), __func__));
   12834 
   12835 	if (sc->sc_type < WM_T_PCH2)
   12836 		return;
   12837 
   12838 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12839 
   12840 	if (gate)
   12841 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12842 	else
   12843 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12844 
   12845 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12846 }
   12847 
   12848 static void
   12849 wm_smbustopci(struct wm_softc *sc)
   12850 {
   12851 	uint32_t fwsm, reg;
   12852 	int rv = 0;
   12853 
   12854 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12855 		device_xname(sc->sc_dev), __func__));
   12856 
   12857 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12858 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12859 
   12860 	/* Disable ULP */
   12861 	wm_ulp_disable(sc);
   12862 
   12863 	/* Acquire PHY semaphore */
   12864 	sc->phy.acquire(sc);
   12865 
   12866 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12867 	switch (sc->sc_type) {
   12868 	case WM_T_PCH_LPT:
   12869 	case WM_T_PCH_SPT:
   12870 		if (wm_phy_is_accessible_pchlan(sc))
   12871 			break;
   12872 
   12873 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12874 		reg |= CTRL_EXT_FORCE_SMBUS;
   12875 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12876 #if 0
   12877 		/* XXX Isn't this required??? */
   12878 		CSR_WRITE_FLUSH(sc);
   12879 #endif
   12880 		delay(50 * 1000);
   12881 		/* FALLTHROUGH */
   12882 	case WM_T_PCH2:
   12883 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12884 			break;
   12885 		/* FALLTHROUGH */
   12886 	case WM_T_PCH:
   12887 		if (sc->sc_type == WM_T_PCH)
   12888 			if ((fwsm & FWSM_FW_VALID) != 0)
   12889 				break;
   12890 
   12891 		if (wm_phy_resetisblocked(sc) == true) {
   12892 			printf("XXX reset is blocked(3)\n");
   12893 			break;
   12894 		}
   12895 
   12896 		wm_toggle_lanphypc_pch_lpt(sc);
   12897 
   12898 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12899 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12900 				break;
   12901 
   12902 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12903 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12904 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12905 
   12906 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12907 				break;
   12908 			rv = -1;
   12909 		}
   12910 		break;
   12911 	default:
   12912 		break;
   12913 	}
   12914 
   12915 	/* Release semaphore */
   12916 	sc->phy.release(sc);
   12917 
   12918 	if (rv == 0) {
   12919 		if (wm_phy_resetisblocked(sc)) {
   12920 			printf("XXX reset is blocked(4)\n");
   12921 			goto out;
   12922 		}
   12923 		wm_reset_phy(sc);
   12924 		if (wm_phy_resetisblocked(sc))
   12925 			printf("XXX reset is blocked(4)\n");
   12926 	}
   12927 
   12928 out:
   12929 	/*
   12930 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12931 	 */
   12932 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12933 		delay(10*1000);
   12934 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12935 	}
   12936 }
   12937 
   12938 static void
   12939 wm_init_manageability(struct wm_softc *sc)
   12940 {
   12941 
   12942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12943 		device_xname(sc->sc_dev), __func__));
   12944 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12945 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12946 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12947 
   12948 		/* Disable hardware interception of ARP */
   12949 		manc &= ~MANC_ARP_EN;
   12950 
   12951 		/* Enable receiving management packets to the host */
   12952 		if (sc->sc_type >= WM_T_82571) {
   12953 			manc |= MANC_EN_MNG2HOST;
    12954 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12955 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12956 		}
   12957 
   12958 		CSR_WRITE(sc, WMREG_MANC, manc);
   12959 	}
   12960 }
   12961 
   12962 static void
   12963 wm_release_manageability(struct wm_softc *sc)
   12964 {
   12965 
   12966 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12967 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12968 
   12969 		manc |= MANC_ARP_EN;
   12970 		if (sc->sc_type >= WM_T_82571)
   12971 			manc &= ~MANC_EN_MNG2HOST;
   12972 
   12973 		CSR_WRITE(sc, WMREG_MANC, manc);
   12974 	}
   12975 }
   12976 
   12977 static void
   12978 wm_get_wakeup(struct wm_softc *sc)
   12979 {
   12980 
   12981 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12982 	switch (sc->sc_type) {
   12983 	case WM_T_82573:
   12984 	case WM_T_82583:
   12985 		sc->sc_flags |= WM_F_HAS_AMT;
   12986 		/* FALLTHROUGH */
   12987 	case WM_T_80003:
   12988 	case WM_T_82575:
   12989 	case WM_T_82576:
   12990 	case WM_T_82580:
   12991 	case WM_T_I350:
   12992 	case WM_T_I354:
   12993 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12994 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12995 		/* FALLTHROUGH */
   12996 	case WM_T_82541:
   12997 	case WM_T_82541_2:
   12998 	case WM_T_82547:
   12999 	case WM_T_82547_2:
   13000 	case WM_T_82571:
   13001 	case WM_T_82572:
   13002 	case WM_T_82574:
   13003 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13004 		break;
   13005 	case WM_T_ICH8:
   13006 	case WM_T_ICH9:
   13007 	case WM_T_ICH10:
   13008 	case WM_T_PCH:
   13009 	case WM_T_PCH2:
   13010 	case WM_T_PCH_LPT:
   13011 	case WM_T_PCH_SPT:
   13012 		sc->sc_flags |= WM_F_HAS_AMT;
   13013 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13014 		break;
   13015 	default:
   13016 		break;
   13017 	}
   13018 
   13019 	/* 1: HAS_MANAGE */
   13020 	if (wm_enable_mng_pass_thru(sc) != 0)
   13021 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13022 
   13023 #ifdef WM_DEBUG
   13024 	printf("\n");
   13025 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   13026 		printf("HAS_AMT,");
   13027 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   13028 		printf("ARC_SUBSYS_VALID,");
   13029 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   13030 		printf("ASF_FIRMWARE_PRES,");
   13031 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   13032 		printf("HAS_MANAGE,");
   13033 	printf("\n");
   13034 #endif
    13035 	/*
    13036 	 * Note that the WOL flags are set after the EEPROM (NVM) reset
    13037 	 * code has run, not here.
    13038 	 */
   13039 }
   13040 
    13041 /*
    13042  * Unconfigure Ultra Low Power (ULP) mode. Only for PCH_LPT and
    13043  * newer; the I217 and certain I218 devices are excluded (see below).
    13044  */
   13045 static void
   13046 wm_ulp_disable(struct wm_softc *sc)
   13047 {
   13048 	uint32_t reg;
   13049 	int i = 0;
   13050 
   13051 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13052 		device_xname(sc->sc_dev), __func__));
   13053 	/* Exclude old devices */
   13054 	if ((sc->sc_type < WM_T_PCH_LPT)
   13055 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13056 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13057 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13058 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13059 		return;
   13060 
   13061 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13062 		/* Request ME un-configure ULP mode in the PHY */
   13063 		reg = CSR_READ(sc, WMREG_H2ME);
   13064 		reg &= ~H2ME_ULP;
   13065 		reg |= H2ME_ENFORCE_SETTINGS;
   13066 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13067 
   13068 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13069 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13070 			if (i++ == 30) {
    13071 				printf("%s: timed out\n", __func__);
   13072 				return;
   13073 			}
   13074 			delay(10 * 1000);
   13075 		}
   13076 		reg = CSR_READ(sc, WMREG_H2ME);
   13077 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13078 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13079 
   13080 		return;
   13081 	}
   13082 
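          	/*
          	 * No manageability firmware is present, so take the PHY out
          	 * of ULP mode by hand: toggle LANPHYPC, unforce SMBus mode
          	 * in both the PHY and the MAC, then restart the PHY's ULP
          	 * configuration with all of its options disabled.
          	 */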
   13083 	/* Acquire semaphore */
   13084 	sc->phy.acquire(sc);
   13085 
   13086 	/* Toggle LANPHYPC */
   13087 	wm_toggle_lanphypc_pch_lpt(sc);
   13088 
   13089 	/* Unforce SMBus mode in PHY */
   13090 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13091 	if (reg == 0x0000 || reg == 0xffff) {
   13092 		uint32_t reg2;
   13093 
   13094 		printf("%s: Force SMBus first.\n", __func__);
   13095 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13096 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13097 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13098 		delay(50 * 1000);
   13099 
   13100 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13101 	}
   13102 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13103 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13104 
   13105 	/* Unforce SMBus mode in MAC */
   13106 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13107 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13108 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13109 
   13110 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13111 	reg |= HV_PM_CTRL_K1_ENA;
   13112 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13113 
   13114 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13115 	reg &= ~(I218_ULP_CONFIG1_IND
   13116 	    | I218_ULP_CONFIG1_STICKY_ULP
   13117 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13118 	    | I218_ULP_CONFIG1_WOL_HOST
   13119 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13120 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13121 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13122 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13123 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13124 	reg |= I218_ULP_CONFIG1_START;
   13125 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13126 
   13127 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13128 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13129 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13130 
   13131 	/* Release semaphore */
   13132 	sc->phy.release(sc);
   13133 	wm_gmii_reset(sc);
   13134 	delay(50 * 1000);
   13135 }
   13136 
   13137 /* WOL in the newer chipset interfaces (pchlan) */
   13138 static void
   13139 wm_enable_phy_wakeup(struct wm_softc *sc)
   13140 {
   13141 #if 0
   13142 	uint16_t preg;
   13143 
   13144 	/* Copy MAC RARs to PHY RARs */
   13145 
   13146 	/* Copy MAC MTA to PHY MTA */
   13147 
   13148 	/* Configure PHY Rx Control register */
   13149 
   13150 	/* Enable PHY wakeup in MAC register */
   13151 
   13152 	/* Configure and enable PHY wakeup in PHY registers */
   13153 
   13154 	/* Activate PHY wakeup */
   13155 
   13156 	/* XXX */
   13157 #endif
   13158 }
   13159 
   13160 /* Power down workaround on D3 */
   13161 static void
   13162 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13163 {
   13164 	uint32_t reg;
   13165 	int i;
   13166 
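          	/*
          	 * Try at most twice: disable gigabit operation, put the
          	 * PHY's voltage regulator into shutdown mode and read the
          	 * setting back; if it did not stick, reset the PHY and
          	 * repeat once.
          	 */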
   13167 	for (i = 0; i < 2; i++) {
   13168 		/* Disable link */
   13169 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13170 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13171 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13172 
   13173 		/*
   13174 		 * Call gig speed drop workaround on Gig disable before
   13175 		 * accessing any PHY registers
   13176 		 */
   13177 		if (sc->sc_type == WM_T_ICH8)
   13178 			wm_gig_downshift_workaround_ich8lan(sc);
   13179 
   13180 		/* Write VR power-down enable */
   13181 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13182 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13183 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13184 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13185 
   13186 		/* Read it back and test */
   13187 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13188 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13189 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13190 			break;
   13191 
   13192 		/* Issue PHY reset and repeat at most one more time */
   13193 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13194 	}
   13195 }
   13196 
   13197 static void
   13198 wm_enable_wakeup(struct wm_softc *sc)
   13199 {
   13200 	uint32_t reg, pmreg;
   13201 	pcireg_t pmode;
   13202 
   13203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13204 		device_xname(sc->sc_dev), __func__));
   13205 
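          	/*
          	 * Overview: advertise APM wakeup, apply the ICH/PCH quirks,
          	 * arm a magic packet filter in WUFC (or the PHY-based
          	 * equivalent on PCH and newer) and finally set
          	 * PME_STS/PME_EN in the PCI power management registers.
          	 */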
   13206 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13207 		&pmreg, NULL) == 0)
   13208 		return;
   13209 
   13210 	/* Advertise the wakeup capability */
   13211 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13212 	    | CTRL_SWDPIN(3));
   13213 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13214 
   13215 	/* ICH workaround */
   13216 	switch (sc->sc_type) {
   13217 	case WM_T_ICH8:
   13218 	case WM_T_ICH9:
   13219 	case WM_T_ICH10:
   13220 	case WM_T_PCH:
   13221 	case WM_T_PCH2:
   13222 	case WM_T_PCH_LPT:
   13223 	case WM_T_PCH_SPT:
   13224 		/* Disable gig during WOL */
   13225 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13226 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13227 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13228 		if (sc->sc_type == WM_T_PCH)
   13229 			wm_gmii_reset(sc);
   13230 
   13231 		/* Power down workaround */
   13232 		if (sc->sc_phytype == WMPHY_82577) {
   13233 			struct mii_softc *child;
   13234 
   13235 			/* Assume that the PHY is copper */
   13236 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13237 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13238 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13239 				    (768 << 5) | 25, 0x0444); /* magic num */
   13240 		}
   13241 		break;
   13242 	default:
   13243 		break;
   13244 	}
   13245 
   13246 	/* Keep the laser running on fiber adapters */
   13247 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13248 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13249 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13250 		reg |= CTRL_EXT_SWDPIN(3);
   13251 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13252 	}
   13253 
   13254 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13255 #if 0	/* for the multicast packet */
   13256 	reg |= WUFC_MC;
   13257 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13258 #endif
   13259 
   13260 	if (sc->sc_type >= WM_T_PCH)
   13261 		wm_enable_phy_wakeup(sc);
   13262 	else {
   13263 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13264 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13265 	}
   13266 
   13267 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13268 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13269 		|| (sc->sc_type == WM_T_PCH2))
   13270 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13271 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13272 
   13273 	/* Request PME */
   13274 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13275 #if 0
   13276 	/* Disable WOL */
   13277 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13278 #else
   13279 	/* For WOL */
   13280 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13281 #endif
   13282 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13283 }
   13284 
    13285 /* LPLU (Low Power Link Up) */
   13286 
   13287 static void
   13288 wm_lplu_d0_disable(struct wm_softc *sc)
   13289 {
   13290 	uint32_t reg;
   13291 
   13292 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13293 		device_xname(sc->sc_dev), __func__));
   13294 
   13295 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13296 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13297 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13298 }
   13299 
   13300 static void
   13301 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13302 {
   13303 	uint32_t reg;
   13304 
   13305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13306 		device_xname(sc->sc_dev), __func__));
   13307 
   13308 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13309 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13310 	reg |= HV_OEM_BITS_ANEGNOW;
   13311 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13312 }
   13313 
    13314 /* EEE (Energy Efficient Ethernet) */
   13315 
   13316 static void
   13317 wm_set_eee_i350(struct wm_softc *sc)
   13318 {
   13319 	uint32_t ipcnfg, eeer;
   13320 
   13321 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13322 	eeer = CSR_READ(sc, WMREG_EEER);
   13323 
   13324 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13325 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13326 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13327 		    | EEER_LPI_FC);
   13328 	} else {
   13329 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13330 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13331 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13332 		    | EEER_LPI_FC);
   13333 	}
   13334 
   13335 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13336 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13337 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13338 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13339 }
   13340 
   13341 /*
   13342  * Workarounds (mainly PHY related).
   13343  * Basically, PHY's workarounds are in the PHY drivers.
   13344  */
   13345 
    13346 /* Workaround for 82566 Kumeran PCS lock loss */
   13347 static void
   13348 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13349 {
   13350 #if 0
   13351 	int miistatus, active, i;
   13352 	int reg;
   13353 
   13354 	miistatus = sc->sc_mii.mii_media_status;
   13355 
   13356 	/* If the link is not up, do nothing */
   13357 	if ((miistatus & IFM_ACTIVE) == 0)
   13358 		return;
   13359 
   13360 	active = sc->sc_mii.mii_media_active;
   13361 
    13362 	/* Nothing to do if the link speed is other than 1Gbps */
   13363 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13364 		return;
   13365 
   13366 	for (i = 0; i < 10; i++) {
   13367 		/* read twice */
   13368 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13369 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13370 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13371 			goto out;	/* GOOD! */
   13372 
   13373 		/* Reset the PHY */
   13374 		wm_gmii_reset(sc);
   13375 		delay(5*1000);
   13376 	}
   13377 
   13378 	/* Disable GigE link negotiation */
   13379 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13380 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13381 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13382 
   13383 	/*
   13384 	 * Call gig speed drop workaround on Gig disable before accessing
   13385 	 * any PHY registers.
   13386 	 */
   13387 	wm_gig_downshift_workaround_ich8lan(sc);
   13388 
   13389 out:
   13390 	return;
   13391 #endif
   13392 }
   13393 
   13394 /* WOL from S5 stops working */
   13395 static void
   13396 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13397 {
   13398 	uint16_t kmrn_reg;
   13399 
   13400 	/* Only for igp3 */
   13401 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13402 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13403 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13404 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13405 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13406 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13407 	}
   13408 }
   13409 
   13410 /*
   13411  * Workaround for pch's PHYs
   13412  * XXX should be moved to new PHY driver?
   13413  */
   13414 static void
   13415 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13416 {
   13417 
   13418 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13419 		device_xname(sc->sc_dev), __func__));
   13420 	KASSERT(sc->sc_type == WM_T_PCH);
   13421 
   13422 	if (sc->sc_phytype == WMPHY_82577)
   13423 		wm_set_mdio_slow_mode_hv(sc);
   13424 
   13425 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13426 
   13427 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13428 
   13429 	/* 82578 */
   13430 	if (sc->sc_phytype == WMPHY_82578) {
   13431 		struct mii_softc *child;
   13432 
   13433 		/*
   13434 		 * Return registers to default by doing a soft reset then
   13435 		 * writing 0x3140 to the control register
   13436 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13437 		 */
   13438 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13439 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13440 			PHY_RESET(child);
   13441 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13442 			    0x3140);
   13443 		}
   13444 	}
   13445 
   13446 	/* Select page 0 */
   13447 	sc->phy.acquire(sc);
   13448 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13449 	sc->phy.release(sc);
   13450 
   13451 	/*
   13452 	 * Configure the K1 Si workaround during phy reset assuming there is
   13453 	 * link so that it disables K1 if link is in 1Gbps.
   13454 	 */
   13455 	wm_k1_gig_workaround_hv(sc, 1);
   13456 }
   13457 
   13458 static void
   13459 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13460 {
   13461 
   13462 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13463 		device_xname(sc->sc_dev), __func__));
   13464 	KASSERT(sc->sc_type == WM_T_PCH2);
   13465 
   13466 	wm_set_mdio_slow_mode_hv(sc);
   13467 }
   13468 
   13469 static int
   13470 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13471 {
   13472 	int k1_enable = sc->sc_nvm_k1_enabled;
   13473 
   13474 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13475 		device_xname(sc->sc_dev), __func__));
   13476 
   13477 	if (sc->phy.acquire(sc) != 0)
   13478 		return -1;
   13479 
   13480 	if (link) {
   13481 		k1_enable = 0;
   13482 
   13483 		/* Link stall fix for link up */
   13484 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13485 	} else {
   13486 		/* Link stall fix for link down */
   13487 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13488 	}
   13489 
   13490 	wm_configure_k1_ich8lan(sc, k1_enable);
   13491 	sc->phy.release(sc);
   13492 
   13493 	return 0;
   13494 }
   13495 
   13496 static void
   13497 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13498 {
   13499 	uint32_t reg;
   13500 
   13501 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13502 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13503 	    reg | HV_KMRN_MDIO_SLOW);
   13504 }
   13505 
   13506 static void
   13507 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13508 {
   13509 	uint32_t ctrl, ctrl_ext, tmp;
   13510 	uint16_t kmrn_reg;
   13511 
   13512 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13513 
   13514 	if (k1_enable)
   13515 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13516 	else
   13517 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13518 
   13519 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13520 
   13521 	delay(20);
   13522 
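          	/*
          	 * Temporarily force the MAC speed with the speed-detection
          	 * bypass set, then restore the original CTRL/CTRL_EXT
          	 * values; each write is flushed and given 20us to settle.
          	 */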
   13523 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13524 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13525 
   13526 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13527 	tmp |= CTRL_FRCSPD;
   13528 
   13529 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13530 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13531 	CSR_WRITE_FLUSH(sc);
   13532 	delay(20);
   13533 
   13534 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13535 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13536 	CSR_WRITE_FLUSH(sc);
   13537 	delay(20);
   13538 }
   13539 
    13540 /* Special case - the 82575 needs manual init ... */
   13541 static void
   13542 wm_reset_init_script_82575(struct wm_softc *sc)
   13543 {
    13544 	/*
    13545 	 * Remark: this is untested code - we have no board without EEPROM.
    13546 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13547 	 */
   13548 
   13549 	/* SerDes configuration via SERDESCTRL */
   13550 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13551 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13552 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13553 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13554 
   13555 	/* CCM configuration via CCMCTL register */
   13556 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13557 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13558 
   13559 	/* PCIe lanes configuration */
   13560 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13561 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13562 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13563 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13564 
   13565 	/* PCIe PLL Configuration */
   13566 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13567 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13568 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13569 }
   13570 
   13571 static void
   13572 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13573 {
   13574 	uint32_t reg;
   13575 	uint16_t nvmword;
   13576 	int rv;
   13577 
   13578 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13579 		return;
   13580 
   13581 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13582 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13583 	if (rv != 0) {
   13584 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13585 		    __func__);
   13586 		return;
   13587 	}
   13588 
   13589 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13590 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13591 		reg |= MDICNFG_DEST;
   13592 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13593 		reg |= MDICNFG_COM_MDIO;
   13594 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13595 }
   13596 
   13597 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13598 
   13599 static bool
   13600 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13601 {
   13602 	int i;
   13603 	uint32_t reg;
   13604 	uint16_t id1, id2;
   13605 
   13606 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13607 		device_xname(sc->sc_dev), __func__));
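          	/*
          	 * Read the PHY ID, retrying once; if both attempts return an
          	 * invalid ID, retry in slow MDIO mode (pre-LPT only) before
          	 * declaring the PHY inaccessible.
          	 */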
   13608 	id1 = id2 = 0xffff;
   13609 	for (i = 0; i < 2; i++) {
   13610 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13611 		if (MII_INVALIDID(id1))
   13612 			continue;
   13613 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13614 		if (MII_INVALIDID(id2))
   13615 			continue;
   13616 		break;
   13617 	}
   13618 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13619 		goto out;
   13620 	}
   13621 
   13622 	if (sc->sc_type < WM_T_PCH_LPT) {
   13623 		sc->phy.release(sc);
   13624 		wm_set_mdio_slow_mode_hv(sc);
   13625 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13626 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13627 		sc->phy.acquire(sc);
   13628 	}
   13629 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    13630 		printf("XXX returning false\n");
   13631 		return false;
   13632 	}
   13633 out:
   13634 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13635 		/* Only unforce SMBus if ME is not active */
   13636 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13637 			/* Unforce SMBus mode in PHY */
   13638 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13639 			    CV_SMB_CTRL);
   13640 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13641 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13642 			    CV_SMB_CTRL, reg);
   13643 
   13644 			/* Unforce SMBus mode in MAC */
   13645 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13646 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13647 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13648 		}
   13649 	}
   13650 	return true;
   13651 }
   13652 
   13653 static void
   13654 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13655 {
   13656 	uint32_t reg;
   13657 	int i;
   13658 
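          	/*
          	 * Drive the LANPHYPC pin low via the CTRL override/value
          	 * bits for ~1ms to power-cycle the PHY, then wait for it to
          	 * come back: a fixed 50ms on pre-LPT parts, or by polling
          	 * CTRL_EXT_LPCD (plus a 30ms settle) on PCH_LPT and newer.
          	 */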
   13659 	/* Set PHY Config Counter to 50msec */
   13660 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13661 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13662 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13663 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13664 
   13665 	/* Toggle LANPHYPC */
   13666 	reg = CSR_READ(sc, WMREG_CTRL);
   13667 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13668 	reg &= ~CTRL_LANPHYPC_VALUE;
   13669 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13670 	CSR_WRITE_FLUSH(sc);
   13671 	delay(1000);
   13672 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13673 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13674 	CSR_WRITE_FLUSH(sc);
   13675 
   13676 	if (sc->sc_type < WM_T_PCH_LPT)
   13677 		delay(50 * 1000);
   13678 	else {
   13679 		i = 20;
   13680 
   13681 		do {
   13682 			delay(5 * 1000);
   13683 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13684 		    && i--);
   13685 
   13686 		delay(30 * 1000);
   13687 	}
   13688 }
   13689 
   13690 static int
   13691 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13692 {
   13693 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13694 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13695 	uint32_t rxa;
   13696 	uint16_t scale = 0, lat_enc = 0;
   13697 	int64_t lat_ns, value;
   13698 
   13699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13700 		device_xname(sc->sc_dev), __func__));
   13701 
   13702 	if (link) {
   13703 		pcireg_t preg;
   13704 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13705 
   13706 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13707 
   13708 		/*
   13709 		 * Determine the maximum latency tolerated by the device.
   13710 		 *
   13711 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13712 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13713 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13714 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13715 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13716 		 */
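          		/*
          		 * Worked example with hypothetical numbers: rxa = 24KB,
          		 * MTU = 1500 and a 1000Mb/s link give
          		 *   lat_ns = (24 * 1024 - 2 * 1500) * 8 * 1000 / 1000
          		 *          = 172608
          		 * 172608 > 1023 (LTRV_VALUE), so it is divided by 2^5
          		 * twice: howmany(172608, 32) = 5394, then
          		 * howmany(5394, 32) = 169, giving scale = 2 and
          		 * value = 169, i.e. a tolerated latency of
          		 * 169 * 2^10ns (~173us).
          		 */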
   13717 		lat_ns = ((int64_t)rxa * 1024 -
   13718 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13719 		if (lat_ns < 0)
   13720 			lat_ns = 0;
   13721 		else {
   13722 			uint32_t status;
   13723 			uint16_t speed;
   13724 
   13725 			status = CSR_READ(sc, WMREG_STATUS);
   13726 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13727 			case STATUS_SPEED_10:
   13728 				speed = 10;
   13729 				break;
   13730 			case STATUS_SPEED_100:
   13731 				speed = 100;
   13732 				break;
   13733 			case STATUS_SPEED_1000:
   13734 				speed = 1000;
   13735 				break;
   13736 			default:
   13737 				printf("%s: Unknown speed (status = %08x)\n",
   13738 				    device_xname(sc->sc_dev), status);
   13739 				return -1;
   13740 			}
   13741 			lat_ns /= speed;
   13742 		}
   13743 		value = lat_ns;
   13744 
   13745 		while (value > LTRV_VALUE) {
    13746 			scale++;
   13747 			value = howmany(value, __BIT(5));
   13748 		}
   13749 		if (scale > LTRV_SCALE_MAX) {
   13750 			printf("%s: Invalid LTR latency scale %d\n",
   13751 			    device_xname(sc->sc_dev), scale);
   13752 			return -1;
   13753 		}
   13754 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13755 
   13756 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13757 		    WM_PCI_LTR_CAP_LPT);
   13758 		max_snoop = preg & 0xffff;
   13759 		max_nosnoop = preg >> 16;
   13760 
   13761 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13762 
   13763 		if (lat_enc > max_ltr_enc) {
   13764 			lat_enc = max_ltr_enc;
   13765 		}
   13766 	}
   13767 	/* Snoop and No-Snoop latencies the same */
   13768 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13769 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13770 
   13771 	return 0;
   13772 }
   13773 
   13774 /*
   13775  * I210 Errata 25 and I211 Errata 10
   13776  * Slow System Clock.
   13777  */
   13778 static void
   13779 wm_pll_workaround_i210(struct wm_softc *sc)
   13780 {
   13781 	uint32_t mdicnfg, wuc;
   13782 	uint32_t reg;
   13783 	pcireg_t pcireg;
   13784 	uint32_t pmreg;
   13785 	uint16_t nvmword, tmp_nvmword;
   13786 	int phyval;
   13787 	bool wa_done = false;
   13788 	int i;
   13789 
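          	/*
          	 * In outline: while the internal PHY reports its PLL as
          	 * unconfigured, reset the PHY, rewrite the iNVM autoload
          	 * word with the PLL workaround bit set and bounce the chip
          	 * through D3hot/D0 so the autoload is re-run; the original
          	 * autoload word, WUC and MDICNFG are restored afterwards.
          	 */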
   13790 	/* Save WUC and MDICNFG registers */
   13791 	wuc = CSR_READ(sc, WMREG_WUC);
   13792 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13793 
   13794 	reg = mdicnfg & ~MDICNFG_DEST;
   13795 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13796 
   13797 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13798 		nvmword = INVM_DEFAULT_AL;
   13799 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13800 
   13801 	/* Get Power Management cap offset */
   13802 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13803 		&pmreg, NULL) == 0)
   13804 		return;
   13805 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13806 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13807 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13808 
   13809 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13810 			break; /* OK */
   13811 		}
   13812 
   13813 		wa_done = true;
   13814 		/* Directly reset the internal PHY */
   13815 		reg = CSR_READ(sc, WMREG_CTRL);
   13816 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13817 
   13818 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13819 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13820 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13821 
   13822 		CSR_WRITE(sc, WMREG_WUC, 0);
   13823 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13824 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13825 
   13826 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13827 		    pmreg + PCI_PMCSR);
   13828 		pcireg |= PCI_PMCSR_STATE_D3;
   13829 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13830 		    pmreg + PCI_PMCSR, pcireg);
   13831 		delay(1000);
   13832 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13833 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13834 		    pmreg + PCI_PMCSR, pcireg);
   13835 
   13836 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13837 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13838 
   13839 		/* Restore WUC register */
   13840 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13841 	}
   13842 
   13843 	/* Restore MDICNFG setting */
   13844 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13845 	if (wa_done)
   13846 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13847 }
   13848