      1 /*	$NetBSD: if_wm.c,v 1.495 2017/03/03 07:57:49 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Split header buffer for newer descriptors
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.495 2017/03/03 07:57:49 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #include "opt_if_wm.h"
     92 #endif
     93 
     94 #include <sys/param.h>
     95 #include <sys/systm.h>
     96 #include <sys/callout.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/malloc.h>
     99 #include <sys/kmem.h>
    100 #include <sys/kernel.h>
    101 #include <sys/socket.h>
    102 #include <sys/ioctl.h>
    103 #include <sys/errno.h>
    104 #include <sys/device.h>
    105 #include <sys/queue.h>
    106 #include <sys/syslog.h>
    107 #include <sys/interrupt.h>
    108 #include <sys/cpu.h>
    109 #include <sys/pcq.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/miivar.h>
    132 #include <dev/mii/miidevs.h>
    133 #include <dev/mii/mii_bitbang.h>
    134 #include <dev/mii/ikphyreg.h>
    135 #include <dev/mii/igphyreg.h>
    136 #include <dev/mii/igphyvar.h>
    137 #include <dev/mii/inbmphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
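        /*
         * Usage sketch: the second DPRINTF() argument is a complete,
         * parenthesized printf() argument list, e.g.
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n",
         *	    device_xname(sc->sc_dev)));
         */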
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
    171  * The maximum number of interrupts this device driver uses.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 /*
    177  * Transmit descriptor list size.  Due to errata, we can only have
    178  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    179  * on >= 82544.  We tell the upper layers that they can queue a lot
    180  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    181  * of them at a time.
    182  *
    183  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    184  * chains containing many small mbufs have been observed in zero-copy
    185  * situations with jumbo frames.
    186  */
    187 #define	WM_NTXSEGS		256
    188 #define	WM_IFQUEUELEN		256
    189 #define	WM_TXQUEUELEN_MAX	64
    190 #define	WM_TXQUEUELEN_MAX_82547	16
    191 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    192 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    193 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    194 #define	WM_NTXDESC_82542	256
    195 #define	WM_NTXDESC_82544	4096
    196 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    197 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    198 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    199 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    200 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
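        /*
         * Ring index arithmetic sketch: because WM_NTXDESC(txq) and
         * WM_TXQUEUELEN(txq) are powers of two, WM_NEXTTX()/WM_NEXTTXS()
         * wrap an index with a cheap mask instead of a modulo:
         *
         *	next = (cur + 1) & WM_NTXDESC_MASK(txq);
         *		// == (cur + 1) % WM_NTXDESC(txq)
         */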
    201 
    202 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    203 
    204 #define	WM_TXINTERQSIZE		256
    205 
    206 /*
    207  * Receive descriptor list size.  We have one Rx buffer for normal
    208  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    209  * packet.  We allocate 256 receive descriptors, each with a 2k
    210  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    211  */
    212 #define	WM_NRXDESC		256
    213 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    214 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    215 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    216 
    217 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    218 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    219 #endif
    220 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    221 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    222 #endif
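        /*
         * (These defaults bound how many Rx descriptors are processed in one
         * pass: WM_RX_PROCESS_LIMIT_DEFAULT in the softint handler and
         * WM_RX_INTR_PROCESS_LIMIT_DEFAULT in hard interrupt context; see
         * sc_rx_process_limit and sc_rx_intr_process_limit below.)
         */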
    223 
    224 typedef union txdescs {
    225 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    226 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    227 } txdescs_t;
    228 
    229 typedef union rxdescs {
    230 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    231 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    232 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    233 } rxdescs_t;
    234 
    235 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    236 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
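        /*
         * (The unions above let one DMA allocation back whichever descriptor
         * layout the chip generation uses; WM_CDTXOFF()/WM_CDRXOFF() turn a
         * descriptor index into a byte offset using the per-queue descriptor
         * size.)
         */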
    237 
    238 /*
    239  * Software state for transmit jobs.
    240  */
    241 struct wm_txsoft {
    242 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    243 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    244 	int txs_firstdesc;		/* first descriptor in packet */
    245 	int txs_lastdesc;		/* last descriptor in packet */
    246 	int txs_ndesc;			/* # of descriptors used */
    247 };
    248 
    249 /*
    250  * Software state for receive buffers.  Each descriptor gets a
    251  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    252  * more than one buffer, we chain them together.
    253  */
    254 struct wm_rxsoft {
    255 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    256 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    257 };
    258 
    259 #define WM_LINKUP_TIMEOUT	50
    260 
    261 static uint16_t swfwphysem[] = {
    262 	SWFW_PHY0_SM,
    263 	SWFW_PHY1_SM,
    264 	SWFW_PHY2_SM,
    265 	SWFW_PHY3_SM
    266 };
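        /*
         * (The table above appears to be indexed by the function id,
         * sc_funcid, to select the per-PHY SW/FW semaphore bit.)
         */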
    267 
    268 static const uint32_t wm_82580_rxpbs_table[] = {
    269 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    270 };
    271 
    272 struct wm_softc;
    273 
    274 #ifdef WM_EVENT_COUNTERS
    275 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    276 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    277 	struct evcnt qname##_ev_##evname;
    278 
    279 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    280 	do {								\
    281 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    282 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    283 		    "%s%02d%s", #qname, (qnum), #evname);		\
    284 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    285 		    (evtype), NULL, (xname),				\
    286 		    (q)->qname##_##evname##_evcnt_name);		\
    287 	} while (0)
    288 
    289 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    290 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    291 
    292 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    293 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    294 
    295 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    296 	evcnt_detach(&(q)->qname##_ev_##evname);
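        /*
         * Expansion sketch: WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname,
         * EVCNT_TYPE_INTR) formats "txq00txdw" into
         * (q)->txq_txdw_evcnt_name and attaches (q)->txq_ev_txdw under
         * that name.
         */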
    297 #endif /* WM_EVENT_COUNTERS */
    298 
    299 struct wm_txqueue {
    300 	kmutex_t *txq_lock;		/* lock for tx operations */
    301 
    302 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    303 
    304 	/* Software state for the transmit descriptors. */
    305 	int txq_num;			/* must be a power of two */
    306 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    307 
    308 	/* TX control data structures. */
    309 	int txq_ndesc;			/* must be a power of two */
    310 	size_t txq_descsize;		/* size of a Tx descriptor */
    311 	txdescs_t *txq_descs_u;
    312 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    313 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    314 	int txq_desc_rseg;		/* real number of control segments */
    315 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    316 #define	txq_descs	txq_descs_u->sctxu_txdescs
    317 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    318 
    319 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    320 
    321 	int txq_free;			/* number of free Tx descriptors */
    322 	int txq_next;			/* next ready Tx descriptor */
    323 
    324 	int txq_sfree;			/* number of free Tx jobs */
    325 	int txq_snext;			/* next free Tx job */
    326 	int txq_sdirty;			/* dirty Tx jobs */
    327 
    328 	/* These 4 variables are used only on the 82547. */
    329 	int txq_fifo_size;		/* Tx FIFO size */
    330 	int txq_fifo_head;		/* current head of FIFO */
    331 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    332 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    333 
    334 	/*
    335 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    336 	 * CPUs. This queue mediates between them without blocking.
    337 	 */
    338 	pcq_t *txq_interq;
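        	/*
        	 * (Per pcq(9): producers on any CPU enqueue with
        	 * pcq_put(txq->txq_interq, m) and the queue owner drains
        	 * with pcq_get(), so the enqueue side takes no lock.)
        	 */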
    339 
    340 	/*
    341 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    342 	 * to manage the Tx H/W queue's busy flag.
    343 	 */
    344 	int txq_flags;			/* flags for H/W queue, see below */
    345 #define	WM_TXQ_NO_SPACE	0x1
    346 
    347 	bool txq_stopping;
    348 
    349 	uint32_t txq_packets;		/* for AIM */
    350 	uint32_t txq_bytes;		/* for AIM */
    351 #ifdef WM_EVENT_COUNTERS
    352 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    353 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    354 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    355 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    356 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    357 						/* XXX not used? */
    358 
    359 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    360 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    361 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    362 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    363 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    364 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    365 
    366 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    367 
    368 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    369 
    370 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    371 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    372 #endif /* WM_EVENT_COUNTERS */
    373 };
    374 
    375 struct wm_rxqueue {
    376 	kmutex_t *rxq_lock;		/* lock for rx operations */
    377 
    378 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    379 
    380 	/* Software state for the receive descriptors. */
    381 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    382 
    383 	/* RX control data structures. */
    384 	int rxq_ndesc;			/* must be a power of two */
    385 	size_t rxq_descsize;		/* size of an Rx descriptor */
    386 	rxdescs_t *rxq_descs_u;
    387 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    388 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    389 	int rxq_desc_rseg;		/* real number of control segments */
    390 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    391 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    392 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    393 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    394 
    395 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    396 
    397 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    398 	int rxq_discard;
    399 	int rxq_len;
    400 	struct mbuf *rxq_head;
    401 	struct mbuf *rxq_tail;
    402 	struct mbuf **rxq_tailp;
    403 
    404 	bool rxq_stopping;
    405 
    406 	uint32_t rxq_packets;		/* for AIM */
    407 	uint32_t rxq_bytes;		/* for AIM */
    408 #ifdef WM_EVENT_COUNTERS
    409 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    410 
    411 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)	/* IP checksums checked in-bound */
    412 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)	/* TCP/UDP cksums checked in-bound */
    413 #endif
    414 };
    415 
    416 struct wm_queue {
    417 	int wmq_id;			/* index of transmit and receive queues */
    418 	int wmq_intr_idx;		/* index of MSI-X tables */
    419 
    420 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    421 	bool wmq_set_itr;
    422 
    423 	struct wm_txqueue wmq_txq;
    424 	struct wm_rxqueue wmq_rxq;
    425 
    426 	void *wmq_si;
    427 };
    428 
    429 struct wm_phyop {
    430 	int (*acquire)(struct wm_softc *);
    431 	void (*release)(struct wm_softc *);
    432 	int reset_delay_us;
    433 };
    434 
    435 /*
    436  * Software state per device.
    437  */
    438 struct wm_softc {
    439 	device_t sc_dev;		/* generic device information */
    440 	bus_space_tag_t sc_st;		/* bus space tag */
    441 	bus_space_handle_t sc_sh;	/* bus space handle */
    442 	bus_size_t sc_ss;		/* bus space size */
    443 	bus_space_tag_t sc_iot;		/* I/O space tag */
    444 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    445 	bus_size_t sc_ios;		/* I/O space size */
    446 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    447 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    448 	bus_size_t sc_flashs;		/* flash registers space size */
    449 	off_t sc_flashreg_offset;	/*
    450 					 * offset to flash registers from
    451 					 * start of BAR
    452 					 */
    453 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    454 
    455 	struct ethercom sc_ethercom;	/* ethernet common data */
    456 	struct mii_data sc_mii;		/* MII/media information */
    457 
    458 	pci_chipset_tag_t sc_pc;
    459 	pcitag_t sc_pcitag;
    460 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    461 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    462 
    463 	uint16_t sc_pcidevid;		/* PCI device ID */
    464 	wm_chip_type sc_type;		/* MAC type */
    465 	int sc_rev;			/* MAC revision */
    466 	wm_phy_type sc_phytype;		/* PHY type */
    467 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    468 #define	WM_MEDIATYPE_UNKNOWN		0x00
    469 #define	WM_MEDIATYPE_FIBER		0x01
    470 #define	WM_MEDIATYPE_COPPER		0x02
    471 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    472 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    473 	int sc_flags;			/* flags; see below */
    474 	int sc_if_flags;		/* last if_flags */
    475 	int sc_flowflags;		/* 802.3x flow control flags */
    476 	int sc_align_tweak;
    477 
    478 	void *sc_ihs[WM_MAX_NINTR];	/*
    479 					 * interrupt cookie.
    480 					 * legacy and msi use sc_ihs[0].
    481 					 */
    482 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    483 	int sc_nintrs;			/* number of interrupts */
    484 
    485 	int sc_link_intr_idx;		/* index of MSI-X tables */
    486 
    487 	callout_t sc_tick_ch;		/* tick callout */
    488 	bool sc_core_stopping;
    489 
    490 	int sc_nvm_ver_major;
    491 	int sc_nvm_ver_minor;
    492 	int sc_nvm_ver_build;
    493 	int sc_nvm_addrbits;		/* NVM address bits */
    494 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    495 	int sc_ich8_flash_base;
    496 	int sc_ich8_flash_bank_size;
    497 	int sc_nvm_k1_enabled;
    498 
    499 	int sc_nqueues;
    500 	struct wm_queue *sc_queue;
    501 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    502 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    503 
    504 	int sc_affinity_offset;
    505 
    506 #ifdef WM_EVENT_COUNTERS
    507 	/* Event counters. */
    508 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    509 
    510 	/* WM_T_82542_2_1 only */
    511 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    512 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    513 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    514 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    515 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    516 #endif /* WM_EVENT_COUNTERS */
    517 
    518 	/* This variable is used only on the 82547. */
    519 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    520 
    521 	uint32_t sc_ctrl;		/* prototype CTRL register */
    522 #if 0
    523 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    524 #endif
    525 	uint32_t sc_icr;		/* prototype interrupt bits */
    526 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    527 	uint32_t sc_tctl;		/* prototype TCTL register */
    528 	uint32_t sc_rctl;		/* prototype RCTL register */
    529 	uint32_t sc_txcw;		/* prototype TXCW register */
    530 	uint32_t sc_tipg;		/* prototype TIPG register */
    531 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    532 	uint32_t sc_pba;		/* prototype PBA register */
    533 
    534 	int sc_tbi_linkup;		/* TBI link status */
    535 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    536 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    537 
    538 	int sc_mchash_type;		/* multicast filter offset */
    539 
    540 	krndsource_t rnd_source;	/* random source */
    541 
    542 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    543 
    544 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    545 	kmutex_t *sc_ich_phymtx;	/*
    546 					 * 82574/82583/ICH/PCH specific PHY
    547 					 * mutex. For 82574/82583, the mutex
    548 					 * is used for both PHY and NVM.
    549 					 */
    550 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    551 
    552 	struct wm_phyop phy;
    553 };
    554 
    555 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    556 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    557 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    558 
    559 #define	WM_RXCHAIN_RESET(rxq)						\
    560 do {									\
    561 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    562 	*(rxq)->rxq_tailp = NULL;					\
    563 	(rxq)->rxq_len = 0;						\
    564 } while (/*CONSTCOND*/0)
    565 
    566 #define	WM_RXCHAIN_LINK(rxq, m)						\
    567 do {									\
    568 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    569 	(rxq)->rxq_tailp = &(m)->m_next;				\
    570 } while (/*CONSTCOND*/0)
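        /*
         * Tail-pointer idiom sketch: rxq_tailp always points at the m_next
         * slot where the next mbuf will be linked, so appending needs no
         * head-vs-empty special case:
         *
         *	WM_RXCHAIN_RESET(rxq);	   // head = NULL, tailp = &head
         *	WM_RXCHAIN_LINK(rxq, m);   // *tailp = m, tailp = &m->m_next
         */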
    571 
    572 #ifdef WM_EVENT_COUNTERS
    573 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    574 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    575 
    576 #define WM_Q_EVCNT_INCR(qname, evname)			\
    577 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    578 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    579 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    580 #else /* !WM_EVENT_COUNTERS */
    581 #define	WM_EVCNT_INCR(ev)	/* nothing */
    582 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    583 
    584 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    585 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    586 #endif /* !WM_EVENT_COUNTERS */
    587 
    588 #define	CSR_READ(sc, reg)						\
    589 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    590 #define	CSR_WRITE(sc, reg, val)						\
    591 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    592 #define	CSR_WRITE_FLUSH(sc)						\
    593 	(void) CSR_READ((sc), WMREG_STATUS)
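        /*
         * (CSR_WRITE_FLUSH forces posted PCI writes out to the device by
         * issuing a harmless read of the STATUS register.)
         */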
    594 
    595 #define ICH8_FLASH_READ32(sc, reg)					\
    596 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    597 	    (reg) + (sc)->sc_flashreg_offset)
    598 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    599 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    600 	    (reg) + (sc)->sc_flashreg_offset, (data))
    601 
    602 #define ICH8_FLASH_READ16(sc, reg)					\
    603 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    604 	    (reg) + (sc)->sc_flashreg_offset)
    605 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    606 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    607 	    (reg) + (sc)->sc_flashreg_offset, (data))
    608 
    609 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    610 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    611 
    612 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    613 #define	WM_CDTXADDR_HI(txq, x)						\
    614 	(sizeof(bus_addr_t) == 8 ?					\
    615 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    616 
    617 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    618 #define	WM_CDRXADDR_HI(rxq, x)						\
    619 	(sizeof(bus_addr_t) == 8 ?					\
    620 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
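        /*
         * Example: the hardware takes descriptor ring base addresses as two
         * 32-bit halves, so ring setup would write WM_CDTXADDR_LO(txq, 0)
         * and WM_CDTXADDR_HI(txq, 0) to the TDBAL/TDBAH register pair
         * (the high half is always 0 when bus_addr_t is 32 bits).
         */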
    621 
    622 /*
    623  * Register read/write functions,
    624  * other than CSR_{READ|WRITE}().
    625  */
    626 #if 0
    627 static inline uint32_t wm_io_read(struct wm_softc *, int);
    628 #endif
    629 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    630 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    631 	uint32_t, uint32_t);
    632 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    633 
    634 /*
    635  * Descriptor sync/init functions.
    636  */
    637 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    638 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    639 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    640 
    641 /*
    642  * Device driver interface functions and commonly used functions.
    643  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    644  */
    645 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    646 static int	wm_match(device_t, cfdata_t, void *);
    647 static void	wm_attach(device_t, device_t, void *);
    648 static int	wm_detach(device_t, int);
    649 static bool	wm_suspend(device_t, const pmf_qual_t *);
    650 static bool	wm_resume(device_t, const pmf_qual_t *);
    651 static void	wm_watchdog(struct ifnet *);
    652 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    653 static void	wm_tick(void *);
    654 static int	wm_ifflags_cb(struct ethercom *);
    655 static int	wm_ioctl(struct ifnet *, u_long, void *);
    656 /* MAC address related */
    657 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    658 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    659 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    660 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    661 static void	wm_set_filter(struct wm_softc *);
    662 /* Reset and init related */
    663 static void	wm_set_vlan(struct wm_softc *);
    664 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    665 static void	wm_get_auto_rd_done(struct wm_softc *);
    666 static void	wm_lan_init_done(struct wm_softc *);
    667 static void	wm_get_cfg_done(struct wm_softc *);
    668 static void	wm_initialize_hardware_bits(struct wm_softc *);
    669 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    670 static void	wm_reset_phy(struct wm_softc *);
    671 static void	wm_flush_desc_rings(struct wm_softc *);
    672 static void	wm_reset(struct wm_softc *);
    673 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    674 static void	wm_rxdrain(struct wm_rxqueue *);
    675 static void	wm_rss_getkey(uint8_t *);
    676 static void	wm_init_rss(struct wm_softc *);
    677 static void	wm_adjust_qnum(struct wm_softc *, int);
    678 static int	wm_setup_legacy(struct wm_softc *);
    679 static int	wm_setup_msix(struct wm_softc *);
    680 static int	wm_init(struct ifnet *);
    681 static int	wm_init_locked(struct ifnet *);
    682 static void	wm_turnon(struct wm_softc *);
    683 static void	wm_turnoff(struct wm_softc *);
    684 static void	wm_stop(struct ifnet *, int);
    685 static void	wm_stop_locked(struct ifnet *, int);
    686 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    687 static void	wm_82547_txfifo_stall(void *);
    688 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    689 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    690 /* DMA related */
    691 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    692 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    693 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    694 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    695     struct wm_txqueue *);
    696 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    697 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    698 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    699     struct wm_rxqueue *);
    700 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    701 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    702 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    703 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    704 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    705 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    706 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    707     struct wm_txqueue *);
    708 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    709     struct wm_rxqueue *);
    710 static int	wm_alloc_txrx_queues(struct wm_softc *);
    711 static void	wm_free_txrx_queues(struct wm_softc *);
    712 static int	wm_init_txrx_queues(struct wm_softc *);
    713 /* Start */
    714 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    715     uint32_t *, uint8_t *);
    716 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    717 static void	wm_start(struct ifnet *);
    718 static void	wm_start_locked(struct ifnet *);
    719 static int	wm_transmit(struct ifnet *, struct mbuf *);
    720 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    721 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    722 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    723     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    724 static void	wm_nq_start(struct ifnet *);
    725 static void	wm_nq_start_locked(struct ifnet *);
    726 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    727 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    728 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    729 static void	wm_deferred_start_locked(struct wm_txqueue *);
    730 static void	wm_handle_queue(void *);
    731 /* Interrupt */
    732 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    733 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    734 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    735 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    736 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    737 static void	wm_linkintr(struct wm_softc *, uint32_t);
    738 static int	wm_intr_legacy(void *);
    739 static inline void	wm_txrxintr_disable(struct wm_queue *);
    740 static inline void	wm_txrxintr_enable(struct wm_queue *);
    741 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    742 static int	wm_txrxintr_msix(void *);
    743 static int	wm_linkintr_msix(void *);
    744 
    745 /*
    746  * Media related.
    747  * GMII, SGMII, TBI, SERDES and SFP.
    748  */
    749 /* Common */
    750 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    751 /* GMII related */
    752 static void	wm_gmii_reset(struct wm_softc *);
    753 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    754 static int	wm_get_phy_id_82575(struct wm_softc *);
    755 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    756 static int	wm_gmii_mediachange(struct ifnet *);
    757 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    758 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    759 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    760 static int	wm_gmii_i82543_readreg(device_t, int, int);
    761 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    762 static int	wm_gmii_mdic_readreg(device_t, int, int);
    763 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    764 static int	wm_gmii_i82544_readreg(device_t, int, int);
    765 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    766 static int	wm_gmii_i80003_readreg(device_t, int, int);
    767 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    768 static int	wm_gmii_bm_readreg(device_t, int, int);
    769 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    770 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    771 static int	wm_gmii_hv_readreg(device_t, int, int);
    772 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    773 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    774 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    775 static int	wm_gmii_82580_readreg(device_t, int, int);
    776 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    777 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    778 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    779 static void	wm_gmii_statchg(struct ifnet *);
    780 /*
    781  * Kumeran related (80003, ICH* and PCH*).
    782  * These functions are not for accessing MII registers but for accessing
    783  * Kumeran-specific registers.
    784  */
    785 static int	wm_kmrn_readreg(struct wm_softc *, int);
    786 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    787 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    788 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    789 /* SGMII */
    790 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    791 static int	wm_sgmii_readreg(device_t, int, int);
    792 static void	wm_sgmii_writereg(device_t, int, int, int);
    793 /* TBI related */
    794 static void	wm_tbi_mediainit(struct wm_softc *);
    795 static int	wm_tbi_mediachange(struct ifnet *);
    796 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    797 static int	wm_check_for_link(struct wm_softc *);
    798 static void	wm_tbi_tick(struct wm_softc *);
    799 /* SERDES related */
    800 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    801 static int	wm_serdes_mediachange(struct ifnet *);
    802 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    803 static void	wm_serdes_tick(struct wm_softc *);
    804 /* SFP related */
    805 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    806 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    807 
    808 /*
    809  * NVM related.
    810  * Microwire, SPI (with or without EERD) and Flash.
    811  */
    812 /* Misc functions */
    813 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    814 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    815 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    816 /* Microwire */
    817 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    818 /* SPI */
    819 static int	wm_nvm_ready_spi(struct wm_softc *);
    820 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    821 /* Used with EERD */
    822 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    823 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    824 /* Flash */
    825 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    826     unsigned int *);
    827 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    828 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    829 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    830 	uint32_t *);
    831 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    832 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    833 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    834 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    835 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    836 /* iNVM */
    837 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    838 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    839 /* Lock, detecting NVM type, validate checksum and read */
    840 static int	wm_nvm_acquire(struct wm_softc *);
    841 static void	wm_nvm_release(struct wm_softc *);
    842 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    843 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    844 static int	wm_nvm_validate_checksum(struct wm_softc *);
    845 static void	wm_nvm_version_invm(struct wm_softc *);
    846 static void	wm_nvm_version(struct wm_softc *);
    847 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    848 
    849 /*
    850  * Hardware semaphores.
    851  * Very complex...
    852  */
    853 static int	wm_get_null(struct wm_softc *);
    854 static void	wm_put_null(struct wm_softc *);
    855 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    856 static void	wm_put_swsm_semaphore(struct wm_softc *);
    857 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    858 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    859 static int	wm_get_phy_82575(struct wm_softc *);
    860 static void	wm_put_phy_82575(struct wm_softc *);
    861 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    862 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    863 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    864 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    865 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    866 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    867 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    868 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    869 
    870 /*
    871  * Management mode and power management related subroutines.
    872  * BMC, AMT, suspend/resume and EEE.
    873  */
    874 #if 0
    875 static int	wm_check_mng_mode(struct wm_softc *);
    876 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    877 static int	wm_check_mng_mode_82574(struct wm_softc *);
    878 static int	wm_check_mng_mode_generic(struct wm_softc *);
    879 #endif
    880 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    881 static bool	wm_phy_resetisblocked(struct wm_softc *);
    882 static void	wm_get_hw_control(struct wm_softc *);
    883 static void	wm_release_hw_control(struct wm_softc *);
    884 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    885 static void	wm_smbustopci(struct wm_softc *);
    886 static void	wm_init_manageability(struct wm_softc *);
    887 static void	wm_release_manageability(struct wm_softc *);
    888 static void	wm_get_wakeup(struct wm_softc *);
    889 static void	wm_ulp_disable(struct wm_softc *);
    890 static void	wm_enable_phy_wakeup(struct wm_softc *);
    891 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    892 static void	wm_enable_wakeup(struct wm_softc *);
    893 /* LPLU (Low Power Link Up) */
    894 static void	wm_lplu_d0_disable(struct wm_softc *);
    895 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    896 /* EEE */
    897 static void	wm_set_eee_i350(struct wm_softc *);
    898 
    899 /*
    900  * Workarounds (mainly PHY related).
    901  * Basically, PHY's workarounds are in the PHY drivers.
    902  */
    903 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    904 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    905 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    906 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    907 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    908 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    909 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    910 static void	wm_reset_init_script_82575(struct wm_softc *);
    911 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    912 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    913 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    914 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    915 static void	wm_pll_workaround_i210(struct wm_softc *);
    916 
    917 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    918     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    919 
    920 /*
    921  * Devices supported by this driver.
    922  */
    923 static const struct wm_product {
    924 	pci_vendor_id_t		wmp_vendor;
    925 	pci_product_id_t	wmp_product;
    926 	const char		*wmp_name;
    927 	wm_chip_type		wmp_type;
    928 	uint32_t		wmp_flags;
    929 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    930 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    931 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    932 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    933 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    934 } wm_products[] = {
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    936 	  "Intel i82542 1000BASE-X Ethernet",
    937 	  WM_T_82542_2_1,	WMP_F_FIBER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    940 	  "Intel i82543GC 1000BASE-X Ethernet",
    941 	  WM_T_82543,		WMP_F_FIBER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    944 	  "Intel i82543GC 1000BASE-T Ethernet",
    945 	  WM_T_82543,		WMP_F_COPPER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    948 	  "Intel i82544EI 1000BASE-T Ethernet",
    949 	  WM_T_82544,		WMP_F_COPPER },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    952 	  "Intel i82544EI 1000BASE-X Ethernet",
    953 	  WM_T_82544,		WMP_F_FIBER },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    956 	  "Intel i82544GC 1000BASE-T Ethernet",
    957 	  WM_T_82544,		WMP_F_COPPER },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    960 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    961 	  WM_T_82544,		WMP_F_COPPER },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    964 	  "Intel i82540EM 1000BASE-T Ethernet",
    965 	  WM_T_82540,		WMP_F_COPPER },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    968 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    969 	  WM_T_82540,		WMP_F_COPPER },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    972 	  "Intel i82540EP 1000BASE-T Ethernet",
    973 	  WM_T_82540,		WMP_F_COPPER },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    976 	  "Intel i82540EP 1000BASE-T Ethernet",
    977 	  WM_T_82540,		WMP_F_COPPER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    980 	  "Intel i82540EP 1000BASE-T Ethernet",
    981 	  WM_T_82540,		WMP_F_COPPER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    984 	  "Intel i82545EM 1000BASE-T Ethernet",
    985 	  WM_T_82545,		WMP_F_COPPER },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    988 	  "Intel i82545GM 1000BASE-T Ethernet",
    989 	  WM_T_82545_3,		WMP_F_COPPER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    992 	  "Intel i82545GM 1000BASE-X Ethernet",
    993 	  WM_T_82545_3,		WMP_F_FIBER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    996 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    997 	  WM_T_82545_3,		WMP_F_SERDES },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1000 	  "Intel i82546EB 1000BASE-T Ethernet",
   1001 	  WM_T_82546,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1004 	  "Intel i82546EB 1000BASE-T Ethernet",
   1005 	  WM_T_82546,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1008 	  "Intel i82545EM 1000BASE-X Ethernet",
   1009 	  WM_T_82545,		WMP_F_FIBER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1012 	  "Intel i82546EB 1000BASE-X Ethernet",
   1013 	  WM_T_82546,		WMP_F_FIBER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1016 	  "Intel i82546GB 1000BASE-T Ethernet",
   1017 	  WM_T_82546_3,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1020 	  "Intel i82546GB 1000BASE-X Ethernet",
   1021 	  WM_T_82546_3,		WMP_F_FIBER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1024 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1025 	  WM_T_82546_3,		WMP_F_SERDES },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1028 	  "i82546GB quad-port Gigabit Ethernet",
   1029 	  WM_T_82546_3,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1032 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1033 	  WM_T_82546_3,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1036 	  "Intel PRO/1000MT (82546GB)",
   1037 	  WM_T_82546_3,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1040 	  "Intel i82541EI 1000BASE-T Ethernet",
   1041 	  WM_T_82541,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1044 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1045 	  WM_T_82541,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1048 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1049 	  WM_T_82541,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1052 	  "Intel i82541ER 1000BASE-T Ethernet",
   1053 	  WM_T_82541_2,		WMP_F_COPPER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1056 	  "Intel i82541GI 1000BASE-T Ethernet",
   1057 	  WM_T_82541_2,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1060 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1061 	  WM_T_82541_2,		WMP_F_COPPER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1064 	  "Intel i82541PI 1000BASE-T Ethernet",
   1065 	  WM_T_82541_2,		WMP_F_COPPER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1068 	  "Intel i82547EI 1000BASE-T Ethernet",
   1069 	  WM_T_82547,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1072 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1073 	  WM_T_82547,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1076 	  "Intel i82547GI 1000BASE-T Ethernet",
   1077 	  WM_T_82547_2,		WMP_F_COPPER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1080 	  "Intel PRO/1000 PT (82571EB)",
   1081 	  WM_T_82571,		WMP_F_COPPER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1084 	  "Intel PRO/1000 PF (82571EB)",
   1085 	  WM_T_82571,		WMP_F_FIBER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1088 	  "Intel PRO/1000 PB (82571EB)",
   1089 	  WM_T_82571,		WMP_F_SERDES },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1092 	  "Intel PRO/1000 QT (82571EB)",
   1093 	  WM_T_82571,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1096 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1097 	  WM_T_82571,		WMP_F_COPPER, },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1100 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1101 	  WM_T_82571,		WMP_F_COPPER, },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1104 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1105 	  WM_T_82571,		WMP_F_SERDES, },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1108 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1109 	  WM_T_82571,		WMP_F_SERDES, },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1112 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1113 	  WM_T_82571,		WMP_F_FIBER, },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1116 	  "Intel i82572EI 1000baseT Ethernet",
   1117 	  WM_T_82572,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1120 	  "Intel i82572EI 1000baseX Ethernet",
   1121 	  WM_T_82572,		WMP_F_FIBER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1124 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1125 	  WM_T_82572,		WMP_F_SERDES },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1128 	  "Intel i82572EI 1000baseT Ethernet",
   1129 	  WM_T_82572,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1132 	  "Intel i82573E",
   1133 	  WM_T_82573,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1136 	  "Intel i82573E IAMT",
   1137 	  WM_T_82573,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1140 	  "Intel i82573L Gigabit Ethernet",
   1141 	  WM_T_82573,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1144 	  "Intel i82574L",
   1145 	  WM_T_82574,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1148 	  "Intel i82574L",
   1149 	  WM_T_82574,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1152 	  "Intel i82583V",
   1153 	  WM_T_82583,		WMP_F_COPPER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1156 	  "i80003 dual 1000baseT Ethernet",
   1157 	  WM_T_80003,		WMP_F_COPPER },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1160 	  "i80003 dual 1000baseX Ethernet",
   1161 	  WM_T_80003,		WMP_F_COPPER },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1164 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1165 	  WM_T_80003,		WMP_F_SERDES },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1168 	  "Intel i80003 1000baseT Ethernet",
   1169 	  WM_T_80003,		WMP_F_COPPER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1172 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1173 	  WM_T_80003,		WMP_F_SERDES },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1176 	  "Intel i82801H (M_AMT) LAN Controller",
   1177 	  WM_T_ICH8,		WMP_F_COPPER },
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1179 	  "Intel i82801H (AMT) LAN Controller",
   1180 	  WM_T_ICH8,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1182 	  "Intel i82801H LAN Controller",
   1183 	  WM_T_ICH8,		WMP_F_COPPER },
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1185 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1186 	  WM_T_ICH8,		WMP_F_COPPER },
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1188 	  "Intel i82801H (M) LAN Controller",
   1189 	  WM_T_ICH8,		WMP_F_COPPER },
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1191 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1192 	  WM_T_ICH8,		WMP_F_COPPER },
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1194 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1195 	  WM_T_ICH8,		WMP_F_COPPER },
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1197 	  "82567V-3 LAN Controller",
   1198 	  WM_T_ICH8,		WMP_F_COPPER },
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1200 	  "82801I (AMT) LAN Controller",
   1201 	  WM_T_ICH9,		WMP_F_COPPER },
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1203 	  "82801I 10/100 LAN Controller",
   1204 	  WM_T_ICH9,		WMP_F_COPPER },
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1206 	  "82801I (G) 10/100 LAN Controller",
   1207 	  WM_T_ICH9,		WMP_F_COPPER },
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1209 	  "82801I (GT) 10/100 LAN Controller",
   1210 	  WM_T_ICH9,		WMP_F_COPPER },
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1212 	  "82801I (C) LAN Controller",
   1213 	  WM_T_ICH9,		WMP_F_COPPER },
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1215 	  "82801I mobile LAN Controller",
   1216 	  WM_T_ICH9,		WMP_F_COPPER },
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1218 	  "82801I mobile (V) LAN Controller",
   1219 	  WM_T_ICH9,		WMP_F_COPPER },
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1221 	  "82801I mobile (AMT) LAN Controller",
   1222 	  WM_T_ICH9,		WMP_F_COPPER },
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1224 	  "82567LM-4 LAN Controller",
   1225 	  WM_T_ICH9,		WMP_F_COPPER },
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1227 	  "82567LM-2 LAN Controller",
   1228 	  WM_T_ICH10,		WMP_F_COPPER },
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1230 	  "82567LF-2 LAN Controller",
   1231 	  WM_T_ICH10,		WMP_F_COPPER },
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1233 	  "82567LM-3 LAN Controller",
   1234 	  WM_T_ICH10,		WMP_F_COPPER },
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1236 	  "82567LF-3 LAN Controller",
   1237 	  WM_T_ICH10,		WMP_F_COPPER },
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1239 	  "82567V-2 LAN Controller",
   1240 	  WM_T_ICH10,		WMP_F_COPPER },
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1242 	  "82567V-3? LAN Controller",
   1243 	  WM_T_ICH10,		WMP_F_COPPER },
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1245 	  "HANKSVILLE LAN Controller",
   1246 	  WM_T_ICH10,		WMP_F_COPPER },
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1248 	  "PCH LAN (82577LM) Controller",
   1249 	  WM_T_PCH,		WMP_F_COPPER },
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1251 	  "PCH LAN (82577LC) Controller",
   1252 	  WM_T_PCH,		WMP_F_COPPER },
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1254 	  "PCH LAN (82578DM) Controller",
   1255 	  WM_T_PCH,		WMP_F_COPPER },
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1257 	  "PCH LAN (82578DC) Controller",
   1258 	  WM_T_PCH,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1260 	  "PCH2 LAN (82579LM) Controller",
   1261 	  WM_T_PCH2,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1263 	  "PCH2 LAN (82579V) Controller",
   1264 	  WM_T_PCH2,		WMP_F_COPPER },
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1266 	  "82575EB dual-1000baseT Ethernet",
   1267 	  WM_T_82575,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1269 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1270 	  WM_T_82575,		WMP_F_SERDES },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1272 	  "82575GB quad-1000baseT Ethernet",
   1273 	  WM_T_82575,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1275 	  "82575GB quad-1000baseT Ethernet (PM)",
   1276 	  WM_T_82575,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1278 	  "82576 1000BaseT Ethernet",
   1279 	  WM_T_82576,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1281 	  "82576 1000BaseX Ethernet",
   1282 	  WM_T_82576,		WMP_F_FIBER },
   1283 
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1285 	  "82576 gigabit Ethernet (SERDES)",
   1286 	  WM_T_82576,		WMP_F_SERDES },
   1287 
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1289 	  "82576 quad-1000BaseT Ethernet",
   1290 	  WM_T_82576,		WMP_F_COPPER },
   1291 
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1293 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1294 	  WM_T_82576,		WMP_F_COPPER },
   1295 
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1297 	  "82576 gigabit Ethernet",
   1298 	  WM_T_82576,		WMP_F_COPPER },
   1299 
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1301 	  "82576 gigabit Ethernet (SERDES)",
   1302 	  WM_T_82576,		WMP_F_SERDES },
   1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1304 	  "82576 quad-gigabit Ethernet (SERDES)",
   1305 	  WM_T_82576,		WMP_F_SERDES },
   1306 
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1308 	  "82580 1000BaseT Ethernet",
   1309 	  WM_T_82580,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1311 	  "82580 1000BaseX Ethernet",
   1312 	  WM_T_82580,		WMP_F_FIBER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1315 	  "82580 1000BaseT Ethernet (SERDES)",
   1316 	  WM_T_82580,		WMP_F_SERDES },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1319 	  "82580 gigabit Ethernet (SGMII)",
   1320 	  WM_T_82580,		WMP_F_COPPER },
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1322 	  "82580 dual-1000BaseT Ethernet",
   1323 	  WM_T_82580,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1326 	  "82580 quad-1000BaseX Ethernet",
   1327 	  WM_T_82580,		WMP_F_FIBER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1330 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1331 	  WM_T_82580,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1334 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1335 	  WM_T_82580,		WMP_F_SERDES },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1338 	  "DH89XXCC 1000BASE-KX Ethernet",
   1339 	  WM_T_82580,		WMP_F_SERDES },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1342 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1343 	  WM_T_82580,		WMP_F_SERDES },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1346 	  "I350 Gigabit Network Connection",
   1347 	  WM_T_I350,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1350 	  "I350 Gigabit Fiber Network Connection",
   1351 	  WM_T_I350,		WMP_F_FIBER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1354 	  "I350 Gigabit Backplane Connection",
   1355 	  WM_T_I350,		WMP_F_SERDES },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1358 	  "I350 Quad Port Gigabit Ethernet",
   1359 	  WM_T_I350,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1362 	  "I350 Gigabit Connection",
   1363 	  WM_T_I350,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1366 	  "I354 Gigabit Ethernet (KX)",
   1367 	  WM_T_I354,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1370 	  "I354 Gigabit Ethernet (SGMII)",
   1371 	  WM_T_I354,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1374 	  "I354 Gigabit Ethernet (2.5G)",
   1375 	  WM_T_I354,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1378 	  "I210-T1 Ethernet Server Adapter",
   1379 	  WM_T_I210,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1382 	  "I210 Ethernet (Copper OEM)",
   1383 	  WM_T_I210,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1386 	  "I210 Ethernet (Copper IT)",
   1387 	  WM_T_I210,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1390 	  "I210 Ethernet (FLASH less)",
   1391 	  WM_T_I210,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1394 	  "I210 Gigabit Ethernet (Fiber)",
   1395 	  WM_T_I210,		WMP_F_FIBER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1398 	  "I210 Gigabit Ethernet (SERDES)",
   1399 	  WM_T_I210,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1402 	  "I210 Gigabit Ethernet (FLASH less)",
   1403 	  WM_T_I210,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1406 	  "I210 Gigabit Ethernet (SGMII)",
   1407 	  WM_T_I210,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1410 	  "I211 Ethernet (COPPER)",
   1411 	  WM_T_I211,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1413 	  "I217 V Ethernet Connection",
   1414 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1416 	  "I217 LM Ethernet Connection",
   1417 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1419 	  "I218 V Ethernet Connection",
   1420 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1422 	  "I218 V Ethernet Connection",
   1423 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1425 	  "I218 V Ethernet Connection",
   1426 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1428 	  "I218 LM Ethernet Connection",
   1429 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1431 	  "I218 LM Ethernet Connection",
   1432 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1434 	  "I218 LM Ethernet Connection",
   1435 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1436 #if 0
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1438 	  "I219 V Ethernet Connection",
   1439 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1441 	  "I219 V Ethernet Connection",
   1442 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1444 	  "I219 V Ethernet Connection",
   1445 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1447 	  "I219 V Ethernet Connection",
   1448 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1450 	  "I219 LM Ethernet Connection",
   1451 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1453 	  "I219 LM Ethernet Connection",
   1454 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1456 	  "I219 LM Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1459 	  "I219 LM Ethernet Connection",
   1460 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1462 	  "I219 LM Ethernet Connection",
   1463 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1464 #endif
   1465 	{ 0,			0,
   1466 	  NULL,
   1467 	  0,			0 },
   1468 };
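
         /*
          * The table is terminated by the all-zero sentinel entry above
          * (NULL wmp_name); wm_lookup() below stops scanning when it
          * reaches it.
          */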
   1469 
   1470 /*
   1471  * Register read/write functions.
   1472  * Other than CSR_{READ|WRITE}().
   1473  */
   1474 
   1475 #if 0 /* Not currently used */
   1476 static inline uint32_t
   1477 wm_io_read(struct wm_softc *sc, int reg)
   1478 {
   1479 
   1480 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1481 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1482 }
   1483 #endif
   1484 
   1485 static inline void
   1486 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1487 {
   1488 
   1489 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1490 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1491 }
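
         /*
          * The I/O BAR exposes a two-register indirect window: a write to
          * offset 0 latches the target register offset, and offset 4 then
          * carries the data, so every access costs two bus_space
          * operations.  A minimal sketch (any register works; WMREG_CTRL
          * is just an example):
          *
          *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
          *
          * writes sc_ctrl through the window instead of through the usual
          * memory-mapped CSR_WRITE().
          */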
   1492 
   1493 static inline void
   1494 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1495     uint32_t data)
   1496 {
   1497 	uint32_t regval;
   1498 	int i;
   1499 
   1500 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1501 
   1502 	CSR_WRITE(sc, reg, regval);
   1503 
   1504 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1505 		delay(5);
   1506 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1507 			break;
   1508 	}
   1509 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1510 		aprint_error("%s: WARNING:"
   1511 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1512 		    device_xname(sc->sc_dev), reg);
   1513 	}
   1514 }
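
         /*
          * wm_82575_write_8bit_ctlr_reg() completes asynchronously: the
          * loop polls at 5us intervals, so the wait is bounded by roughly
          * 5 * SCTL_CTL_POLL_TIMEOUT microseconds before the warning is
          * printed.
          */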
   1515 
   1516 static inline void
   1517 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1518 {
   1519 	wa->wa_low = htole32(v & 0xffffffffU);
   1520 	if (sizeof(bus_addr_t) == 8)
   1521 		wa->wa_high = htole32((uint64_t) v >> 32);
   1522 	else
   1523 		wa->wa_high = 0;
   1524 }
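
         /*
          * Example: with a 64-bit bus_addr_t, v = 0x123456789 stores
          * wa_low = htole32(0x23456789) and wa_high = htole32(0x1); with
          * a 32-bit bus_addr_t the high word is simply cleared.
          */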
   1525 
   1526 /*
   1527  * Descriptor sync/init functions.
   1528  */
   1529 static inline void
   1530 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1531 {
   1532 	struct wm_softc *sc = txq->txq_sc;
   1533 
   1534 	/* If it will wrap around, sync to the end of the ring. */
   1535 	if ((start + num) > WM_NTXDESC(txq)) {
   1536 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1537 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1538 		    (WM_NTXDESC(txq) - start), ops);
   1539 		num -= (WM_NTXDESC(txq) - start);
   1540 		start = 0;
   1541 	}
   1542 
   1543 	/* Now sync whatever is left. */
   1544 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1545 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1546 }
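
         /*
          * Example: on a 256-descriptor ring, a sync of start = 250,
          * num = 10 is split into two bus_dmamap_sync() calls, one
          * covering descriptors 250-255 and one covering 0-3.
          */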
   1547 
   1548 static inline void
   1549 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1550 {
   1551 	struct wm_softc *sc = rxq->rxq_sc;
   1552 
   1553 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1554 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1555 }
   1556 
   1557 static inline void
   1558 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1559 {
   1560 	struct wm_softc *sc = rxq->rxq_sc;
   1561 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1562 	struct mbuf *m = rxs->rxs_mbuf;
   1563 
   1564 	/*
   1565 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1566 	 * so that the payload after the Ethernet header is aligned
   1567 	 * to a 4-byte boundary.
    1568 	 *
   1569 	 * XXX BRAINDAMAGE ALERT!
   1570 	 * The stupid chip uses the same size for every buffer, which
   1571 	 * is set in the Receive Control register.  We are using the 2K
   1572 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1573 	 * reason, we can't "scoot" packets longer than the standard
   1574 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1575 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1576 	 * the upper layer copy the headers.
   1577 	 */
   1578 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1579 
   1580 	if (sc->sc_type == WM_T_82574) {
   1581 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1582 		rxd->erx_data.erxd_addr =
   1583 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1584 		rxd->erx_data.erxd_dd = 0;
   1585 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1586 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1587 
   1588 		rxd->nqrx_data.nrxd_paddr =
   1589 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1590 		/* Currently, split header is not supported. */
   1591 		rxd->nqrx_data.nrxd_haddr = 0;
   1592 	} else {
   1593 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1594 
   1595 		wm_set_dma_addr(&rxd->wrx_addr,
   1596 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1597 		rxd->wrx_len = 0;
   1598 		rxd->wrx_cksum = 0;
   1599 		rxd->wrx_status = 0;
   1600 		rxd->wrx_errors = 0;
   1601 		rxd->wrx_special = 0;
   1602 	}
   1603 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1604 
   1605 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1606 }
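
         /*
          * wm_init_rxdesc() programs one of three descriptor layouts: the
          * 82574 extended format, the "new queue" format used by the
          * 82575 and later, or the legacy wiseman format.  In every case
          * the final CSR_WRITE() of the ring tail register hands the
          * descriptor back to the hardware.
          */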
   1607 
   1608 /*
   1609  * Device driver interface functions and commonly used functions.
   1610  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1611  */
   1612 
   1613 /* Lookup supported device table */
   1614 static const struct wm_product *
   1615 wm_lookup(const struct pci_attach_args *pa)
   1616 {
   1617 	const struct wm_product *wmp;
   1618 
   1619 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1620 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1621 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1622 			return wmp;
   1623 	}
   1624 	return NULL;
   1625 }
   1626 
   1627 /* The match function (ca_match) */
   1628 static int
   1629 wm_match(device_t parent, cfdata_t cf, void *aux)
   1630 {
   1631 	struct pci_attach_args *pa = aux;
   1632 
   1633 	if (wm_lookup(pa) != NULL)
   1634 		return 1;
   1635 
   1636 	return 0;
   1637 }
   1638 
   1639 /* The attach function (ca_attach) */
   1640 static void
   1641 wm_attach(device_t parent, device_t self, void *aux)
   1642 {
   1643 	struct wm_softc *sc = device_private(self);
   1644 	struct pci_attach_args *pa = aux;
   1645 	prop_dictionary_t dict;
   1646 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1647 	pci_chipset_tag_t pc = pa->pa_pc;
   1648 	int counts[PCI_INTR_TYPE_SIZE];
   1649 	pci_intr_type_t max_type;
   1650 	const char *eetype, *xname;
   1651 	bus_space_tag_t memt;
   1652 	bus_space_handle_t memh;
   1653 	bus_size_t memsize;
   1654 	int memh_valid;
   1655 	int i, error;
   1656 	const struct wm_product *wmp;
   1657 	prop_data_t ea;
   1658 	prop_number_t pn;
   1659 	uint8_t enaddr[ETHER_ADDR_LEN];
   1660 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1661 	pcireg_t preg, memtype;
   1662 	uint16_t eeprom_data, apme_mask;
   1663 	bool force_clear_smbi;
   1664 	uint32_t link_mode;
   1665 	uint32_t reg;
   1666 
   1667 	sc->sc_dev = self;
   1668 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1669 	sc->sc_core_stopping = false;
   1670 
   1671 	wmp = wm_lookup(pa);
   1672 #ifdef DIAGNOSTIC
   1673 	if (wmp == NULL) {
   1674 		printf("\n");
   1675 		panic("wm_attach: impossible");
   1676 	}
   1677 #endif
   1678 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1679 
   1680 	sc->sc_pc = pa->pa_pc;
   1681 	sc->sc_pcitag = pa->pa_tag;
   1682 
   1683 	if (pci_dma64_available(pa))
   1684 		sc->sc_dmat = pa->pa_dmat64;
   1685 	else
   1686 		sc->sc_dmat = pa->pa_dmat;
   1687 
   1688 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1689 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1690 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1691 
   1692 	sc->sc_type = wmp->wmp_type;
   1693 
   1694 	/* Set default function pointers */
   1695 	sc->phy.acquire = wm_get_null;
   1696 	sc->phy.release = wm_put_null;
   1697 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1698 
   1699 	if (sc->sc_type < WM_T_82543) {
   1700 		if (sc->sc_rev < 2) {
   1701 			aprint_error_dev(sc->sc_dev,
   1702 			    "i82542 must be at least rev. 2\n");
   1703 			return;
   1704 		}
   1705 		if (sc->sc_rev < 3)
   1706 			sc->sc_type = WM_T_82542_2_0;
   1707 	}
   1708 
   1709 	/*
   1710 	 * Disable MSI for Errata:
   1711 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1712 	 *
   1713 	 *  82544: Errata 25
   1714 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1715 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1716 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1717 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1718 	 *
   1719 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1720 	 *
   1721 	 *  82571 & 82572: Errata 63
   1722 	 */
   1723 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1724 	    || (sc->sc_type == WM_T_82572))
   1725 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1726 
   1727 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1728 	    || (sc->sc_type == WM_T_82580)
   1729 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1730 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1731 		sc->sc_flags |= WM_F_NEWQUEUE;
   1732 
   1733 	/* Set device properties (mactype) */
   1734 	dict = device_properties(sc->sc_dev);
   1735 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1736 
   1737 	/*
    1738 	 * Map the device.  All devices support memory-mapped access,
   1739 	 * and it is really required for normal operation.
   1740 	 */
   1741 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1742 	switch (memtype) {
   1743 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1744 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1745 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1746 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1747 		break;
   1748 	default:
   1749 		memh_valid = 0;
   1750 		break;
   1751 	}
   1752 
   1753 	if (memh_valid) {
   1754 		sc->sc_st = memt;
   1755 		sc->sc_sh = memh;
   1756 		sc->sc_ss = memsize;
   1757 	} else {
   1758 		aprint_error_dev(sc->sc_dev,
   1759 		    "unable to map device registers\n");
   1760 		return;
   1761 	}
   1762 
   1763 	/*
   1764 	 * In addition, i82544 and later support I/O mapped indirect
   1765 	 * register access.  It is not desirable (nor supported in
   1766 	 * this driver) to use it for normal operation, though it is
   1767 	 * required to work around bugs in some chip versions.
   1768 	 */
   1769 	if (sc->sc_type >= WM_T_82544) {
   1770 		/* First we have to find the I/O BAR. */
   1771 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1772 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1773 			if (memtype == PCI_MAPREG_TYPE_IO)
   1774 				break;
   1775 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1776 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1777 				i += 4;	/* skip high bits, too */
   1778 		}
   1779 		if (i < PCI_MAPREG_END) {
   1780 			/*
    1781 			 * We found PCI_MAPREG_TYPE_IO.  Note that the
    1782 			 * 82580 (and newer?) chips have no
    1783 			 * PCI_MAPREG_TYPE_IO.  That is no problem,
    1784 			 * because those newer chips don't have this bug.
    1785 			 *
    1786 			 * The i8254x apparently doesn't respond when the
    1787 			 * I/O BAR is 0, which looks as if it has not
    1788 			 * been configured.
   1789 			 */
   1790 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1791 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1792 				aprint_error_dev(sc->sc_dev,
   1793 				    "WARNING: I/O BAR at zero.\n");
   1794 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1795 					0, &sc->sc_iot, &sc->sc_ioh,
   1796 					NULL, &sc->sc_ios) == 0) {
   1797 				sc->sc_flags |= WM_F_IOH_VALID;
   1798 			} else {
   1799 				aprint_error_dev(sc->sc_dev,
   1800 				    "WARNING: unable to map I/O space\n");
   1801 			}
   1802 		}
   1803 
   1804 	}
   1805 
   1806 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1807 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1808 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1809 	if (sc->sc_type < WM_T_82542_2_1)
   1810 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1811 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1812 
   1813 	/* power up chip */
   1814 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1815 	    NULL)) && error != EOPNOTSUPP) {
   1816 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1817 		return;
   1818 	}
   1819 
   1820 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1821 
   1822 	/* Allocation settings */
   1823 	max_type = PCI_INTR_TYPE_MSIX;
   1824 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1825 	counts[PCI_INTR_TYPE_MSI] = 1;
   1826 	counts[PCI_INTR_TYPE_INTX] = 1;
   1827 
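         	/*
         	 * The allocation below falls back MSI-X -> MSI -> INTx:
         	 * MSI-X asks for one vector per queue plus one for link
         	 * events, and each failed setup releases the allocated
         	 * vectors and retries with the next weaker interrupt type.
         	 */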
   1828 alloc_retry:
   1829 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1830 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1831 		return;
   1832 	}
   1833 
   1834 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1835 		error = wm_setup_msix(sc);
   1836 		if (error) {
   1837 			pci_intr_release(pc, sc->sc_intrs,
   1838 			    counts[PCI_INTR_TYPE_MSIX]);
   1839 
   1840 			/* Setup for MSI: Disable MSI-X */
   1841 			max_type = PCI_INTR_TYPE_MSI;
   1842 			counts[PCI_INTR_TYPE_MSI] = 1;
   1843 			counts[PCI_INTR_TYPE_INTX] = 1;
   1844 			goto alloc_retry;
   1845 		}
    1846 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1847 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1848 		error = wm_setup_legacy(sc);
   1849 		if (error) {
   1850 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1851 			    counts[PCI_INTR_TYPE_MSI]);
   1852 
   1853 			/* The next try is for INTx: Disable MSI */
   1854 			max_type = PCI_INTR_TYPE_INTX;
   1855 			counts[PCI_INTR_TYPE_INTX] = 1;
   1856 			goto alloc_retry;
   1857 		}
   1858 	} else {
   1859 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1860 		error = wm_setup_legacy(sc);
   1861 		if (error) {
   1862 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1863 			    counts[PCI_INTR_TYPE_INTX]);
   1864 			return;
   1865 		}
   1866 	}
   1867 
   1868 	/*
   1869 	 * Check the function ID (unit number of the chip).
   1870 	 */
   1871 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1872 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1873 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1874 	    || (sc->sc_type == WM_T_82580)
   1875 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1876 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1877 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1878 	else
   1879 		sc->sc_funcid = 0;
   1880 
   1881 	/*
   1882 	 * Determine a few things about the bus we're connected to.
   1883 	 */
   1884 	if (sc->sc_type < WM_T_82543) {
   1885 		/* We don't really know the bus characteristics here. */
   1886 		sc->sc_bus_speed = 33;
   1887 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1888 		/*
    1889 		 * CSA (Communication Streaming Architecture) is about as
    1890 		 * fast as a 32-bit 66MHz PCI bus.
   1891 		 */
   1892 		sc->sc_flags |= WM_F_CSA;
   1893 		sc->sc_bus_speed = 66;
   1894 		aprint_verbose_dev(sc->sc_dev,
   1895 		    "Communication Streaming Architecture\n");
   1896 		if (sc->sc_type == WM_T_82547) {
   1897 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1898 			callout_setfunc(&sc->sc_txfifo_ch,
   1899 					wm_82547_txfifo_stall, sc);
   1900 			aprint_verbose_dev(sc->sc_dev,
   1901 			    "using 82547 Tx FIFO stall work-around\n");
   1902 		}
   1903 	} else if (sc->sc_type >= WM_T_82571) {
   1904 		sc->sc_flags |= WM_F_PCIE;
   1905 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1906 		    && (sc->sc_type != WM_T_ICH10)
   1907 		    && (sc->sc_type != WM_T_PCH)
   1908 		    && (sc->sc_type != WM_T_PCH2)
   1909 		    && (sc->sc_type != WM_T_PCH_LPT)
   1910 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1911 			/* ICH* and PCH* have no PCIe capability registers */
   1912 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1913 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1914 				NULL) == 0)
   1915 				aprint_error_dev(sc->sc_dev,
   1916 				    "unable to find PCIe capability\n");
   1917 		}
   1918 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1919 	} else {
   1920 		reg = CSR_READ(sc, WMREG_STATUS);
   1921 		if (reg & STATUS_BUS64)
   1922 			sc->sc_flags |= WM_F_BUS64;
   1923 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1924 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1925 
   1926 			sc->sc_flags |= WM_F_PCIX;
   1927 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1928 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1929 				aprint_error_dev(sc->sc_dev,
   1930 				    "unable to find PCIX capability\n");
   1931 			else if (sc->sc_type != WM_T_82545_3 &&
   1932 				 sc->sc_type != WM_T_82546_3) {
   1933 				/*
   1934 				 * Work around a problem caused by the BIOS
   1935 				 * setting the max memory read byte count
   1936 				 * incorrectly.
   1937 				 */
   1938 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1939 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1940 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1941 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1942 
   1943 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1944 				    PCIX_CMD_BYTECNT_SHIFT;
   1945 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1946 				    PCIX_STATUS_MAXB_SHIFT;
   1947 				if (bytecnt > maxb) {
   1948 					aprint_verbose_dev(sc->sc_dev,
   1949 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1950 					    512 << bytecnt, 512 << maxb);
   1951 					pcix_cmd = (pcix_cmd &
   1952 					    ~PCIX_CMD_BYTECNT_MASK) |
   1953 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1954 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1955 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1956 					    pcix_cmd);
   1957 				}
   1958 			}
   1959 		}
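         		/*
         		 * MMRBC is encoded as a power of two (512 << n
         		 * bytes), so the clamp above can only lower the
         		 * configured burst size, never raise it.
         		 */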
   1960 		/*
   1961 		 * The quad port adapter is special; it has a PCIX-PCIX
   1962 		 * bridge on the board, and can run the secondary bus at
   1963 		 * a higher speed.
   1964 		 */
   1965 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1966 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1967 								      : 66;
   1968 		} else if (sc->sc_flags & WM_F_PCIX) {
   1969 			switch (reg & STATUS_PCIXSPD_MASK) {
   1970 			case STATUS_PCIXSPD_50_66:
   1971 				sc->sc_bus_speed = 66;
   1972 				break;
   1973 			case STATUS_PCIXSPD_66_100:
   1974 				sc->sc_bus_speed = 100;
   1975 				break;
   1976 			case STATUS_PCIXSPD_100_133:
   1977 				sc->sc_bus_speed = 133;
   1978 				break;
   1979 			default:
   1980 				aprint_error_dev(sc->sc_dev,
   1981 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1982 				    reg & STATUS_PCIXSPD_MASK);
   1983 				sc->sc_bus_speed = 66;
   1984 				break;
   1985 			}
   1986 		} else
   1987 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1988 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1989 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1990 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1991 	}
   1992 
   1993 	/* clear interesting stat counters */
   1994 	CSR_READ(sc, WMREG_COLC);
   1995 	CSR_READ(sc, WMREG_RXERRC);
   1996 
   1997 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1998 	    || (sc->sc_type >= WM_T_ICH8))
   1999 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2000 	if (sc->sc_type >= WM_T_ICH8)
   2001 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2002 
   2003 	/* Set PHY, NVM mutex related stuff */
   2004 	switch (sc->sc_type) {
   2005 	case WM_T_82542_2_0:
   2006 	case WM_T_82542_2_1:
   2007 	case WM_T_82543:
   2008 	case WM_T_82544:
   2009 		/* Microwire */
   2010 		sc->sc_nvm_wordsize = 64;
   2011 		sc->sc_nvm_addrbits = 6;
   2012 		break;
   2013 	case WM_T_82540:
   2014 	case WM_T_82545:
   2015 	case WM_T_82545_3:
   2016 	case WM_T_82546:
   2017 	case WM_T_82546_3:
   2018 		/* Microwire */
   2019 		reg = CSR_READ(sc, WMREG_EECD);
   2020 		if (reg & EECD_EE_SIZE) {
   2021 			sc->sc_nvm_wordsize = 256;
   2022 			sc->sc_nvm_addrbits = 8;
   2023 		} else {
   2024 			sc->sc_nvm_wordsize = 64;
   2025 			sc->sc_nvm_addrbits = 6;
   2026 		}
   2027 		sc->sc_flags |= WM_F_LOCK_EECD;
   2028 		break;
   2029 	case WM_T_82541:
   2030 	case WM_T_82541_2:
   2031 	case WM_T_82547:
   2032 	case WM_T_82547_2:
   2033 		sc->sc_flags |= WM_F_LOCK_EECD;
   2034 		reg = CSR_READ(sc, WMREG_EECD);
   2035 		if (reg & EECD_EE_TYPE) {
   2036 			/* SPI */
   2037 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2038 			wm_nvm_set_addrbits_size_eecd(sc);
   2039 		} else {
   2040 			/* Microwire */
   2041 			if ((reg & EECD_EE_ABITS) != 0) {
   2042 				sc->sc_nvm_wordsize = 256;
   2043 				sc->sc_nvm_addrbits = 8;
   2044 			} else {
   2045 				sc->sc_nvm_wordsize = 64;
   2046 				sc->sc_nvm_addrbits = 6;
   2047 			}
   2048 		}
   2049 		break;
   2050 	case WM_T_82571:
   2051 	case WM_T_82572:
   2052 		/* SPI */
   2053 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2054 		wm_nvm_set_addrbits_size_eecd(sc);
   2055 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2056 		sc->phy.acquire = wm_get_swsm_semaphore;
   2057 		sc->phy.release = wm_put_swsm_semaphore;
   2058 		break;
   2059 	case WM_T_82573:
   2060 	case WM_T_82574:
   2061 	case WM_T_82583:
   2062 		if (sc->sc_type == WM_T_82573) {
   2063 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2064 			sc->phy.acquire = wm_get_swsm_semaphore;
   2065 			sc->phy.release = wm_put_swsm_semaphore;
   2066 		} else {
   2067 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2068 			/* Both PHY and NVM use the same semaphore. */
   2069 			sc->phy.acquire
   2070 			    = wm_get_swfwhw_semaphore;
   2071 			sc->phy.release
   2072 			    = wm_put_swfwhw_semaphore;
   2073 		}
   2074 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2075 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2076 			sc->sc_nvm_wordsize = 2048;
   2077 		} else {
   2078 			/* SPI */
   2079 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2080 			wm_nvm_set_addrbits_size_eecd(sc);
   2081 		}
   2082 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2083 		break;
   2084 	case WM_T_82575:
   2085 	case WM_T_82576:
   2086 	case WM_T_82580:
   2087 	case WM_T_I350:
   2088 	case WM_T_I354:
   2089 	case WM_T_80003:
   2090 		/* SPI */
   2091 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 		wm_nvm_set_addrbits_size_eecd(sc);
   2093 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2094 		    | WM_F_LOCK_SWSM;
   2095 		sc->phy.acquire = wm_get_phy_82575;
   2096 		sc->phy.release = wm_put_phy_82575;
   2097 		break;
   2098 	case WM_T_ICH8:
   2099 	case WM_T_ICH9:
   2100 	case WM_T_ICH10:
   2101 	case WM_T_PCH:
   2102 	case WM_T_PCH2:
   2103 	case WM_T_PCH_LPT:
   2104 		/* FLASH */
   2105 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2106 		sc->sc_nvm_wordsize = 2048;
   2107 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2108 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2109 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2110 			aprint_error_dev(sc->sc_dev,
   2111 			    "can't map FLASH registers\n");
   2112 			goto out;
   2113 		}
   2114 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2115 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2116 		    ICH_FLASH_SECTOR_SIZE;
   2117 		sc->sc_ich8_flash_bank_size =
   2118 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2119 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2120 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2121 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
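         		/*
         		 * Worked example (assuming the usual 4KB flash
         		 * sector size): a GFPREG with base = 1 and
         		 * limit = 0x20 gives (0x20 + 1 - 1) = 32 sectors,
         		 * i.e. 128KB for both NVM banks; dividing by
         		 * 2 * sizeof(uint16_t) yields 32768 16-bit words
         		 * per bank.
         		 */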
   2122 		sc->sc_flashreg_offset = 0;
   2123 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2124 		sc->phy.release = wm_put_swflag_ich8lan;
   2125 		break;
   2126 	case WM_T_PCH_SPT:
   2127 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2128 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2129 		sc->sc_flasht = sc->sc_st;
   2130 		sc->sc_flashh = sc->sc_sh;
   2131 		sc->sc_ich8_flash_base = 0;
   2132 		sc->sc_nvm_wordsize =
   2133 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2134 			* NVM_SIZE_MULTIPLIER;
    2135 		/* That size is in bytes; we want words */
   2136 		sc->sc_nvm_wordsize /= 2;
   2137 		/* assume 2 banks */
   2138 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2139 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2140 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2141 		sc->phy.release = wm_put_swflag_ich8lan;
   2142 		break;
   2143 	case WM_T_I210:
   2144 	case WM_T_I211:
   2145 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2148 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2149 		} else {
   2150 			sc->sc_nvm_wordsize = INVM_SIZE;
   2151 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2152 		}
   2153 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2154 		sc->phy.acquire = wm_get_phy_82575;
   2155 		sc->phy.release = wm_put_phy_82575;
   2156 		break;
   2157 	default:
   2158 		break;
   2159 	}
   2160 
   2161 	/* Reset the chip to a known state. */
   2162 	wm_reset(sc);
   2163 
   2164 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2165 	switch (sc->sc_type) {
   2166 	case WM_T_82571:
   2167 	case WM_T_82572:
   2168 		reg = CSR_READ(sc, WMREG_SWSM2);
   2169 		if ((reg & SWSM2_LOCK) == 0) {
   2170 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2171 			force_clear_smbi = true;
   2172 		} else
   2173 			force_clear_smbi = false;
   2174 		break;
   2175 	case WM_T_82573:
   2176 	case WM_T_82574:
   2177 	case WM_T_82583:
   2178 		force_clear_smbi = true;
   2179 		break;
   2180 	default:
   2181 		force_clear_smbi = false;
   2182 		break;
   2183 	}
   2184 	if (force_clear_smbi) {
   2185 		reg = CSR_READ(sc, WMREG_SWSM);
   2186 		if ((reg & SWSM_SMBI) != 0)
   2187 			aprint_error_dev(sc->sc_dev,
   2188 			    "Please update the Bootagent\n");
   2189 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2190 	}
   2191 
   2192 	/*
    2193 	 * Defer printing the EEPROM type until after verifying the checksum.
   2194 	 * This allows the EEPROM type to be printed correctly in the case
   2195 	 * that no EEPROM is attached.
   2196 	 */
   2197 	/*
   2198 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2199 	 * this for later, so we can fail future reads from the EEPROM.
   2200 	 */
   2201 	if (wm_nvm_validate_checksum(sc)) {
   2202 		/*
    2203 		 * Check a second time, because some PCIe parts fail the
    2204 		 * first check due to the link being in a sleep state.
   2205 		 */
   2206 		if (wm_nvm_validate_checksum(sc))
   2207 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2208 	}
   2209 
   2210 	/* Set device properties (macflags) */
   2211 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2212 
   2213 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2214 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2215 	else {
   2216 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2217 		    sc->sc_nvm_wordsize);
   2218 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2219 			aprint_verbose("iNVM");
   2220 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2221 			aprint_verbose("FLASH(HW)");
   2222 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2223 			aprint_verbose("FLASH");
   2224 		else {
   2225 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2226 				eetype = "SPI";
   2227 			else
   2228 				eetype = "MicroWire";
   2229 			aprint_verbose("(%d address bits) %s EEPROM",
   2230 			    sc->sc_nvm_addrbits, eetype);
   2231 		}
   2232 	}
   2233 	wm_nvm_version(sc);
   2234 	aprint_verbose("\n");
   2235 
   2236 	/* Check for I21[01] PLL workaround */
   2237 	if (sc->sc_type == WM_T_I210)
   2238 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2239 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2240 		/* NVM image release 3.25 has a workaround */
   2241 		if ((sc->sc_nvm_ver_major < 3)
   2242 		    || ((sc->sc_nvm_ver_major == 3)
   2243 			&& (sc->sc_nvm_ver_minor < 25))) {
   2244 			aprint_verbose_dev(sc->sc_dev,
   2245 			    "ROM image version %d.%d is older than 3.25\n",
   2246 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2247 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2248 		}
   2249 	}
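         	/*
         	 * The check above is a plain two-field version compare: the
         	 * workaround flag stays set for any image strictly older
         	 * than 3.25.
         	 */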
   2250 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2251 		wm_pll_workaround_i210(sc);
   2252 
   2253 	wm_get_wakeup(sc);
   2254 
   2255 	/* Non-AMT based hardware can now take control from firmware */
   2256 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2257 		wm_get_hw_control(sc);
   2258 
   2259 	/*
   2260 	 * Read the Ethernet address from the EEPROM, if not first found
   2261 	 * in device properties.
   2262 	 */
   2263 	ea = prop_dictionary_get(dict, "mac-address");
   2264 	if (ea != NULL) {
   2265 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2266 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2267 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2268 	} else {
   2269 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2270 			aprint_error_dev(sc->sc_dev,
   2271 			    "unable to read Ethernet address\n");
   2272 			goto out;
   2273 		}
   2274 	}
   2275 
   2276 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2277 	    ether_sprintf(enaddr));
   2278 
   2279 	/*
   2280 	 * Read the config info from the EEPROM, and set up various
   2281 	 * bits in the control registers based on their contents.
   2282 	 */
   2283 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2284 	if (pn != NULL) {
   2285 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2286 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2287 	} else {
   2288 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2289 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2290 			goto out;
   2291 		}
   2292 	}
   2293 
   2294 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2295 	if (pn != NULL) {
   2296 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2297 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2298 	} else {
   2299 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2300 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	/* check for WM_F_WOL */
   2306 	switch (sc->sc_type) {
   2307 	case WM_T_82542_2_0:
   2308 	case WM_T_82542_2_1:
   2309 	case WM_T_82543:
   2310 		/* dummy? */
   2311 		eeprom_data = 0;
   2312 		apme_mask = NVM_CFG3_APME;
   2313 		break;
   2314 	case WM_T_82544:
   2315 		apme_mask = NVM_CFG2_82544_APM_EN;
   2316 		eeprom_data = cfg2;
   2317 		break;
   2318 	case WM_T_82546:
   2319 	case WM_T_82546_3:
   2320 	case WM_T_82571:
   2321 	case WM_T_82572:
   2322 	case WM_T_82573:
   2323 	case WM_T_82574:
   2324 	case WM_T_82583:
   2325 	case WM_T_80003:
   2326 	default:
   2327 		apme_mask = NVM_CFG3_APME;
   2328 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2329 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2330 		break;
   2331 	case WM_T_82575:
   2332 	case WM_T_82576:
   2333 	case WM_T_82580:
   2334 	case WM_T_I350:
   2335 	case WM_T_I354: /* XXX ok? */
   2336 	case WM_T_ICH8:
   2337 	case WM_T_ICH9:
   2338 	case WM_T_ICH10:
   2339 	case WM_T_PCH:
   2340 	case WM_T_PCH2:
   2341 	case WM_T_PCH_LPT:
   2342 	case WM_T_PCH_SPT:
   2343 		/* XXX The funcid should be checked on some devices */
   2344 		apme_mask = WUC_APME;
   2345 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2346 		break;
   2347 	}
   2348 
    2349 	/* Check for the WM_F_WOL flag after the EEPROM values are in hand */
   2350 	if ((eeprom_data & apme_mask) != 0)
   2351 		sc->sc_flags |= WM_F_WOL;
   2352 #ifdef WM_DEBUG
   2353 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2354 		printf("WOL\n");
   2355 #endif
   2356 
   2357 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2358 		/* Check NVM for autonegotiation */
   2359 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2360 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2361 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2362 		}
   2363 	}
   2364 
   2365 	/*
    2366 	 * XXX need special handling for some multiple-port cards
    2367 	 * to disable a particular port.
   2368 	 */
   2369 
   2370 	if (sc->sc_type >= WM_T_82544) {
   2371 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2372 		if (pn != NULL) {
   2373 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2374 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2375 		} else {
   2376 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2377 				aprint_error_dev(sc->sc_dev,
   2378 				    "unable to read SWDPIN\n");
   2379 				goto out;
   2380 			}
   2381 		}
   2382 	}
   2383 
   2384 	if (cfg1 & NVM_CFG1_ILOS)
   2385 		sc->sc_ctrl |= CTRL_ILOS;
   2386 
   2387 	/*
   2388 	 * XXX
    2389 	 * This code isn't correct, because pins 2 and 3 are located
    2390 	 * at different positions on newer chips.  Check all datasheets.
    2391 	 *
    2392 	 * Until this is resolved, only do it on chips up to the 82580.
   2393 	 */
   2394 	if (sc->sc_type <= WM_T_82580) {
   2395 		if (sc->sc_type >= WM_T_82544) {
   2396 			sc->sc_ctrl |=
   2397 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2398 			    CTRL_SWDPIO_SHIFT;
   2399 			sc->sc_ctrl |=
   2400 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2401 			    CTRL_SWDPINS_SHIFT;
   2402 		} else {
   2403 			sc->sc_ctrl |=
   2404 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2405 			    CTRL_SWDPIO_SHIFT;
   2406 		}
   2407 	}
   2408 
   2409 	/* XXX For other than 82580? */
   2410 	if (sc->sc_type == WM_T_82580) {
   2411 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2412 		if (nvmword & __BIT(13))
   2413 			sc->sc_ctrl |= CTRL_ILOS;
   2414 	}
   2415 
   2416 #if 0
   2417 	if (sc->sc_type >= WM_T_82544) {
   2418 		if (cfg1 & NVM_CFG1_IPS0)
   2419 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2420 		if (cfg1 & NVM_CFG1_IPS1)
   2421 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2422 		sc->sc_ctrl_ext |=
   2423 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2424 		    CTRL_EXT_SWDPIO_SHIFT;
   2425 		sc->sc_ctrl_ext |=
   2426 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2427 		    CTRL_EXT_SWDPINS_SHIFT;
   2428 	} else {
   2429 		sc->sc_ctrl_ext |=
   2430 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2431 		    CTRL_EXT_SWDPIO_SHIFT;
   2432 	}
   2433 #endif
   2434 
   2435 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2436 #if 0
   2437 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2438 #endif
   2439 
   2440 	if (sc->sc_type == WM_T_PCH) {
   2441 		uint16_t val;
   2442 
   2443 		/* Save the NVM K1 bit setting */
   2444 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2445 
   2446 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2447 			sc->sc_nvm_k1_enabled = 1;
   2448 		else
   2449 			sc->sc_nvm_k1_enabled = 0;
   2450 	}
   2451 
   2452 	/*
    2453 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2454 	 * media structures accordingly.
   2455 	 */
   2456 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2457 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2458 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2459 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2460 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2461 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2462 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2463 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2464 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2465 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2466 	    || (sc->sc_type == WM_T_I211)) {
   2467 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2468 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2469 		switch (link_mode) {
   2470 		case CTRL_EXT_LINK_MODE_1000KX:
   2471 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2472 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2473 			break;
   2474 		case CTRL_EXT_LINK_MODE_SGMII:
   2475 			if (wm_sgmii_uses_mdio(sc)) {
   2476 				aprint_verbose_dev(sc->sc_dev,
   2477 				    "SGMII(MDIO)\n");
   2478 				sc->sc_flags |= WM_F_SGMII;
   2479 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2480 				break;
   2481 			}
   2482 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2483 			/*FALLTHROUGH*/
   2484 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2485 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2486 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2487 				if (link_mode
   2488 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2489 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2490 					sc->sc_flags |= WM_F_SGMII;
   2491 				} else {
   2492 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2493 					aprint_verbose_dev(sc->sc_dev,
   2494 					    "SERDES\n");
   2495 				}
   2496 				break;
   2497 			}
   2498 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2499 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2500 
   2501 			/* Change current link mode setting */
   2502 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2503 			switch (sc->sc_mediatype) {
   2504 			case WM_MEDIATYPE_COPPER:
   2505 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2506 				break;
   2507 			case WM_MEDIATYPE_SERDES:
   2508 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2509 				break;
   2510 			default:
   2511 				break;
   2512 			}
   2513 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2514 			break;
   2515 		case CTRL_EXT_LINK_MODE_GMII:
   2516 		default:
   2517 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2518 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2519 			break;
   2520 		}
   2521 
    2523 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2524 			reg |= CTRL_EXT_I2C_ENA;
    2525 		else
    2526 			reg &= ~CTRL_EXT_I2C_ENA;
   2527 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2528 
   2529 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2530 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2531 		else
   2532 			wm_tbi_mediainit(sc);
   2533 	} else if (sc->sc_type < WM_T_82543 ||
   2534 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2535 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2536 			aprint_error_dev(sc->sc_dev,
   2537 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2538 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2539 		}
   2540 		wm_tbi_mediainit(sc);
   2541 	} else {
   2542 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2543 			aprint_error_dev(sc->sc_dev,
   2544 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2546 		}
   2547 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2548 	}
   2549 
   2550 	ifp = &sc->sc_ethercom.ec_if;
   2551 	xname = device_xname(sc->sc_dev);
   2552 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2553 	ifp->if_softc = sc;
   2554 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2555 #ifdef WM_MPSAFE
   2556 	ifp->if_extflags = IFEF_START_MPSAFE;
   2557 #endif
   2558 	ifp->if_ioctl = wm_ioctl;
   2559 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2560 		ifp->if_start = wm_nq_start;
   2561 		if (sc->sc_nqueues > 1)
   2562 			ifp->if_transmit = wm_nq_transmit;
   2563 	} else {
   2564 		ifp->if_start = wm_start;
   2565 		if (sc->sc_nqueues > 1)
   2566 			ifp->if_transmit = wm_transmit;
   2567 	}
   2568 	ifp->if_watchdog = wm_watchdog;
   2569 	ifp->if_init = wm_init;
   2570 	ifp->if_stop = wm_stop;
   2571 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2572 	IFQ_SET_READY(&ifp->if_snd);
   2573 
   2574 	/* Check for jumbo frame */
   2575 	switch (sc->sc_type) {
   2576 	case WM_T_82573:
   2577 		/* XXX limited to 9234 if ASPM is disabled */
   2578 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2579 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2580 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2581 		break;
   2582 	case WM_T_82571:
   2583 	case WM_T_82572:
   2584 	case WM_T_82574:
   2585 	case WM_T_82575:
   2586 	case WM_T_82576:
   2587 	case WM_T_82580:
   2588 	case WM_T_I350:
    2589 	case WM_T_I354: /* XXX ok? */
   2590 	case WM_T_I210:
   2591 	case WM_T_I211:
   2592 	case WM_T_80003:
   2593 	case WM_T_ICH9:
   2594 	case WM_T_ICH10:
   2595 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2596 	case WM_T_PCH_LPT:
   2597 	case WM_T_PCH_SPT:
   2598 		/* XXX limited to 9234 */
   2599 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2600 		break;
   2601 	case WM_T_PCH:
   2602 		/* XXX limited to 4096 */
   2603 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2604 		break;
   2605 	case WM_T_82542_2_0:
   2606 	case WM_T_82542_2_1:
   2607 	case WM_T_82583:
   2608 	case WM_T_ICH8:
   2609 		/* No support for jumbo frame */
   2610 		break;
   2611 	default:
   2612 		/* ETHER_MAX_LEN_JUMBO */
   2613 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2614 		break;
   2615 	}
   2616 
    2617 	/* If we're an i82543 or greater, we can support VLANs. */
   2618 	if (sc->sc_type >= WM_T_82543)
   2619 		sc->sc_ethercom.ec_capabilities |=
   2620 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2621 
   2622 	/*
    2623 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2624 	 * on i82543 and later.
   2625 	 */
   2626 	if (sc->sc_type >= WM_T_82543) {
   2627 		ifp->if_capabilities |=
   2628 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2629 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2630 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2631 		    IFCAP_CSUM_TCPv6_Tx |
   2632 		    IFCAP_CSUM_UDPv6_Tx;
   2633 	}
   2634 
   2635 	/*
   2636 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2637 	 *
   2638 	 *	82541GI (8086:1076) ... no
   2639 	 *	82572EI (8086:10b9) ... yes
   2640 	 */
   2641 	if (sc->sc_type >= WM_T_82571) {
   2642 		ifp->if_capabilities |=
   2643 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2644 	}
   2645 
   2646 	/*
    2647 	 * If we're an i82544 or greater (except i82547), we can do
   2648 	 * TCP segmentation offload.
   2649 	 */
   2650 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2651 		ifp->if_capabilities |= IFCAP_TSOv4;
   2652 	}
   2653 
   2654 	if (sc->sc_type >= WM_T_82571) {
   2655 		ifp->if_capabilities |= IFCAP_TSOv6;
   2656 	}
   2657 
   2658 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2659 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2660 
   2661 #ifdef WM_MPSAFE
   2662 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2663 #else
   2664 	sc->sc_core_lock = NULL;
   2665 #endif
   2666 
   2667 	/* Attach the interface. */
   2668 	if_initialize(ifp);
   2669 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2670 	ether_ifattach(ifp, enaddr);
   2671 	if_register(ifp);
   2672 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2673 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2674 			  RND_FLAG_DEFAULT);
   2675 
   2676 #ifdef WM_EVENT_COUNTERS
   2677 	/* Attach event counters. */
   2678 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2679 	    NULL, xname, "linkintr");
   2680 
   2681 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2682 	    NULL, xname, "tx_xoff");
   2683 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2684 	    NULL, xname, "tx_xon");
   2685 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2686 	    NULL, xname, "rx_xoff");
   2687 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2688 	    NULL, xname, "rx_xon");
   2689 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2690 	    NULL, xname, "rx_macctl");
   2691 #endif /* WM_EVENT_COUNTERS */
   2692 
   2693 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2694 		pmf_class_network_register(self, ifp);
   2695 	else
   2696 		aprint_error_dev(self, "couldn't establish power handler\n");
   2697 
   2698 	sc->sc_flags |= WM_F_ATTACHED;
   2699  out:
   2700 	return;
   2701 }
   2702 
   2703 /* The detach function (ca_detach) */
   2704 static int
   2705 wm_detach(device_t self, int flags __unused)
   2706 {
   2707 	struct wm_softc *sc = device_private(self);
   2708 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2709 	int i;
   2710 
   2711 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2712 		return 0;
   2713 
   2714 	/* Stop the interface. Callouts are stopped in it. */
   2715 	wm_stop(ifp, 1);
   2716 
   2717 	pmf_device_deregister(self);
   2718 
   2719 #ifdef WM_EVENT_COUNTERS
   2720 	evcnt_detach(&sc->sc_ev_linkintr);
   2721 
   2722 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2723 	evcnt_detach(&sc->sc_ev_tx_xon);
   2724 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2725 	evcnt_detach(&sc->sc_ev_rx_xon);
   2726 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2727 #endif /* WM_EVENT_COUNTERS */
   2728 
   2729 	/* Tell the firmware about the release */
   2730 	WM_CORE_LOCK(sc);
   2731 	wm_release_manageability(sc);
   2732 	wm_release_hw_control(sc);
   2733 	wm_enable_wakeup(sc);
   2734 	WM_CORE_UNLOCK(sc);
   2735 
   2736 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2737 
   2738 	/* Delete all remaining media. */
   2739 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2740 
   2741 	ether_ifdetach(ifp);
   2742 	if_detach(ifp);
   2743 	if_percpuq_destroy(sc->sc_ipq);
   2744 
   2745 	/* Unload RX dmamaps and free mbufs */
   2746 	for (i = 0; i < sc->sc_nqueues; i++) {
   2747 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2748 		mutex_enter(rxq->rxq_lock);
   2749 		wm_rxdrain(rxq);
   2750 		mutex_exit(rxq->rxq_lock);
   2751 	}
   2752 	/* Must unlock here */
   2753 
   2754 	/* Disestablish the interrupt handler */
   2755 	for (i = 0; i < sc->sc_nintrs; i++) {
   2756 		if (sc->sc_ihs[i] != NULL) {
   2757 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2758 			sc->sc_ihs[i] = NULL;
   2759 		}
   2760 	}
   2761 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2762 
   2763 	wm_free_txrx_queues(sc);
   2764 
   2765 	/* Unmap the registers */
   2766 	if (sc->sc_ss) {
   2767 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2768 		sc->sc_ss = 0;
   2769 	}
   2770 	if (sc->sc_ios) {
   2771 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2772 		sc->sc_ios = 0;
   2773 	}
   2774 	if (sc->sc_flashs) {
   2775 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2776 		sc->sc_flashs = 0;
   2777 	}
   2778 
   2779 	if (sc->sc_core_lock)
   2780 		mutex_obj_free(sc->sc_core_lock);
   2781 	if (sc->sc_ich_phymtx)
   2782 		mutex_obj_free(sc->sc_ich_phymtx);
   2783 	if (sc->sc_ich_nvmmtx)
   2784 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2785 
   2786 	return 0;
   2787 }
   2788 
   2789 static bool
   2790 wm_suspend(device_t self, const pmf_qual_t *qual)
   2791 {
   2792 	struct wm_softc *sc = device_private(self);
   2793 
   2794 	wm_release_manageability(sc);
   2795 	wm_release_hw_control(sc);
   2796 	wm_enable_wakeup(sc);
   2797 
   2798 	return true;
   2799 }
   2800 
   2801 static bool
   2802 wm_resume(device_t self, const pmf_qual_t *qual)
   2803 {
   2804 	struct wm_softc *sc = device_private(self);
   2805 
   2806 	wm_init_manageability(sc);
   2807 
   2808 	return true;
   2809 }
   2810 
   2811 /*
   2812  * wm_watchdog:		[ifnet interface function]
   2813  *
   2814  *	Watchdog timer handler.
   2815  */
   2816 static void
   2817 wm_watchdog(struct ifnet *ifp)
   2818 {
   2819 	int qid;
   2820 	struct wm_softc *sc = ifp->if_softc;
   2821 
   2822 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2823 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2824 
   2825 		wm_watchdog_txq(ifp, txq);
   2826 	}
   2827 
   2828 	/* Reset the interface. */
   2829 	(void) wm_init(ifp);
   2830 
   2831 	/*
    2832 	 * Some upper layer processing (e.g. ALTQ) still calls
    2833 	 * ifp->if_start() directly.
   2834 	 */
   2835 	/* Try to get more packets going. */
   2836 	ifp->if_start(ifp);
   2837 }
   2838 
   2839 static void
   2840 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2841 {
   2842 	struct wm_softc *sc = ifp->if_softc;
   2843 
   2844 	/*
   2845 	 * Since we're using delayed interrupts, sweep up
   2846 	 * before we report an error.
   2847 	 */
   2848 	mutex_enter(txq->txq_lock);
   2849 	wm_txeof(sc, txq);
   2850 	mutex_exit(txq->txq_lock);
   2851 
   2852 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2853 #ifdef WM_DEBUG
   2854 		int i, j;
   2855 		struct wm_txsoft *txs;
   2856 #endif
   2857 		log(LOG_ERR,
   2858 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2859 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2860 		    txq->txq_next);
   2861 		ifp->if_oerrors++;
   2862 #ifdef WM_DEBUG
   2863 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2864 		    i = WM_NEXTTXS(txq, i)) {
   2865 		    txs = &txq->txq_soft[i];
   2866 		    printf("txs %d tx %d -> %d\n",
   2867 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2868 		    for (j = txs->txs_firstdesc; ;
   2869 			j = WM_NEXTTX(txq, j)) {
   2870 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2871 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2872 			printf("\t %#08x%08x\n",
   2873 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2874 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2875 			if (j == txs->txs_lastdesc)
   2876 				break;
   2877 			}
   2878 		}
   2879 #endif
   2880 	}
   2881 }
   2882 
   2883 /*
   2884  * wm_tick:
   2885  *
   2886  *	One second timer, used to check link status, sweep up
   2887  *	completed transmit jobs, etc.
   2888  */
   2889 static void
   2890 wm_tick(void *arg)
   2891 {
   2892 	struct wm_softc *sc = arg;
   2893 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2894 #ifndef WM_MPSAFE
   2895 	int s = splnet();
   2896 #endif
   2897 
   2898 	WM_CORE_LOCK(sc);
   2899 
   2900 	if (sc->sc_core_stopping)
   2901 		goto out;
   2902 
   2903 	if (sc->sc_type >= WM_T_82542_2_1) {
   2904 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2905 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2906 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2907 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2908 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2909 	}
   2910 
   2911 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2912 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2913 	    + CSR_READ(sc, WMREG_CRCERRS)
   2914 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2915 	    + CSR_READ(sc, WMREG_SYMERRC)
   2916 	    + CSR_READ(sc, WMREG_RXERRC)
   2917 	    + CSR_READ(sc, WMREG_SEC)
   2918 	    + CSR_READ(sc, WMREG_CEXTERR)
   2919 	    + CSR_READ(sc, WMREG_RLEC);
   2920 	/*
    2921 	 * WMREG_RNBC is incremented when no receive buffers are available
    2922 	 * in host memory. It is not a count of dropped packets, because
    2923 	 * the ethernet controller can still receive packets in that case
    2924 	 * as long as there is space in the PHY's FIFO.
    2925 	 *
    2926 	 * If you want to count WMREG_RNBC events, use your own EVCNT
    2927 	 * instead of if_iqdrops.
   2928 	 */
   2929 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
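	/*
	 * Illustrative sketch only: a dedicated RNBC counter could be
	 * kept in a driver-local evcnt (sc_ev_rnbc is a hypothetical
	 * member, not part of this driver), e.g.
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, device_xname(sc->sc_dev), "rx_no_buffers");
	 *	...
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */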
   2930 
   2931 	if (sc->sc_flags & WM_F_HAS_MII)
   2932 		mii_tick(&sc->sc_mii);
   2933 	else if ((sc->sc_type >= WM_T_82575)
   2934 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2935 		wm_serdes_tick(sc);
   2936 	else
   2937 		wm_tbi_tick(sc);
   2938 
   2939 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2940 out:
   2941 	WM_CORE_UNLOCK(sc);
   2942 #ifndef WM_MPSAFE
   2943 	splx(s);
   2944 #endif
   2945 }
   2946 
   2947 static int
   2948 wm_ifflags_cb(struct ethercom *ec)
   2949 {
   2950 	struct ifnet *ifp = &ec->ec_if;
   2951 	struct wm_softc *sc = ifp->if_softc;
   2952 	int rc = 0;
   2953 
   2954 	WM_CORE_LOCK(sc);
   2955 
   2956 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2957 	sc->sc_if_flags = ifp->if_flags;
   2958 
   2959 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2960 		rc = ENETRESET;
   2961 		goto out;
   2962 	}
   2963 
   2964 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2965 		wm_set_filter(sc);
   2966 
   2967 	wm_set_vlan(sc);
   2968 
   2969 out:
   2970 	WM_CORE_UNLOCK(sc);
   2971 
   2972 	return rc;
   2973 }
   2974 
   2975 /*
   2976  * wm_ioctl:		[ifnet interface function]
   2977  *
   2978  *	Handle control requests from the operator.
   2979  */
   2980 static int
   2981 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2982 {
   2983 	struct wm_softc *sc = ifp->if_softc;
   2984 	struct ifreq *ifr = (struct ifreq *) data;
   2985 	struct ifaddr *ifa = (struct ifaddr *)data;
   2986 	struct sockaddr_dl *sdl;
   2987 	int s, error;
   2988 
   2989 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2990 		device_xname(sc->sc_dev), __func__));
   2991 
   2992 #ifndef WM_MPSAFE
   2993 	s = splnet();
   2994 #endif
   2995 	switch (cmd) {
   2996 	case SIOCSIFMEDIA:
   2997 	case SIOCGIFMEDIA:
   2998 		WM_CORE_LOCK(sc);
   2999 		/* Flow control requires full-duplex mode. */
   3000 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3001 		    (ifr->ifr_media & IFM_FDX) == 0)
   3002 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3003 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3004 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3005 				/* We can do both TXPAUSE and RXPAUSE. */
   3006 				ifr->ifr_media |=
   3007 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3008 			}
   3009 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3010 		}
   3011 		WM_CORE_UNLOCK(sc);
   3012 #ifdef WM_MPSAFE
   3013 		s = splnet();
   3014 #endif
   3015 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3016 #ifdef WM_MPSAFE
   3017 		splx(s);
   3018 #endif
   3019 		break;
   3020 	case SIOCINITIFADDR:
   3021 		WM_CORE_LOCK(sc);
   3022 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3023 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3024 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3025 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3026 			/* unicast address is first multicast entry */
   3027 			wm_set_filter(sc);
   3028 			error = 0;
   3029 			WM_CORE_UNLOCK(sc);
   3030 			break;
   3031 		}
   3032 		WM_CORE_UNLOCK(sc);
   3033 		/*FALLTHROUGH*/
   3034 	default:
   3035 #ifdef WM_MPSAFE
   3036 		s = splnet();
   3037 #endif
    3038 		/* ether_ioctl() may call wm_start(), so keep unlocked here */
   3039 		error = ether_ioctl(ifp, cmd, data);
   3040 #ifdef WM_MPSAFE
   3041 		splx(s);
   3042 #endif
   3043 		if (error != ENETRESET)
   3044 			break;
   3045 
   3046 		error = 0;
   3047 
   3048 		if (cmd == SIOCSIFCAP) {
   3049 			error = (*ifp->if_init)(ifp);
   3050 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3051 			;
   3052 		else if (ifp->if_flags & IFF_RUNNING) {
   3053 			/*
   3054 			 * Multicast list has changed; set the hardware filter
   3055 			 * accordingly.
   3056 			 */
   3057 			WM_CORE_LOCK(sc);
   3058 			wm_set_filter(sc);
   3059 			WM_CORE_UNLOCK(sc);
   3060 		}
   3061 		break;
   3062 	}
   3063 
   3064 #ifndef WM_MPSAFE
   3065 	splx(s);
   3066 #endif
   3067 	return error;
   3068 }
   3069 
   3070 /* MAC address related */
   3071 
   3072 /*
    3073  * Get the offset of the MAC address and return it.
    3074  * If an error occurs, offset 0 is used.
   3075  */
   3076 static uint16_t
   3077 wm_check_alt_mac_addr(struct wm_softc *sc)
   3078 {
   3079 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3080 	uint16_t offset = NVM_OFF_MACADDR;
   3081 
   3082 	/* Try to read alternative MAC address pointer */
   3083 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3084 		return 0;
   3085 
    3086 	/* Check whether the pointer is valid. */
   3087 	if ((offset == 0x0000) || (offset == 0xffff))
   3088 		return 0;
   3089 
   3090 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3091 	/*
    3092 	 * Check whether the alternative MAC address is valid.
    3093 	 * Some cards have a pointer other than 0xffff but don't
    3094 	 * actually use an alternative MAC address.
   3095 	 *
   3096 	 * Check whether the broadcast bit is set or not.
   3097 	 */
   3098 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3099 		if (((myea[0] & 0xff) & 0x01) == 0)
   3100 			return offset; /* Found */
   3101 
   3102 	/* Not found */
   3103 	return 0;
   3104 }
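/*
 * Illustrative layout assumed above (offsets are examples only): the
 * word at NVM_OFF_ALT_MAC_ADDR_PTR holds a word offset into the NVM;
 * the per-function entry is then found at that offset plus
 * NVM_OFF_MACADDR_82571(funcid).  A first byte with the multicast bit
 * (0x01) set marks the entry as unused, so 0 is returned instead.
 */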
   3105 
   3106 static int
   3107 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3108 {
   3109 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3110 	uint16_t offset = NVM_OFF_MACADDR;
   3111 	int do_invert = 0;
   3112 
   3113 	switch (sc->sc_type) {
   3114 	case WM_T_82580:
   3115 	case WM_T_I350:
   3116 	case WM_T_I354:
   3117 		/* EEPROM Top Level Partitioning */
   3118 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3119 		break;
   3120 	case WM_T_82571:
   3121 	case WM_T_82575:
   3122 	case WM_T_82576:
   3123 	case WM_T_80003:
   3124 	case WM_T_I210:
   3125 	case WM_T_I211:
   3126 		offset = wm_check_alt_mac_addr(sc);
   3127 		if (offset == 0)
   3128 			if ((sc->sc_funcid & 0x01) == 1)
   3129 				do_invert = 1;
   3130 		break;
   3131 	default:
   3132 		if ((sc->sc_funcid & 0x01) == 1)
   3133 			do_invert = 1;
   3134 		break;
   3135 	}
   3136 
   3137 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3138 		goto bad;
   3139 
   3140 	enaddr[0] = myea[0] & 0xff;
   3141 	enaddr[1] = myea[0] >> 8;
   3142 	enaddr[2] = myea[1] & 0xff;
   3143 	enaddr[3] = myea[1] >> 8;
   3144 	enaddr[4] = myea[2] & 0xff;
   3145 	enaddr[5] = myea[2] >> 8;
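	/*
	 * NVM words are stored little-endian; for example,
	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the
	 * MAC address 00:11:22:33:44:55.
	 */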
   3146 
   3147 	/*
   3148 	 * Toggle the LSB of the MAC address on the second port
   3149 	 * of some dual port cards.
   3150 	 */
   3151 	if (do_invert != 0)
   3152 		enaddr[5] ^= 1;
   3153 
   3154 	return 0;
   3155 
   3156  bad:
   3157 	return -1;
   3158 }
   3159 
   3160 /*
   3161  * wm_set_ral:
   3162  *
    3163  *	Set an entry in the receive address list.
   3164  */
   3165 static void
   3166 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3167 {
   3168 	uint32_t ral_lo, ral_hi;
   3169 
   3170 	if (enaddr != NULL) {
   3171 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3172 		    (enaddr[3] << 24);
   3173 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3174 		ral_hi |= RAL_AV;
   3175 	} else {
   3176 		ral_lo = 0;
   3177 		ral_hi = 0;
   3178 	}
   3179 
   3180 	if (sc->sc_type >= WM_T_82544) {
   3181 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3182 		    ral_lo);
   3183 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3184 		    ral_hi);
   3185 	} else {
   3186 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3187 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3188 	}
   3189 }
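/*
 * For example, the address 00:11:22:33:44:55 is packed as
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV; the first
 * octet ends up in the least significant byte of RAL_LO.
 */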
   3190 
   3191 /*
   3192  * wm_mchash:
   3193  *
   3194  *	Compute the hash of the multicast address for the 4096-bit
   3195  *	multicast filter.
   3196  */
   3197 static uint32_t
   3198 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3199 {
   3200 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3201 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3202 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3203 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3204 	uint32_t hash;
   3205 
   3206 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3207 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3208 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3209 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3210 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3211 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3212 		return (hash & 0x3ff);
   3213 	}
   3214 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3215 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3216 
   3217 	return (hash & 0xfff);
   3218 }
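/*
 * Worked example (non-ICH parts, sc_mchash_type == 0): for the
 * multicast address 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
 * wm_set_filter() then sets bit (0x010 & 0x1f) = 16 in MTA
 * register (0x010 >> 5) = 0.
 */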
   3219 
   3220 /*
   3221  * wm_set_filter:
   3222  *
   3223  *	Set up the receive filter.
   3224  */
   3225 static void
   3226 wm_set_filter(struct wm_softc *sc)
   3227 {
   3228 	struct ethercom *ec = &sc->sc_ethercom;
   3229 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3230 	struct ether_multi *enm;
   3231 	struct ether_multistep step;
   3232 	bus_addr_t mta_reg;
   3233 	uint32_t hash, reg, bit;
   3234 	int i, size, ralmax;
   3235 
   3236 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3237 		device_xname(sc->sc_dev), __func__));
   3238 
   3239 	if (sc->sc_type >= WM_T_82544)
   3240 		mta_reg = WMREG_CORDOVA_MTA;
   3241 	else
   3242 		mta_reg = WMREG_MTA;
   3243 
   3244 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3245 
   3246 	if (ifp->if_flags & IFF_BROADCAST)
   3247 		sc->sc_rctl |= RCTL_BAM;
   3248 	if (ifp->if_flags & IFF_PROMISC) {
   3249 		sc->sc_rctl |= RCTL_UPE;
   3250 		goto allmulti;
   3251 	}
   3252 
   3253 	/*
   3254 	 * Set the station address in the first RAL slot, and
   3255 	 * clear the remaining slots.
   3256 	 */
   3257 	if (sc->sc_type == WM_T_ICH8)
    3258 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3259 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3260 	    || (sc->sc_type == WM_T_PCH))
   3261 		size = WM_RAL_TABSIZE_ICH8;
   3262 	else if (sc->sc_type == WM_T_PCH2)
   3263 		size = WM_RAL_TABSIZE_PCH2;
    3264 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3265 		size = WM_RAL_TABSIZE_PCH_LPT;
   3266 	else if (sc->sc_type == WM_T_82575)
   3267 		size = WM_RAL_TABSIZE_82575;
   3268 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3269 		size = WM_RAL_TABSIZE_82576;
   3270 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3271 		size = WM_RAL_TABSIZE_I350;
   3272 	else
   3273 		size = WM_RAL_TABSIZE;
   3274 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3275 
   3276 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3277 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3278 		switch (i) {
   3279 		case 0:
   3280 			/* We can use all entries */
   3281 			ralmax = size;
   3282 			break;
   3283 		case 1:
   3284 			/* Only RAR[0] */
   3285 			ralmax = 1;
   3286 			break;
   3287 		default:
   3288 			/* available SHRA + RAR[0] */
   3289 			ralmax = i + 1;
   3290 		}
   3291 	} else
   3292 		ralmax = size;
   3293 	for (i = 1; i < size; i++) {
   3294 		if (i < ralmax)
   3295 			wm_set_ral(sc, NULL, i);
   3296 	}
   3297 
   3298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3299 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3300 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3301 	    || (sc->sc_type == WM_T_PCH_SPT))
   3302 		size = WM_ICH8_MC_TABSIZE;
   3303 	else
   3304 		size = WM_MC_TABSIZE;
   3305 	/* Clear out the multicast table. */
   3306 	for (i = 0; i < size; i++)
   3307 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3308 
   3309 	ETHER_LOCK(ec);
   3310 	ETHER_FIRST_MULTI(step, ec, enm);
   3311 	while (enm != NULL) {
   3312 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3313 			ETHER_UNLOCK(ec);
   3314 			/*
   3315 			 * We must listen to a range of multicast addresses.
   3316 			 * For now, just accept all multicasts, rather than
   3317 			 * trying to set only those filter bits needed to match
   3318 			 * the range.  (At this time, the only use of address
   3319 			 * ranges is for IP multicast routing, for which the
   3320 			 * range is big enough to require all bits set.)
   3321 			 */
   3322 			goto allmulti;
   3323 		}
   3324 
   3325 		hash = wm_mchash(sc, enm->enm_addrlo);
   3326 
   3327 		reg = (hash >> 5);
   3328 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3329 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3330 		    || (sc->sc_type == WM_T_PCH2)
   3331 		    || (sc->sc_type == WM_T_PCH_LPT)
   3332 		    || (sc->sc_type == WM_T_PCH_SPT))
   3333 			reg &= 0x1f;
   3334 		else
   3335 			reg &= 0x7f;
   3336 		bit = hash & 0x1f;
   3337 
   3338 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3339 		hash |= 1U << bit;
   3340 
   3341 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3342 			/*
    3343 			 * 82544 Errata 9: Certain registers cannot be written
   3344 			 * with particular alignments in PCI-X bus operation
   3345 			 * (FCAH, MTA and VFTA).
   3346 			 */
   3347 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3348 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3349 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3350 		} else
   3351 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3352 
   3353 		ETHER_NEXT_MULTI(step, enm);
   3354 	}
   3355 	ETHER_UNLOCK(ec);
   3356 
   3357 	ifp->if_flags &= ~IFF_ALLMULTI;
   3358 	goto setit;
   3359 
   3360  allmulti:
   3361 	ifp->if_flags |= IFF_ALLMULTI;
   3362 	sc->sc_rctl |= RCTL_MPE;
   3363 
   3364  setit:
   3365 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3366 }
   3367 
   3368 /* Reset and init related */
   3369 
   3370 static void
   3371 wm_set_vlan(struct wm_softc *sc)
   3372 {
   3373 
   3374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3375 		device_xname(sc->sc_dev), __func__));
   3376 
   3377 	/* Deal with VLAN enables. */
   3378 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3379 		sc->sc_ctrl |= CTRL_VME;
   3380 	else
   3381 		sc->sc_ctrl &= ~CTRL_VME;
   3382 
   3383 	/* Write the control registers. */
   3384 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3385 }
   3386 
   3387 static void
   3388 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3389 {
   3390 	uint32_t gcr;
   3391 	pcireg_t ctrl2;
   3392 
   3393 	gcr = CSR_READ(sc, WMREG_GCR);
   3394 
   3395 	/* Only take action if timeout value is defaulted to 0 */
   3396 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3397 		goto out;
   3398 
   3399 	if ((gcr & GCR_CAP_VER2) == 0) {
   3400 		gcr |= GCR_CMPL_TMOUT_10MS;
   3401 		goto out;
   3402 	}
   3403 
   3404 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3405 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3406 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3407 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3408 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3409 
   3410 out:
   3411 	/* Disable completion timeout resend */
   3412 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3413 
   3414 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3415 }
   3416 
   3417 void
   3418 wm_get_auto_rd_done(struct wm_softc *sc)
   3419 {
   3420 	int i;
   3421 
   3422 	/* wait for eeprom to reload */
   3423 	switch (sc->sc_type) {
   3424 	case WM_T_82571:
   3425 	case WM_T_82572:
   3426 	case WM_T_82573:
   3427 	case WM_T_82574:
   3428 	case WM_T_82583:
   3429 	case WM_T_82575:
   3430 	case WM_T_82576:
   3431 	case WM_T_82580:
   3432 	case WM_T_I350:
   3433 	case WM_T_I354:
   3434 	case WM_T_I210:
   3435 	case WM_T_I211:
   3436 	case WM_T_80003:
   3437 	case WM_T_ICH8:
   3438 	case WM_T_ICH9:
   3439 		for (i = 0; i < 10; i++) {
   3440 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3441 				break;
   3442 			delay(1000);
   3443 		}
   3444 		if (i == 10) {
   3445 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3446 			    "complete\n", device_xname(sc->sc_dev));
   3447 		}
   3448 		break;
   3449 	default:
   3450 		break;
   3451 	}
   3452 }
   3453 
   3454 void
   3455 wm_lan_init_done(struct wm_softc *sc)
   3456 {
   3457 	uint32_t reg = 0;
   3458 	int i;
   3459 
   3460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3461 		device_xname(sc->sc_dev), __func__));
   3462 
   3463 	/* Wait for eeprom to reload */
   3464 	switch (sc->sc_type) {
   3465 	case WM_T_ICH10:
   3466 	case WM_T_PCH:
   3467 	case WM_T_PCH2:
   3468 	case WM_T_PCH_LPT:
   3469 	case WM_T_PCH_SPT:
   3470 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3471 			reg = CSR_READ(sc, WMREG_STATUS);
   3472 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3473 				break;
   3474 			delay(100);
   3475 		}
   3476 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3477 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3478 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3479 		}
   3480 		break;
   3481 	default:
   3482 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3483 		    __func__);
   3484 		break;
   3485 	}
   3486 
   3487 	reg &= ~STATUS_LAN_INIT_DONE;
   3488 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3489 }
   3490 
   3491 void
   3492 wm_get_cfg_done(struct wm_softc *sc)
   3493 {
   3494 	int mask;
   3495 	uint32_t reg;
   3496 	int i;
   3497 
   3498 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3499 		device_xname(sc->sc_dev), __func__));
   3500 
   3501 	/* Wait for eeprom to reload */
   3502 	switch (sc->sc_type) {
   3503 	case WM_T_82542_2_0:
   3504 	case WM_T_82542_2_1:
   3505 		/* null */
   3506 		break;
   3507 	case WM_T_82543:
   3508 	case WM_T_82544:
   3509 	case WM_T_82540:
   3510 	case WM_T_82545:
   3511 	case WM_T_82545_3:
   3512 	case WM_T_82546:
   3513 	case WM_T_82546_3:
   3514 	case WM_T_82541:
   3515 	case WM_T_82541_2:
   3516 	case WM_T_82547:
   3517 	case WM_T_82547_2:
   3518 	case WM_T_82573:
   3519 	case WM_T_82574:
   3520 	case WM_T_82583:
   3521 		/* generic */
   3522 		delay(10*1000);
   3523 		break;
   3524 	case WM_T_80003:
   3525 	case WM_T_82571:
   3526 	case WM_T_82572:
   3527 	case WM_T_82575:
   3528 	case WM_T_82576:
   3529 	case WM_T_82580:
   3530 	case WM_T_I350:
   3531 	case WM_T_I354:
   3532 	case WM_T_I210:
   3533 	case WM_T_I211:
   3534 		if (sc->sc_type == WM_T_82571) {
   3535 			/* Only 82571 shares port 0 */
   3536 			mask = EEMNGCTL_CFGDONE_0;
   3537 		} else
   3538 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3539 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3540 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3541 				break;
   3542 			delay(1000);
   3543 		}
   3544 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3545 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3546 				device_xname(sc->sc_dev), __func__));
   3547 		}
   3548 		break;
   3549 	case WM_T_ICH8:
   3550 	case WM_T_ICH9:
   3551 	case WM_T_ICH10:
   3552 	case WM_T_PCH:
   3553 	case WM_T_PCH2:
   3554 	case WM_T_PCH_LPT:
   3555 	case WM_T_PCH_SPT:
   3556 		delay(10*1000);
   3557 		if (sc->sc_type >= WM_T_ICH10)
   3558 			wm_lan_init_done(sc);
   3559 		else
   3560 			wm_get_auto_rd_done(sc);
   3561 
   3562 		reg = CSR_READ(sc, WMREG_STATUS);
   3563 		if ((reg & STATUS_PHYRA) != 0)
   3564 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3565 		break;
   3566 	default:
   3567 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3568 		    __func__);
   3569 		break;
   3570 	}
   3571 }
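/*
 * For example, on a non-82571 multi-port device, LAN function 1 polls
 * EEMNGCTL_CFGDONE_0 << 1 (the CFGDONE bit of port 1), while the
 * 82571 always polls EEMNGCTL_CFGDONE_0 because it shares port 0.
 */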
   3572 
   3573 /* Init hardware bits */
   3574 void
   3575 wm_initialize_hardware_bits(struct wm_softc *sc)
   3576 {
   3577 	uint32_t tarc0, tarc1, reg;
   3578 
   3579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3580 		device_xname(sc->sc_dev), __func__));
   3581 
   3582 	/* For 82571 variant, 80003 and ICHs */
   3583 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3584 	    || (sc->sc_type >= WM_T_80003)) {
   3585 
   3586 		/* Transmit Descriptor Control 0 */
   3587 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3588 		reg |= TXDCTL_COUNT_DESC;
   3589 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3590 
   3591 		/* Transmit Descriptor Control 1 */
   3592 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3593 		reg |= TXDCTL_COUNT_DESC;
   3594 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3595 
   3596 		/* TARC0 */
   3597 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3598 		switch (sc->sc_type) {
   3599 		case WM_T_82571:
   3600 		case WM_T_82572:
   3601 		case WM_T_82573:
   3602 		case WM_T_82574:
   3603 		case WM_T_82583:
   3604 		case WM_T_80003:
   3605 			/* Clear bits 30..27 */
   3606 			tarc0 &= ~__BITS(30, 27);
   3607 			break;
   3608 		default:
   3609 			break;
   3610 		}
   3611 
   3612 		switch (sc->sc_type) {
   3613 		case WM_T_82571:
   3614 		case WM_T_82572:
   3615 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3616 
   3617 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3618 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3619 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3620 			/* 8257[12] Errata No.7 */
    3621 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3622 
   3623 			/* TARC1 bit 28 */
   3624 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3625 				tarc1 &= ~__BIT(28);
   3626 			else
   3627 				tarc1 |= __BIT(28);
   3628 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3629 
   3630 			/*
   3631 			 * 8257[12] Errata No.13
    3632 			 * Disable Dynamic Clock Gating.
   3633 			 */
   3634 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3635 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3636 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3637 			break;
   3638 		case WM_T_82573:
   3639 		case WM_T_82574:
   3640 		case WM_T_82583:
   3641 			if ((sc->sc_type == WM_T_82574)
   3642 			    || (sc->sc_type == WM_T_82583))
   3643 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3644 
   3645 			/* Extended Device Control */
   3646 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3647 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3648 			reg |= __BIT(22);	/* Set bit 22 */
   3649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3650 
   3651 			/* Device Control */
   3652 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3653 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3654 
   3655 			/* PCIe Control Register */
   3656 			/*
   3657 			 * 82573 Errata (unknown).
   3658 			 *
   3659 			 * 82574 Errata 25 and 82583 Errata 12
   3660 			 * "Dropped Rx Packets":
    3661 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3662 			 */
   3663 			reg = CSR_READ(sc, WMREG_GCR);
   3664 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3665 			CSR_WRITE(sc, WMREG_GCR, reg);
   3666 
   3667 			if ((sc->sc_type == WM_T_82574)
   3668 			    || (sc->sc_type == WM_T_82583)) {
   3669 				/*
   3670 				 * Document says this bit must be set for
   3671 				 * proper operation.
   3672 				 */
   3673 				reg = CSR_READ(sc, WMREG_GCR);
   3674 				reg |= __BIT(22);
   3675 				CSR_WRITE(sc, WMREG_GCR, reg);
   3676 
   3677 				/*
    3678 				 * Apply a workaround for a documented
    3679 				 * hardware erratum: error prone or
    3680 				 * unreliable PCIe completions can occur,
    3681 				 * particularly with ASPM enabled. Without
    3682 				 * the fix, the issue can cause Tx
    3683 				 * timeouts.
   3684 				 */
   3685 				reg = CSR_READ(sc, WMREG_GCR2);
   3686 				reg |= __BIT(0);
   3687 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3688 			}
   3689 			break;
   3690 		case WM_T_80003:
   3691 			/* TARC0 */
   3692 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3693 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3694 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3695 
   3696 			/* TARC1 bit 28 */
   3697 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3698 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3699 				tarc1 &= ~__BIT(28);
   3700 			else
   3701 				tarc1 |= __BIT(28);
   3702 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3703 			break;
   3704 		case WM_T_ICH8:
   3705 		case WM_T_ICH9:
   3706 		case WM_T_ICH10:
   3707 		case WM_T_PCH:
   3708 		case WM_T_PCH2:
   3709 		case WM_T_PCH_LPT:
   3710 		case WM_T_PCH_SPT:
   3711 			/* TARC0 */
   3712 			if ((sc->sc_type == WM_T_ICH8)
   3713 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3714 				/* Set TARC0 bits 29 and 28 */
   3715 				tarc0 |= __BITS(29, 28);
   3716 			}
   3717 			/* Set TARC0 bits 23,24,26,27 */
   3718 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3719 
   3720 			/* CTRL_EXT */
   3721 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3722 			reg |= __BIT(22);	/* Set bit 22 */
   3723 			/*
   3724 			 * Enable PHY low-power state when MAC is at D3
   3725 			 * w/o WoL
   3726 			 */
   3727 			if (sc->sc_type >= WM_T_PCH)
   3728 				reg |= CTRL_EXT_PHYPDEN;
   3729 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3730 
   3731 			/* TARC1 */
   3732 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3733 			/* bit 28 */
   3734 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3735 				tarc1 &= ~__BIT(28);
   3736 			else
   3737 				tarc1 |= __BIT(28);
   3738 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3739 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3740 
   3741 			/* Device Status */
   3742 			if (sc->sc_type == WM_T_ICH8) {
   3743 				reg = CSR_READ(sc, WMREG_STATUS);
   3744 				reg &= ~__BIT(31);
   3745 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3746 
   3747 			}
   3748 
   3749 			/* IOSFPC */
   3750 			if (sc->sc_type == WM_T_PCH_SPT) {
   3751 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3752 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3753 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3754 			}
   3755 			/*
    3756 			 * To work around a descriptor data corruption issue
    3757 			 * seen with NFS v2 UDP traffic, simply disable the
    3758 			 * NFS filtering capability.
   3759 			 */
   3760 			reg = CSR_READ(sc, WMREG_RFCTL);
   3761 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3762 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3763 			break;
   3764 		default:
   3765 			break;
   3766 		}
   3767 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3768 
   3769 		switch (sc->sc_type) {
   3770 		/*
   3771 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3772 		 * Avoid RSS Hash Value bug.
   3773 		 */
   3774 		case WM_T_82571:
   3775 		case WM_T_82572:
   3776 		case WM_T_82573:
   3777 		case WM_T_80003:
   3778 		case WM_T_ICH8:
   3779 			reg = CSR_READ(sc, WMREG_RFCTL);
    3780 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3781 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3782 			break;
   3783 		case WM_T_82574:
    3784 			/* Use the extended Rx descriptor. */
   3785 			reg = CSR_READ(sc, WMREG_RFCTL);
   3786 			reg |= WMREG_RFCTL_EXSTEN;
   3787 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3788 			break;
   3789 		default:
   3790 			break;
   3791 		}
   3792 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3793 		/*
   3794 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3795 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3796 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3797 		 * Correctly by the Device"
   3798 		 *
   3799 		 * I354(C2000) Errata AVR53:
   3800 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3801 		 * Hang"
   3802 		 */
   3803 		reg = CSR_READ(sc, WMREG_RFCTL);
   3804 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3805 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3806 	}
   3807 }
   3808 
   3809 static uint32_t
   3810 wm_rxpbs_adjust_82580(uint32_t val)
   3811 {
   3812 	uint32_t rv = 0;
   3813 
   3814 	if (val < __arraycount(wm_82580_rxpbs_table))
   3815 		rv = wm_82580_rxpbs_table[val];
   3816 
   3817 	return rv;
   3818 }
   3819 
   3820 /*
   3821  * wm_reset_phy:
   3822  *
   3823  *	generic PHY reset function.
   3824  *	Same as e1000_phy_hw_reset_generic()
   3825  */
   3826 static void
   3827 wm_reset_phy(struct wm_softc *sc)
   3828 {
   3829 	uint32_t reg;
   3830 
   3831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3832 		device_xname(sc->sc_dev), __func__));
   3833 	if (wm_phy_resetisblocked(sc))
   3834 		return;
   3835 
   3836 	sc->phy.acquire(sc);
   3837 
   3838 	reg = CSR_READ(sc, WMREG_CTRL);
   3839 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3840 	CSR_WRITE_FLUSH(sc);
   3841 
   3842 	delay(sc->phy.reset_delay_us);
   3843 
   3844 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3845 	CSR_WRITE_FLUSH(sc);
   3846 
   3847 	delay(150);
   3848 
   3849 	sc->phy.release(sc);
   3850 
   3851 	wm_get_cfg_done(sc);
   3852 }
   3853 
   3854 static void
   3855 wm_flush_desc_rings(struct wm_softc *sc)
   3856 {
   3857 	pcireg_t preg;
   3858 	uint32_t reg;
   3859 	int nexttx;
   3860 
   3861 	/* First, disable MULR fix in FEXTNVM11 */
   3862 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3863 	reg |= FEXTNVM11_DIS_MULRFIX;
   3864 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3865 
   3866 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3867 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3868 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3869 		struct wm_txqueue *txq;
   3870 		wiseman_txdesc_t *txd;
   3871 
   3872 		/* TX */
   3873 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3874 		    device_xname(sc->sc_dev), preg, reg);
   3875 		reg = CSR_READ(sc, WMREG_TCTL);
   3876 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3877 
   3878 		txq = &sc->sc_queue[0].wmq_txq;
   3879 		nexttx = txq->txq_next;
   3880 		txd = &txq->txq_descs[nexttx];
   3881 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3882 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3883 		txd->wtx_fields.wtxu_status = 0;
   3884 		txd->wtx_fields.wtxu_options = 0;
   3885 		txd->wtx_fields.wtxu_vlan = 0;
   3886 
   3887 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3888 			BUS_SPACE_BARRIER_WRITE);
   3889 
   3890 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3891 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3892 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3893 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3894 		delay(250);
   3895 	}
   3896 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3897 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3898 		uint32_t rctl;
   3899 
   3900 		/* RX */
   3901 		printf("%s: Need RX flush (reg = %08x)\n",
   3902 		    device_xname(sc->sc_dev), preg);
   3903 		rctl = CSR_READ(sc, WMREG_RCTL);
   3904 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3905 		CSR_WRITE_FLUSH(sc);
   3906 		delay(150);
   3907 
   3908 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3909 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3910 		reg &= 0xffffc000;
   3911 		/*
   3912 		 * update thresholds: prefetch threshold to 31, host threshold
   3913 		 * to 1 and make sure the granularity is "descriptors" and not
   3914 		 * "cache lines"
   3915 		 */
   3916 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3917 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3918 
   3919 		/*
   3920 		 * momentarily enable the RX ring for the changes to take
   3921 		 * effect
   3922 		 */
   3923 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3924 		CSR_WRITE_FLUSH(sc);
   3925 		delay(150);
   3926 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3927 	}
   3928 }
   3929 
   3930 /*
   3931  * wm_reset:
   3932  *
   3933  *	Reset the i82542 chip.
   3934  */
   3935 static void
   3936 wm_reset(struct wm_softc *sc)
   3937 {
   3938 	int phy_reset = 0;
   3939 	int i, error = 0;
   3940 	uint32_t reg;
   3941 
   3942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 	KASSERT(sc->sc_type != 0);
   3945 
   3946 	/*
   3947 	 * Allocate on-chip memory according to the MTU size.
   3948 	 * The Packet Buffer Allocation register must be written
   3949 	 * before the chip is reset.
   3950 	 */
   3951 	switch (sc->sc_type) {
   3952 	case WM_T_82547:
   3953 	case WM_T_82547_2:
   3954 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3955 		    PBA_22K : PBA_30K;
   3956 		for (i = 0; i < sc->sc_nqueues; i++) {
   3957 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3958 			txq->txq_fifo_head = 0;
   3959 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3960 			txq->txq_fifo_size =
   3961 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3962 			txq->txq_fifo_stall = 0;
   3963 		}
   3964 		break;
   3965 	case WM_T_82571:
   3966 	case WM_T_82572:
    3967 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3968 	case WM_T_80003:
   3969 		sc->sc_pba = PBA_32K;
   3970 		break;
   3971 	case WM_T_82573:
   3972 		sc->sc_pba = PBA_12K;
   3973 		break;
   3974 	case WM_T_82574:
   3975 	case WM_T_82583:
   3976 		sc->sc_pba = PBA_20K;
   3977 		break;
   3978 	case WM_T_82576:
   3979 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3980 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3981 		break;
   3982 	case WM_T_82580:
   3983 	case WM_T_I350:
   3984 	case WM_T_I354:
   3985 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3986 		break;
   3987 	case WM_T_I210:
   3988 	case WM_T_I211:
   3989 		sc->sc_pba = PBA_34K;
   3990 		break;
   3991 	case WM_T_ICH8:
   3992 		/* Workaround for a bit corruption issue in FIFO memory */
   3993 		sc->sc_pba = PBA_8K;
   3994 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3995 		break;
   3996 	case WM_T_ICH9:
   3997 	case WM_T_ICH10:
   3998 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3999 		    PBA_14K : PBA_10K;
   4000 		break;
   4001 	case WM_T_PCH:
   4002 	case WM_T_PCH2:
   4003 	case WM_T_PCH_LPT:
   4004 	case WM_T_PCH_SPT:
   4005 		sc->sc_pba = PBA_26K;
   4006 		break;
   4007 	default:
   4008 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4009 		    PBA_40K : PBA_48K;
   4010 		break;
   4011 	}
   4012 	/*
    4013 	 * Only old or non-multiqueue devices have the PBA register.
   4014 	 * XXX Need special handling for 82575.
   4015 	 */
   4016 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4017 	    || (sc->sc_type == WM_T_82575))
   4018 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4019 
   4020 	/* Prevent the PCI-E bus from sticking */
   4021 	if (sc->sc_flags & WM_F_PCIE) {
   4022 		int timeout = 800;
   4023 
   4024 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4025 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4026 
   4027 		while (timeout--) {
   4028 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4029 			    == 0)
   4030 				break;
   4031 			delay(100);
   4032 		}
   4033 	}
   4034 
   4035 	/* Set the completion timeout for interface */
   4036 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4037 	    || (sc->sc_type == WM_T_82580)
   4038 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4039 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4040 		wm_set_pcie_completion_timeout(sc);
   4041 
   4042 	/* Clear interrupt */
   4043 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4044 	if (sc->sc_nintrs > 1) {
   4045 		if (sc->sc_type != WM_T_82574) {
   4046 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4047 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4048 		} else {
   4049 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4050 		}
   4051 	}
   4052 
   4053 	/* Stop the transmit and receive processes. */
   4054 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4055 	sc->sc_rctl &= ~RCTL_EN;
   4056 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4057 	CSR_WRITE_FLUSH(sc);
   4058 
   4059 	/* XXX set_tbi_sbp_82543() */
   4060 
   4061 	delay(10*1000);
   4062 
   4063 	/* Must acquire the MDIO ownership before MAC reset */
   4064 	switch (sc->sc_type) {
   4065 	case WM_T_82573:
   4066 	case WM_T_82574:
   4067 	case WM_T_82583:
   4068 		error = wm_get_hw_semaphore_82573(sc);
   4069 		break;
   4070 	default:
   4071 		break;
   4072 	}
   4073 
   4074 	/*
   4075 	 * 82541 Errata 29? & 82547 Errata 28?
   4076 	 * See also the description about PHY_RST bit in CTRL register
   4077 	 * in 8254x_GBe_SDM.pdf.
   4078 	 */
   4079 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4080 		CSR_WRITE(sc, WMREG_CTRL,
   4081 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4082 		CSR_WRITE_FLUSH(sc);
   4083 		delay(5000);
   4084 	}
   4085 
   4086 	switch (sc->sc_type) {
   4087 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4088 	case WM_T_82541:
   4089 	case WM_T_82541_2:
   4090 	case WM_T_82547:
   4091 	case WM_T_82547_2:
   4092 		/*
   4093 		 * On some chipsets, a reset through a memory-mapped write
   4094 		 * cycle can cause the chip to reset before completing the
   4095 		 * write cycle.  This causes major headache that can be
   4096 		 * avoided by issuing the reset via indirect register writes
   4097 		 * through I/O space.
   4098 		 *
   4099 		 * So, if we successfully mapped the I/O BAR at attach time,
   4100 		 * use that.  Otherwise, try our luck with a memory-mapped
   4101 		 * reset.
   4102 		 */
   4103 		if (sc->sc_flags & WM_F_IOH_VALID)
   4104 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4105 		else
   4106 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4107 		break;
   4108 	case WM_T_82545_3:
   4109 	case WM_T_82546_3:
   4110 		/* Use the shadow control register on these chips. */
   4111 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4112 		break;
   4113 	case WM_T_80003:
   4114 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4115 		sc->phy.acquire(sc);
   4116 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4117 		sc->phy.release(sc);
   4118 		break;
   4119 	case WM_T_ICH8:
   4120 	case WM_T_ICH9:
   4121 	case WM_T_ICH10:
   4122 	case WM_T_PCH:
   4123 	case WM_T_PCH2:
   4124 	case WM_T_PCH_LPT:
   4125 	case WM_T_PCH_SPT:
   4126 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4127 		if (wm_phy_resetisblocked(sc) == false) {
   4128 			/*
   4129 			 * Gate automatic PHY configuration by hardware on
   4130 			 * non-managed 82579
   4131 			 */
   4132 			if ((sc->sc_type == WM_T_PCH2)
   4133 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4134 				== 0))
   4135 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4136 
   4137 			reg |= CTRL_PHY_RESET;
   4138 			phy_reset = 1;
   4139 		} else
   4140 			printf("XXX reset is blocked!!!\n");
   4141 		sc->phy.acquire(sc);
   4142 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4143 		/* Don't insert a completion barrier during reset */
   4144 		delay(20*1000);
   4145 		mutex_exit(sc->sc_ich_phymtx);
   4146 		break;
   4147 	case WM_T_82580:
   4148 	case WM_T_I350:
   4149 	case WM_T_I354:
   4150 	case WM_T_I210:
   4151 	case WM_T_I211:
   4152 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4153 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4154 			CSR_WRITE_FLUSH(sc);
   4155 		delay(5000);
   4156 		break;
   4157 	case WM_T_82542_2_0:
   4158 	case WM_T_82542_2_1:
   4159 	case WM_T_82543:
   4160 	case WM_T_82540:
   4161 	case WM_T_82545:
   4162 	case WM_T_82546:
   4163 	case WM_T_82571:
   4164 	case WM_T_82572:
   4165 	case WM_T_82573:
   4166 	case WM_T_82574:
   4167 	case WM_T_82575:
   4168 	case WM_T_82576:
   4169 	case WM_T_82583:
   4170 	default:
   4171 		/* Everything else can safely use the documented method. */
   4172 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4173 		break;
   4174 	}
   4175 
   4176 	/* Must release the MDIO ownership after MAC reset */
   4177 	switch (sc->sc_type) {
   4178 	case WM_T_82573:
   4179 	case WM_T_82574:
   4180 	case WM_T_82583:
   4181 		if (error == 0)
   4182 			wm_put_hw_semaphore_82573(sc);
   4183 		break;
   4184 	default:
   4185 		break;
   4186 	}
   4187 
   4188 	if (phy_reset != 0)
   4189 		wm_get_cfg_done(sc);
   4190 
   4191 	/* reload EEPROM */
   4192 	switch (sc->sc_type) {
   4193 	case WM_T_82542_2_0:
   4194 	case WM_T_82542_2_1:
   4195 	case WM_T_82543:
   4196 	case WM_T_82544:
   4197 		delay(10);
   4198 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4199 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4200 		CSR_WRITE_FLUSH(sc);
   4201 		delay(2000);
   4202 		break;
   4203 	case WM_T_82540:
   4204 	case WM_T_82545:
   4205 	case WM_T_82545_3:
   4206 	case WM_T_82546:
   4207 	case WM_T_82546_3:
   4208 		delay(5*1000);
   4209 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4210 		break;
   4211 	case WM_T_82541:
   4212 	case WM_T_82541_2:
   4213 	case WM_T_82547:
   4214 	case WM_T_82547_2:
   4215 		delay(20000);
   4216 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4217 		break;
   4218 	case WM_T_82571:
   4219 	case WM_T_82572:
   4220 	case WM_T_82573:
   4221 	case WM_T_82574:
   4222 	case WM_T_82583:
   4223 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4224 			delay(10);
   4225 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4226 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4227 			CSR_WRITE_FLUSH(sc);
   4228 		}
   4229 		/* check EECD_EE_AUTORD */
   4230 		wm_get_auto_rd_done(sc);
   4231 		/*
    4232 		 * PHY configuration from the NVM starts only after
    4233 		 * EECD_AUTO_RD is set.
   4234 		 */
   4235 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4236 		    || (sc->sc_type == WM_T_82583))
   4237 			delay(25*1000);
   4238 		break;
   4239 	case WM_T_82575:
   4240 	case WM_T_82576:
   4241 	case WM_T_82580:
   4242 	case WM_T_I350:
   4243 	case WM_T_I354:
   4244 	case WM_T_I210:
   4245 	case WM_T_I211:
   4246 	case WM_T_80003:
   4247 		/* check EECD_EE_AUTORD */
   4248 		wm_get_auto_rd_done(sc);
   4249 		break;
   4250 	case WM_T_ICH8:
   4251 	case WM_T_ICH9:
   4252 	case WM_T_ICH10:
   4253 	case WM_T_PCH:
   4254 	case WM_T_PCH2:
   4255 	case WM_T_PCH_LPT:
   4256 	case WM_T_PCH_SPT:
   4257 		break;
   4258 	default:
   4259 		panic("%s: unknown type\n", __func__);
   4260 	}
   4261 
   4262 	/* Check whether EEPROM is present or not */
   4263 	switch (sc->sc_type) {
   4264 	case WM_T_82575:
   4265 	case WM_T_82576:
   4266 	case WM_T_82580:
   4267 	case WM_T_I350:
   4268 	case WM_T_I354:
   4269 	case WM_T_ICH8:
   4270 	case WM_T_ICH9:
   4271 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4272 			/* Not found */
   4273 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4274 			if (sc->sc_type == WM_T_82575)
   4275 				wm_reset_init_script_82575(sc);
   4276 		}
   4277 		break;
   4278 	default:
   4279 		break;
   4280 	}
   4281 
   4282 	if ((sc->sc_type == WM_T_82580)
   4283 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4284 		/* clear global device reset status bit */
   4285 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4286 	}
   4287 
   4288 	/* Clear any pending interrupt events. */
   4289 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4290 	reg = CSR_READ(sc, WMREG_ICR);
   4291 	if (sc->sc_nintrs > 1) {
   4292 		if (sc->sc_type != WM_T_82574) {
   4293 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4294 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4295 		} else
   4296 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4297 	}
   4298 
   4299 	/* reload sc_ctrl */
   4300 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4301 
   4302 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4303 		wm_set_eee_i350(sc);
   4304 
   4305 	/* Clear the host wakeup bit after lcd reset */
   4306 	if (sc->sc_type >= WM_T_PCH) {
   4307 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4308 		    BM_PORT_GEN_CFG);
   4309 		reg &= ~BM_WUC_HOST_WU_BIT;
   4310 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4311 		    BM_PORT_GEN_CFG, reg);
   4312 	}
   4313 
   4314 	/*
   4315 	 * For PCH, this write will make sure that any noise will be detected
   4316 	 * as a CRC error and be dropped rather than show up as a bad packet
   4317 	 * to the DMA engine
   4318 	 */
   4319 	if (sc->sc_type == WM_T_PCH)
   4320 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4321 
   4322 	if (sc->sc_type >= WM_T_82544)
   4323 		CSR_WRITE(sc, WMREG_WUC, 0);
   4324 
   4325 	wm_reset_mdicnfg_82580(sc);
   4326 
   4327 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4328 		wm_pll_workaround_i210(sc);
   4329 }
   4330 
   4331 /*
   4332  * wm_add_rxbuf:
   4333  *
    4334  *	Add a receive buffer to the indicated descriptor.
   4335  */
   4336 static int
   4337 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4338 {
   4339 	struct wm_softc *sc = rxq->rxq_sc;
   4340 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4341 	struct mbuf *m;
   4342 	int error;
   4343 
   4344 	KASSERT(mutex_owned(rxq->rxq_lock));
   4345 
   4346 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4347 	if (m == NULL)
   4348 		return ENOBUFS;
   4349 
   4350 	MCLGET(m, M_DONTWAIT);
   4351 	if ((m->m_flags & M_EXT) == 0) {
   4352 		m_freem(m);
   4353 		return ENOBUFS;
   4354 	}
   4355 
   4356 	if (rxs->rxs_mbuf != NULL)
   4357 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4358 
   4359 	rxs->rxs_mbuf = m;
   4360 
   4361 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4362 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4363 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4364 	if (error) {
   4365 		/* XXX XXX XXX */
   4366 		aprint_error_dev(sc->sc_dev,
   4367 		    "unable to load rx DMA map %d, error = %d\n",
   4368 		    idx, error);
   4369 		panic("wm_add_rxbuf");
   4370 	}
   4371 
   4372 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4373 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4374 
   4375 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4376 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4377 			wm_init_rxdesc(rxq, idx);
   4378 	} else
   4379 		wm_init_rxdesc(rxq, idx);
   4380 
   4381 	return 0;
   4382 }
   4383 
   4384 /*
   4385  * wm_rxdrain:
   4386  *
   4387  *	Drain the receive queue.
   4388  */
   4389 static void
   4390 wm_rxdrain(struct wm_rxqueue *rxq)
   4391 {
   4392 	struct wm_softc *sc = rxq->rxq_sc;
   4393 	struct wm_rxsoft *rxs;
   4394 	int i;
   4395 
   4396 	KASSERT(mutex_owned(rxq->rxq_lock));
   4397 
   4398 	for (i = 0; i < WM_NRXDESC; i++) {
   4399 		rxs = &rxq->rxq_soft[i];
   4400 		if (rxs->rxs_mbuf != NULL) {
   4401 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4402 			m_freem(rxs->rxs_mbuf);
   4403 			rxs->rxs_mbuf = NULL;
   4404 		}
   4405 	}
   4406 }
   4407 
   4408 
   4409 /*
    4410  * XXX copied from FreeBSD's sys/net/rss_config.c
   4411  */
   4412 /*
   4413  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4414  * effectiveness may be limited by algorithm choice and available entropy
   4415  * during the boot.
   4416  *
   4417  * XXXRW: And that we don't randomize it yet!
   4418  *
   4419  * This is the default Microsoft RSS specification key which is also
   4420  * the Chelsio T5 firmware default key.
   4421  */
   4422 #define RSS_KEYSIZE 40
   4423 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4424 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4425 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4426 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4427 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4428 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4429 };
   4430 
   4431 /*
    4432  * The caller must pass an array of size sizeof(rss_key).
    4433  *
    4434  * XXX
    4435  * As if_ixgbe may also use this function, it should not be
    4436  * an if_wm specific function.
   4437  */
   4438 static void
   4439 wm_rss_getkey(uint8_t *key)
   4440 {
   4441 
   4442 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4443 }
   4444 
   4445 /*
   4446  * Setup registers for RSS.
   4447  *
    4448  * XXX VMDq is not yet supported
   4449  */
   4450 static void
   4451 wm_init_rss(struct wm_softc *sc)
   4452 {
   4453 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4454 	int i;
   4455 
   4456 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4457 
   4458 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4459 		int qid, reta_ent;
   4460 
   4461 		qid  = i % sc->sc_nqueues;
    4462 		switch (sc->sc_type) {
   4463 		case WM_T_82574:
   4464 			reta_ent = __SHIFTIN(qid,
   4465 			    RETA_ENT_QINDEX_MASK_82574);
   4466 			break;
   4467 		case WM_T_82575:
   4468 			reta_ent = __SHIFTIN(qid,
   4469 			    RETA_ENT_QINDEX1_MASK_82575);
   4470 			break;
   4471 		default:
   4472 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4473 			break;
   4474 		}
   4475 
   4476 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4477 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4478 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4479 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4480 	}
   4481 
   4482 	wm_rss_getkey((uint8_t *)rss_key);
   4483 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4484 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4485 
   4486 	if (sc->sc_type == WM_T_82574)
   4487 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4488 	else
   4489 		mrqc = MRQC_ENABLE_RSS_MQ;
   4490 
   4491 	/*
   4492 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4493 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4494 	 */
   4495 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4496 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4497 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4498 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4499 
   4500 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4501 }
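/*
 * Worked example: with sc_nqueues = 4, the RETA loop above programs
 * entries 0,1,2,3,4,... to queues 0,1,2,3,0,... so received flows
 * are spread round-robin across the four queues according to their
 * RSS hash.
 */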
   4502 
   4503 /*
    4504  * Adjust the TX and RX queue numbers which the system actually uses.
    4505  *
    4506  * The numbers are affected by the following parameters:
    4507  *     - The number of hardware queues
   4508  *     - The number of MSI-X vectors (= "nvectors" argument)
   4509  *     - ncpu
   4510  */
   4511 static void
   4512 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4513 {
   4514 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4515 
   4516 	if (nvectors < 2) {
   4517 		sc->sc_nqueues = 1;
   4518 		return;
   4519 	}
   4520 
    4521 	switch (sc->sc_type) {
   4522 	case WM_T_82572:
   4523 		hw_ntxqueues = 2;
   4524 		hw_nrxqueues = 2;
   4525 		break;
   4526 	case WM_T_82574:
   4527 		hw_ntxqueues = 2;
   4528 		hw_nrxqueues = 2;
   4529 		break;
   4530 	case WM_T_82575:
   4531 		hw_ntxqueues = 4;
   4532 		hw_nrxqueues = 4;
   4533 		break;
   4534 	case WM_T_82576:
   4535 		hw_ntxqueues = 16;
   4536 		hw_nrxqueues = 16;
   4537 		break;
   4538 	case WM_T_82580:
   4539 	case WM_T_I350:
   4540 	case WM_T_I354:
   4541 		hw_ntxqueues = 8;
   4542 		hw_nrxqueues = 8;
   4543 		break;
   4544 	case WM_T_I210:
   4545 		hw_ntxqueues = 4;
   4546 		hw_nrxqueues = 4;
   4547 		break;
   4548 	case WM_T_I211:
   4549 		hw_ntxqueues = 2;
   4550 		hw_nrxqueues = 2;
   4551 		break;
   4552 		/*
    4553 		 * The following ethernet controllers do not support MSI-X,
    4554 		 * so this driver does not use multiqueue on them:
   4555 		 *     - WM_T_80003
   4556 		 *     - WM_T_ICH8
   4557 		 *     - WM_T_ICH9
   4558 		 *     - WM_T_ICH10
   4559 		 *     - WM_T_PCH
   4560 		 *     - WM_T_PCH2
   4561 		 *     - WM_T_PCH_LPT
   4562 		 */
   4563 	default:
   4564 		hw_ntxqueues = 1;
   4565 		hw_nrxqueues = 1;
   4566 		break;
   4567 	}
   4568 
   4569 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4570 
   4571 	/*
    4572 	 * Since more queues than MSI-X vectors cannot improve scaling, limit
    4573 	 * the number of queues actually used.
   4574 	 */
   4575 	if (nvectors < hw_nqueues + 1) {
   4576 		sc->sc_nqueues = nvectors - 1;
   4577 	} else {
   4578 		sc->sc_nqueues = hw_nqueues;
   4579 	}
   4580 
   4581 	/*
    4582 	 * Similarly, since more queues than CPUs cannot improve scaling,
    4583 	 * limit the number of queues actually used.
   4584 	 */
   4585 	if (ncpu < sc->sc_nqueues)
   4586 		sc->sc_nqueues = ncpu;
   4587 }
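/*
 * Worked example: on an 82576 (16 hardware queue pairs) with
 * nvectors = 5 and ncpu = 8, hw_nqueues = 16, so sc_nqueues is
 * clamped to nvectors - 1 = 4 and stays 4 after the ncpu check.
 */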
   4588 
   4589 static int
   4590 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4591 {
   4592 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4593 	wmq->wmq_id = qidx;
   4594 	wmq->wmq_intr_idx = intr_idx;
   4595 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4596 #ifdef WM_MPSAFE
   4597 	    | SOFTINT_MPSAFE
   4598 #endif
   4599 	    , wm_handle_queue, wmq);
   4600 	if (wmq->wmq_si != NULL)
   4601 		return 0;
   4602 
   4603 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4604 	    wmq->wmq_id);
   4605 
   4606 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4607 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4608 	return ENOMEM;
   4609 }
   4610 
   4611 /*
    4612  * Both single-interrupt MSI and INTx can use this function.
   4613  */
   4614 static int
   4615 wm_setup_legacy(struct wm_softc *sc)
   4616 {
   4617 	pci_chipset_tag_t pc = sc->sc_pc;
   4618 	const char *intrstr = NULL;
   4619 	char intrbuf[PCI_INTRSTR_LEN];
   4620 	int error;
   4621 
   4622 	error = wm_alloc_txrx_queues(sc);
   4623 	if (error) {
   4624 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4625 		    error);
   4626 		return ENOMEM;
   4627 	}
   4628 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4629 	    sizeof(intrbuf));
   4630 #ifdef WM_MPSAFE
   4631 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4632 #endif
   4633 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4634 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4635 	if (sc->sc_ihs[0] == NULL) {
   4636 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4637 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4638 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4639 		return ENOMEM;
   4640 	}
   4641 
   4642 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4643 	sc->sc_nintrs = 1;
   4644 
   4645 	return wm_softint_establish(sc, 0, 0);
   4646 }
   4647 
   4648 static int
   4649 wm_setup_msix(struct wm_softc *sc)
   4650 {
   4651 	void *vih;
   4652 	kcpuset_t *affinity;
   4653 	int qidx, error, intr_idx, txrx_established;
   4654 	pci_chipset_tag_t pc = sc->sc_pc;
   4655 	const char *intrstr = NULL;
   4656 	char intrbuf[PCI_INTRSTR_LEN];
   4657 	char intr_xname[INTRDEVNAMEBUF];
   4658 
   4659 	if (sc->sc_nqueues < ncpu) {
   4660 		/*
   4661 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4662 		 * interrupts starts at CPU#1.
   4663 		 */
   4664 		sc->sc_affinity_offset = 1;
   4665 	} else {
   4666 		/*
    4667 		 * In this case, this device uses all CPUs. So, we align the
    4668 		 * affinity cpu_index with the MSI-X vector number for readability.
   4669 		 */
   4670 		sc->sc_affinity_offset = 0;
   4671 	}
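         	/*
         	 * Illustrative example (assumed values): with sc_nqueues == 4 and
         	 * ncpu == 8, the TXRX0..TXRX3 vectors established below get
         	 * round-robin affinity to CPU1..CPU4, leaving CPU0 for other
         	 * devices' interrupts.
         	 */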
   4672 
   4673 	error = wm_alloc_txrx_queues(sc);
   4674 	if (error) {
   4675 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4676 		    error);
   4677 		return ENOMEM;
   4678 	}
   4679 
   4680 	kcpuset_create(&affinity, false);
   4681 	intr_idx = 0;
   4682 
   4683 	/*
   4684 	 * TX and RX
   4685 	 */
   4686 	txrx_established = 0;
   4687 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4688 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4689 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4690 
   4691 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4692 		    sizeof(intrbuf));
   4693 #ifdef WM_MPSAFE
   4694 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4695 		    PCI_INTR_MPSAFE, true);
   4696 #endif
   4697 		memset(intr_xname, 0, sizeof(intr_xname));
   4698 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4699 		    device_xname(sc->sc_dev), qidx);
   4700 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4701 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4702 		if (vih == NULL) {
   4703 			aprint_error_dev(sc->sc_dev,
   4704 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4705 			    intrstr ? " at " : "",
   4706 			    intrstr ? intrstr : "");
   4707 
   4708 			goto fail;
   4709 		}
   4710 		kcpuset_zero(affinity);
   4711 		/* Round-robin affinity */
   4712 		kcpuset_set(affinity, affinity_to);
   4713 		error = interrupt_distribute(vih, affinity, NULL);
   4714 		if (error == 0) {
   4715 			aprint_normal_dev(sc->sc_dev,
   4716 			    "for TX and RX interrupting at %s affinity to %u\n",
   4717 			    intrstr, affinity_to);
   4718 		} else {
   4719 			aprint_normal_dev(sc->sc_dev,
   4720 			    "for TX and RX interrupting at %s\n", intrstr);
   4721 		}
   4722 		sc->sc_ihs[intr_idx] = vih;
   4723 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4724 			goto fail;
   4725 		txrx_established++;
   4726 		intr_idx++;
   4727 	}
   4728 
   4729 	/*
   4730 	 * LINK
   4731 	 */
   4732 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4733 	    sizeof(intrbuf));
   4734 #ifdef WM_MPSAFE
   4735 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4736 #endif
   4737 	memset(intr_xname, 0, sizeof(intr_xname));
   4738 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4739 	    device_xname(sc->sc_dev));
   4740 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4741 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4742 	if (vih == NULL) {
   4743 		aprint_error_dev(sc->sc_dev,
   4744 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4745 		    intrstr ? " at " : "",
   4746 		    intrstr ? intrstr : "");
   4747 
   4748 		goto fail;
   4749 	}
    4750 	/* Keep the default affinity for the LINK interrupt. */
   4751 	aprint_normal_dev(sc->sc_dev,
   4752 	    "for LINK interrupting at %s\n", intrstr);
   4753 	sc->sc_ihs[intr_idx] = vih;
   4754 	sc->sc_link_intr_idx = intr_idx;
   4755 
   4756 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4757 	kcpuset_destroy(affinity);
   4758 	return 0;
   4759 
   4760  fail:
   4761 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4762 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4763 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4764 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4765 	}
   4766 
   4767 	kcpuset_destroy(affinity);
   4768 	return ENOMEM;
   4769 }
   4770 
   4771 static void
   4772 wm_turnon(struct wm_softc *sc)
   4773 {
   4774 	int i;
   4775 
   4776 	KASSERT(WM_CORE_LOCKED(sc));
   4777 
   4778 	/*
    4779 	 * Must unset the stopping flags in ascending order.
    4780 	 */
    4781 	for (i = 0; i < sc->sc_nqueues; i++) {
   4782 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4783 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4784 
   4785 		mutex_enter(txq->txq_lock);
   4786 		txq->txq_stopping = false;
   4787 		mutex_exit(txq->txq_lock);
   4788 
   4789 		mutex_enter(rxq->rxq_lock);
   4790 		rxq->rxq_stopping = false;
   4791 		mutex_exit(rxq->rxq_lock);
   4792 	}
   4793 
   4794 	sc->sc_core_stopping = false;
   4795 }
   4796 
   4797 static void
   4798 wm_turnoff(struct wm_softc *sc)
   4799 {
   4800 	int i;
   4801 
   4802 	KASSERT(WM_CORE_LOCKED(sc));
   4803 
   4804 	sc->sc_core_stopping = true;
   4805 
   4806 	/*
    4807 	 * Must set the stopping flags in ascending order.
    4808 	 */
    4809 	for (i = 0; i < sc->sc_nqueues; i++) {
   4810 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4811 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4812 
   4813 		mutex_enter(rxq->rxq_lock);
   4814 		rxq->rxq_stopping = true;
   4815 		mutex_exit(rxq->rxq_lock);
   4816 
   4817 		mutex_enter(txq->txq_lock);
   4818 		txq->txq_stopping = true;
   4819 		mutex_exit(txq->txq_lock);
   4820 	}
   4821 }
   4822 
   4823 /*
    4824  * Write the interrupt interval value to the ITR or EITR register.
   4825  */
   4826 static void
   4827 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4828 {
   4829 
   4830 	if (!wmq->wmq_set_itr)
   4831 		return;
   4832 
   4833 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4834 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4835 
   4836 		/*
    4837 		 * The 82575 doesn't have the CNT_INGR field.
    4838 		 * So, overwrite the counter field in software.
   4839 		 */
   4840 		if (sc->sc_type == WM_T_82575)
   4841 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4842 		else
   4843 			eitr |= EITR_CNT_INGR;
   4844 
   4845 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4846 	} else if (sc->sc_type == WM_T_82574 && sc->sc_nintrs > 1) {
   4847 		/*
    4848 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4849 		 * the multiqueue function with MSI-X.
   4850 		 */
   4851 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4852 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4853 	} else {
   4854 		KASSERT(wmq->wmq_id == 0);
   4855 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4856 	}
   4857 
   4858 	wmq->wmq_set_itr = false;
   4859 }
   4860 
   4861 /*
   4862  * TODO
    4863  * The dynamic ITR calculation below is almost the same as in Linux
    4864  * igb, however it does not fit wm(4). So, AIM is kept disabled until
    4865  * we find an appropriate ITR calculation.
   4866  */
   4867 /*
    4868  * Calculate the interrupt interval value to be written to the register
    4869  * in wm_itrs_writereg(). It does not write the ITR/EITR register itself.
   4870  */
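         /*
          * Worked example of the disabled heuristic below (assumed traffic
          * mix): an average packet of 600 bytes becomes 624 after the 24-byte
          * CRC/preamble/gap adjustment; 300 < 624 < 1200, so new_itr =
          * 624 / 3 = 208, which is then multiplied by 4 except on the 82575.
          */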
   4871 static void
   4872 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4873 {
   4874 #ifdef NOTYET
   4875 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4876 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4877 	uint32_t avg_size = 0;
   4878 	uint32_t new_itr;
   4879 
   4880 	if (rxq->rxq_packets)
   4881 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4882 	if (txq->txq_packets)
   4883 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4884 
   4885 	if (avg_size == 0) {
   4886 		new_itr = 450; /* restore default value */
   4887 		goto out;
   4888 	}
   4889 
   4890 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4891 	avg_size += 24;
   4892 
   4893 	/* Don't starve jumbo frames */
   4894 	avg_size = min(avg_size, 3000);
   4895 
   4896 	/* Give a little boost to mid-size frames */
   4897 	if ((avg_size > 300) && (avg_size < 1200))
   4898 		new_itr = avg_size / 3;
   4899 	else
   4900 		new_itr = avg_size / 2;
   4901 
   4902 out:
   4903 	/*
    4904 	 * The usage of the 82574 and 82575 EITR is different from other
    4905 	 * NEWQUEUE controllers'. See sc->sc_itr_init setting in wm_init_locked().
   4906 	 */
   4907 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4908 		new_itr *= 4;
   4909 
   4910 	if (new_itr != wmq->wmq_itr) {
   4911 		wmq->wmq_itr = new_itr;
   4912 		wmq->wmq_set_itr = true;
   4913 	} else
   4914 		wmq->wmq_set_itr = false;
   4915 
   4916 	rxq->rxq_packets = 0;
   4917 	rxq->rxq_bytes = 0;
   4918 	txq->txq_packets = 0;
   4919 	txq->txq_bytes = 0;
   4920 #endif
   4921 }
   4922 
   4923 /*
   4924  * wm_init:		[ifnet interface function]
   4925  *
   4926  *	Initialize the interface.
   4927  */
   4928 static int
   4929 wm_init(struct ifnet *ifp)
   4930 {
   4931 	struct wm_softc *sc = ifp->if_softc;
   4932 	int ret;
   4933 
   4934 	WM_CORE_LOCK(sc);
   4935 	ret = wm_init_locked(ifp);
   4936 	WM_CORE_UNLOCK(sc);
   4937 
   4938 	return ret;
   4939 }
   4940 
   4941 static int
   4942 wm_init_locked(struct ifnet *ifp)
   4943 {
   4944 	struct wm_softc *sc = ifp->if_softc;
   4945 	int i, j, trynum, error = 0;
   4946 	uint32_t reg;
   4947 
   4948 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4949 		device_xname(sc->sc_dev), __func__));
   4950 	KASSERT(WM_CORE_LOCKED(sc));
   4951 
   4952 	/*
    4953 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4954 	 * There is a small but measurable benefit to avoiding the adjustment
    4955 	 * of the descriptor so that the headers are aligned, for normal MTU,
   4956 	 * on such platforms.  One possibility is that the DMA itself is
   4957 	 * slightly more efficient if the front of the entire packet (instead
   4958 	 * of the front of the headers) is aligned.
   4959 	 *
   4960 	 * Note we must always set align_tweak to 0 if we are using
   4961 	 * jumbo frames.
   4962 	 */
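         	/*
         	 * (Background note: the usual 2-byte tweak shifts the 14-byte
         	 * Ethernet header so that the IP header that follows it starts
         	 * on a 4-byte boundary.)
         	 */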
   4963 #ifdef __NO_STRICT_ALIGNMENT
   4964 	sc->sc_align_tweak = 0;
   4965 #else
   4966 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4967 		sc->sc_align_tweak = 0;
   4968 	else
   4969 		sc->sc_align_tweak = 2;
   4970 #endif /* __NO_STRICT_ALIGNMENT */
   4971 
   4972 	/* Cancel any pending I/O. */
   4973 	wm_stop_locked(ifp, 0);
   4974 
   4975 	/* update statistics before reset */
   4976 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4977 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4978 
   4979 	/* PCH_SPT hardware workaround */
   4980 	if (sc->sc_type == WM_T_PCH_SPT)
   4981 		wm_flush_desc_rings(sc);
   4982 
   4983 	/* Reset the chip to a known state. */
   4984 	wm_reset(sc);
   4985 
   4986 	/* AMT based hardware can now take control from firmware */
   4987 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4988 		wm_get_hw_control(sc);
   4989 
   4990 	/* Init hardware bits */
   4991 	wm_initialize_hardware_bits(sc);
   4992 
   4993 	/* Reset the PHY. */
   4994 	if (sc->sc_flags & WM_F_HAS_MII)
   4995 		wm_gmii_reset(sc);
   4996 
   4997 	/* Calculate (E)ITR value */
   4998 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   4999 		/*
    5000 		 * For NEWQUEUE's EITR (except for the 82575).
    5001 		 * The 82575's EITR should be set to the same throttling value as
    5002 		 * other old controllers' ITR because the interrupts/sec calculation
    5003 		 * is the same, that is, 1,000,000,000 / (N * 256).
    5004 		 *
    5005 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5006 		 *
    5007 		 * For N interrupts/sec, set this value to:
    5008 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5009 		 */
   5010 		sc->sc_itr_init = 450;
   5011 	} else if (sc->sc_type >= WM_T_82543) {
   5012 		/*
   5013 		 * Set up the interrupt throttling register (units of 256ns)
   5014 		 * Note that a footnote in Intel's documentation says this
   5015 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5016 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5017 		 * that that is also true for the 1024ns units of the other
   5018 		 * interrupt-related timer registers -- so, really, we ought
   5019 		 * to divide this value by 4 when the link speed is low.
   5020 		 *
   5021 		 * XXX implement this division at link speed change!
   5022 		 */
   5023 
   5024 		/*
   5025 		 * For N interrupts/sec, set this value to:
   5026 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5027 		 * absolute and packet timer values to this value
   5028 		 * divided by 4 to get "simple timer" behavior.
   5029 		 */
   5030 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5031 	}
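         	/*
         	 * Worked numbers (derived from the formulas above): an EITR init
         	 * value of 450 throttles to roughly 1,000,000 / 450 ~= 2222
         	 * interrupts/sec, while an ITR init value of 1500 throttles to
         	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec.
         	 */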
   5032 
   5033 	error = wm_init_txrx_queues(sc);
   5034 	if (error)
   5035 		goto out;
   5036 
   5037 	/*
   5038 	 * Clear out the VLAN table -- we don't use it (yet).
   5039 	 */
   5040 	CSR_WRITE(sc, WMREG_VET, 0);
   5041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5042 		trynum = 10; /* Due to hw errata */
   5043 	else
   5044 		trynum = 1;
   5045 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5046 		for (j = 0; j < trynum; j++)
   5047 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5048 
   5049 	/*
   5050 	 * Set up flow-control parameters.
   5051 	 *
   5052 	 * XXX Values could probably stand some tuning.
   5053 	 */
   5054 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5055 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5056 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5057 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5058 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5059 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5060 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5061 	}
   5062 
   5063 	sc->sc_fcrtl = FCRTL_DFLT;
   5064 	if (sc->sc_type < WM_T_82543) {
   5065 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5066 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5067 	} else {
   5068 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5069 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5070 	}
   5071 
   5072 	if (sc->sc_type == WM_T_80003)
   5073 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5074 	else
   5075 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5076 
   5077 	/* Writes the control register. */
   5078 	wm_set_vlan(sc);
   5079 
   5080 	if (sc->sc_flags & WM_F_HAS_MII) {
   5081 		int val;
   5082 
   5083 		switch (sc->sc_type) {
   5084 		case WM_T_80003:
   5085 		case WM_T_ICH8:
   5086 		case WM_T_ICH9:
   5087 		case WM_T_ICH10:
   5088 		case WM_T_PCH:
   5089 		case WM_T_PCH2:
   5090 		case WM_T_PCH_LPT:
   5091 		case WM_T_PCH_SPT:
   5092 			/*
    5093 			 * Set the MAC to wait the maximum time between each
    5094 			 * iteration and increase the maximum iterations when
    5095 			 * polling the PHY; this fixes erroneous timeouts at
   5096 			 * 10Mbps.
   5097 			 */
   5098 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5099 			    0xFFFF);
   5100 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5101 			val |= 0x3F;
   5102 			wm_kmrn_writereg(sc,
   5103 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5104 			break;
   5105 		default:
   5106 			break;
   5107 		}
   5108 
   5109 		if (sc->sc_type == WM_T_80003) {
   5110 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5111 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5112 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5113 
    5114 			/* Bypass the RX and TX FIFOs. */
   5115 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5116 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5117 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5118 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5119 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5120 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5121 		}
   5122 	}
   5123 #if 0
   5124 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5125 #endif
   5126 
   5127 	/* Set up checksum offload parameters. */
   5128 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5129 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5130 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5131 		reg |= RXCSUM_IPOFL;
   5132 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5133 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5134 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5135 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5136 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5137 
   5138 	/* Set up MSI-X */
   5139 	if (sc->sc_nintrs > 1) {
   5140 		uint32_t ivar;
   5141 		struct wm_queue *wmq;
   5142 		int qid, qintr_idx;
   5143 
   5144 		if (sc->sc_type == WM_T_82575) {
   5145 			/* Interrupt control */
   5146 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5147 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5149 
   5150 			/* TX and RX */
   5151 			for (i = 0; i < sc->sc_nqueues; i++) {
   5152 				wmq = &sc->sc_queue[i];
   5153 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5154 				    EITR_TX_QUEUE(wmq->wmq_id)
   5155 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5156 			}
   5157 			/* Link status */
   5158 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5159 			    EITR_OTHER);
   5160 		} else if (sc->sc_type == WM_T_82574) {
   5161 			/* Interrupt control */
   5162 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5163 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5164 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5165 
   5166 			/*
    5167 			 * Work around an issue with spurious interrupts
    5168 			 * in MSI-X mode.
    5169 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5170 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5171 			 */
   5172 			reg = CSR_READ(sc, WMREG_RFCTL);
   5173 			reg |= WMREG_RFCTL_ACKDIS;
   5174 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5175 
   5176 			ivar = 0;
   5177 			/* TX and RX */
   5178 			for (i = 0; i < sc->sc_nqueues; i++) {
   5179 				wmq = &sc->sc_queue[i];
   5180 				qid = wmq->wmq_id;
   5181 				qintr_idx = wmq->wmq_intr_idx;
   5182 
   5183 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5184 				    IVAR_TX_MASK_Q_82574(qid));
   5185 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5186 				    IVAR_RX_MASK_Q_82574(qid));
   5187 			}
   5188 			/* Link status */
   5189 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5190 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5191 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5192 		} else {
   5193 			/* Interrupt control */
   5194 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5195 			    | GPIE_EIAME | GPIE_PBA);
   5196 
   5197 			switch (sc->sc_type) {
   5198 			case WM_T_82580:
   5199 			case WM_T_I350:
   5200 			case WM_T_I354:
   5201 			case WM_T_I210:
   5202 			case WM_T_I211:
   5203 				/* TX and RX */
   5204 				for (i = 0; i < sc->sc_nqueues; i++) {
   5205 					wmq = &sc->sc_queue[i];
   5206 					qid = wmq->wmq_id;
   5207 					qintr_idx = wmq->wmq_intr_idx;
   5208 
   5209 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5210 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5211 					ivar |= __SHIFTIN((qintr_idx
   5212 						| IVAR_VALID),
   5213 					    IVAR_TX_MASK_Q(qid));
   5214 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5215 					ivar |= __SHIFTIN((qintr_idx
   5216 						| IVAR_VALID),
   5217 					    IVAR_RX_MASK_Q(qid));
   5218 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5219 				}
   5220 				break;
   5221 			case WM_T_82576:
   5222 				/* TX and RX */
   5223 				for (i = 0; i < sc->sc_nqueues; i++) {
   5224 					wmq = &sc->sc_queue[i];
   5225 					qid = wmq->wmq_id;
   5226 					qintr_idx = wmq->wmq_intr_idx;
   5227 
   5228 					ivar = CSR_READ(sc,
   5229 					    WMREG_IVAR_Q_82576(qid));
   5230 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5231 					ivar |= __SHIFTIN((qintr_idx
   5232 						| IVAR_VALID),
   5233 					    IVAR_TX_MASK_Q_82576(qid));
   5234 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5235 					ivar |= __SHIFTIN((qintr_idx
   5236 						| IVAR_VALID),
   5237 					    IVAR_RX_MASK_Q_82576(qid));
   5238 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5239 					    ivar);
   5240 				}
   5241 				break;
   5242 			default:
   5243 				break;
   5244 			}
   5245 
   5246 			/* Link status */
   5247 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5248 			    IVAR_MISC_OTHER);
   5249 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5250 		}
   5251 
   5252 		if (sc->sc_nqueues > 1) {
   5253 			wm_init_rss(sc);
   5254 
   5255 			/*
    5256 			 * NOTE: Receive Full-Packet Checksum Offload
    5257 			 * is mutually exclusive with multiqueue. However,
    5258 			 * this is not the same as the TCP/IP checksum
    5259 			 * offloads, which still work.
    5260 			 */
   5261 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5262 			reg |= RXCSUM_PCSD;
   5263 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5264 		}
   5265 	}
   5266 
   5267 	/* Set up the interrupt registers. */
   5268 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5269 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5270 	    ICR_RXO | ICR_RXT0;
   5271 	if (sc->sc_nintrs > 1) {
   5272 		uint32_t mask;
   5273 		struct wm_queue *wmq;
   5274 
   5275 		switch (sc->sc_type) {
   5276 		case WM_T_82574:
   5277 			mask = 0;
   5278 			for (i = 0; i < sc->sc_nqueues; i++) {
   5279 				wmq = &sc->sc_queue[i];
   5280 				mask |= ICR_TXQ(wmq->wmq_id);
   5281 				mask |= ICR_RXQ(wmq->wmq_id);
   5282 			}
   5283 			mask |= ICR_OTHER;
   5284 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5285 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5286 			break;
   5287 		default:
   5288 			if (sc->sc_type == WM_T_82575) {
   5289 				mask = 0;
   5290 				for (i = 0; i < sc->sc_nqueues; i++) {
   5291 					wmq = &sc->sc_queue[i];
   5292 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5293 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5294 				}
   5295 				mask |= EITR_OTHER;
   5296 			} else {
   5297 				mask = 0;
   5298 				for (i = 0; i < sc->sc_nqueues; i++) {
   5299 					wmq = &sc->sc_queue[i];
   5300 					mask |= 1 << wmq->wmq_intr_idx;
   5301 				}
   5302 				mask |= 1 << sc->sc_link_intr_idx;
   5303 			}
   5304 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5305 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5306 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5307 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5308 			break;
   5309 		}
   5310 	} else
   5311 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5312 
   5313 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5314 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5315 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5316 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5317 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5318 		reg |= KABGTXD_BGSQLBIAS;
   5319 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5320 	}
   5321 
   5322 	/* Set up the inter-packet gap. */
   5323 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5324 
   5325 	if (sc->sc_type >= WM_T_82543) {
   5326 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5327 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5328 			wm_itrs_writereg(sc, wmq);
   5329 		}
   5330 		/*
    5331 		 * Link interrupts occur much less often than TX
    5332 		 * and RX interrupts. So, we don't tune the
    5333 		 * EITR(WM_MSIX_LINKINTR_IDX) value as
    5334 		 * FreeBSD's if_igb does.
   5335 		 */
   5336 	}
   5337 
   5338 	/* Set the VLAN ethernetype. */
   5339 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5340 
   5341 	/*
   5342 	 * Set up the transmit control register; we start out with
    5343 	 * a collision distance suitable for FDX, but update it when
   5344 	 * we resolve the media type.
   5345 	 */
   5346 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5347 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5348 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5349 	if (sc->sc_type >= WM_T_82571)
   5350 		sc->sc_tctl |= TCTL_MULR;
   5351 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5352 
   5353 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5354 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5355 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5356 	}
   5357 
   5358 	if (sc->sc_type == WM_T_80003) {
   5359 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5360 		reg &= ~TCTL_EXT_GCEX_MASK;
   5361 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5362 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5363 	}
   5364 
   5365 	/* Set the media. */
   5366 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5367 		goto out;
   5368 
   5369 	/* Configure for OS presence */
   5370 	wm_init_manageability(sc);
   5371 
   5372 	/*
   5373 	 * Set up the receive control register; we actually program
   5374 	 * the register when we set the receive filter.  Use multicast
   5375 	 * address offset type 0.
   5376 	 *
   5377 	 * Only the i82544 has the ability to strip the incoming
   5378 	 * CRC, so we don't enable that feature.
   5379 	 */
   5380 	sc->sc_mchash_type = 0;
   5381 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5382 	    | RCTL_MO(sc->sc_mchash_type);
   5383 
   5384 	/*
    5385 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5386 	 */
   5387 	if (sc->sc_type == WM_T_82574)
   5388 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5389 
   5390 	/*
   5391 	 * The I350 has a bug where it always strips the CRC whether
    5392 	 * asked to or not. So ask for stripped CRC here and cope in rxeof().
   5393 	 */
   5394 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5395 	    || (sc->sc_type == WM_T_I210))
   5396 		sc->sc_rctl |= RCTL_SECRC;
   5397 
   5398 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5399 	    && (ifp->if_mtu > ETHERMTU)) {
   5400 		sc->sc_rctl |= RCTL_LPE;
   5401 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5402 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5403 	}
   5404 
   5405 	if (MCLBYTES == 2048) {
   5406 		sc->sc_rctl |= RCTL_2k;
   5407 	} else {
   5408 		if (sc->sc_type >= WM_T_82543) {
   5409 			switch (MCLBYTES) {
   5410 			case 4096:
   5411 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5412 				break;
   5413 			case 8192:
   5414 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5415 				break;
   5416 			case 16384:
   5417 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5418 				break;
   5419 			default:
   5420 				panic("wm_init: MCLBYTES %d unsupported",
   5421 				    MCLBYTES);
   5422 				break;
   5423 			}
   5424 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5425 	}
   5426 
   5427 	/* Set the receive filter. */
   5428 	wm_set_filter(sc);
   5429 
   5430 	/* Enable ECC */
   5431 	switch (sc->sc_type) {
   5432 	case WM_T_82571:
   5433 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5434 		reg |= PBA_ECC_CORR_EN;
   5435 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5436 		break;
   5437 	case WM_T_PCH_LPT:
   5438 	case WM_T_PCH_SPT:
   5439 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5440 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5441 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5442 
   5443 		sc->sc_ctrl |= CTRL_MEHE;
   5444 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5445 		break;
   5446 	default:
   5447 		break;
   5448 	}
   5449 
    5450 	/* On the 82575 and later, set RDT only if RX is enabled. */
   5451 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5452 		int qidx;
   5453 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5454 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5455 			for (i = 0; i < WM_NRXDESC; i++) {
   5456 				mutex_enter(rxq->rxq_lock);
   5457 				wm_init_rxdesc(rxq, i);
   5458 				mutex_exit(rxq->rxq_lock);
   5459 
   5460 			}
   5461 		}
   5462 	}
   5463 
   5464 	wm_turnon(sc);
   5465 
   5466 	/* Start the one second link check clock. */
   5467 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5468 
   5469 	/* ...all done! */
   5470 	ifp->if_flags |= IFF_RUNNING;
   5471 	ifp->if_flags &= ~IFF_OACTIVE;
   5472 
   5473  out:
   5474 	sc->sc_if_flags = ifp->if_flags;
   5475 	if (error)
   5476 		log(LOG_ERR, "%s: interface not running\n",
   5477 		    device_xname(sc->sc_dev));
   5478 	return error;
   5479 }
   5480 
   5481 /*
   5482  * wm_stop:		[ifnet interface function]
   5483  *
   5484  *	Stop transmission on the interface.
   5485  */
   5486 static void
   5487 wm_stop(struct ifnet *ifp, int disable)
   5488 {
   5489 	struct wm_softc *sc = ifp->if_softc;
   5490 
   5491 	WM_CORE_LOCK(sc);
   5492 	wm_stop_locked(ifp, disable);
   5493 	WM_CORE_UNLOCK(sc);
   5494 }
   5495 
   5496 static void
   5497 wm_stop_locked(struct ifnet *ifp, int disable)
   5498 {
   5499 	struct wm_softc *sc = ifp->if_softc;
   5500 	struct wm_txsoft *txs;
   5501 	int i, qidx;
   5502 
   5503 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5504 		device_xname(sc->sc_dev), __func__));
   5505 	KASSERT(WM_CORE_LOCKED(sc));
   5506 
   5507 	wm_turnoff(sc);
   5508 
   5509 	/* Stop the one second clock. */
   5510 	callout_stop(&sc->sc_tick_ch);
   5511 
   5512 	/* Stop the 82547 Tx FIFO stall check timer. */
   5513 	if (sc->sc_type == WM_T_82547)
   5514 		callout_stop(&sc->sc_txfifo_ch);
   5515 
   5516 	if (sc->sc_flags & WM_F_HAS_MII) {
   5517 		/* Down the MII. */
   5518 		mii_down(&sc->sc_mii);
   5519 	} else {
   5520 #if 0
   5521 		/* Should we clear PHY's status properly? */
   5522 		wm_reset(sc);
   5523 #endif
   5524 	}
   5525 
   5526 	/* Stop the transmit and receive processes. */
   5527 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5528 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5529 	sc->sc_rctl &= ~RCTL_EN;
   5530 
   5531 	/*
   5532 	 * Clear the interrupt mask to ensure the device cannot assert its
   5533 	 * interrupt line.
   5534 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5535 	 * service any currently pending or shared interrupt.
   5536 	 */
   5537 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5538 	sc->sc_icr = 0;
   5539 	if (sc->sc_nintrs > 1) {
   5540 		if (sc->sc_type != WM_T_82574) {
   5541 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5542 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5543 		} else
   5544 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5545 	}
   5546 
   5547 	/* Release any queued transmit buffers. */
   5548 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5549 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5550 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5551 		mutex_enter(txq->txq_lock);
   5552 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5553 			txs = &txq->txq_soft[i];
   5554 			if (txs->txs_mbuf != NULL) {
   5555 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5556 				m_freem(txs->txs_mbuf);
   5557 				txs->txs_mbuf = NULL;
   5558 			}
   5559 		}
   5560 		mutex_exit(txq->txq_lock);
   5561 	}
   5562 
   5563 	/* Mark the interface as down and cancel the watchdog timer. */
   5564 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5565 	ifp->if_timer = 0;
   5566 
   5567 	if (disable) {
   5568 		for (i = 0; i < sc->sc_nqueues; i++) {
   5569 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5570 			mutex_enter(rxq->rxq_lock);
   5571 			wm_rxdrain(rxq);
   5572 			mutex_exit(rxq->rxq_lock);
   5573 		}
   5574 	}
   5575 
   5576 #if 0 /* notyet */
   5577 	if (sc->sc_type >= WM_T_82544)
   5578 		CSR_WRITE(sc, WMREG_WUC, 0);
   5579 #endif
   5580 }
   5581 
   5582 static void
   5583 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5584 {
   5585 	struct mbuf *m;
   5586 	int i;
   5587 
   5588 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5589 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5590 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5591 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5592 		    m->m_data, m->m_len, m->m_flags);
   5593 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5594 	    i, i == 1 ? "" : "s");
   5595 }
   5596 
   5597 /*
   5598  * wm_82547_txfifo_stall:
   5599  *
   5600  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5601  *	reset the FIFO pointers, and restart packet transmission.
   5602  */
   5603 static void
   5604 wm_82547_txfifo_stall(void *arg)
   5605 {
   5606 	struct wm_softc *sc = arg;
   5607 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5608 
   5609 	mutex_enter(txq->txq_lock);
   5610 
   5611 	if (txq->txq_stopping)
   5612 		goto out;
   5613 
   5614 	if (txq->txq_fifo_stall) {
   5615 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5616 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5617 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5618 			/*
   5619 			 * Packets have drained.  Stop transmitter, reset
   5620 			 * FIFO pointers, restart transmitter, and kick
   5621 			 * the packet queue.
   5622 			 */
   5623 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5624 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5625 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5626 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5627 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5628 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5629 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5630 			CSR_WRITE_FLUSH(sc);
   5631 
   5632 			txq->txq_fifo_head = 0;
   5633 			txq->txq_fifo_stall = 0;
   5634 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5635 		} else {
   5636 			/*
   5637 			 * Still waiting for packets to drain; try again in
   5638 			 * another tick.
   5639 			 */
   5640 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5641 		}
   5642 	}
   5643 
   5644 out:
   5645 	mutex_exit(txq->txq_lock);
   5646 }
   5647 
   5648 /*
   5649  * wm_82547_txfifo_bugchk:
   5650  *
    5651  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
    5652  *	prevent enqueueing a packet that would wrap around the end
    5653  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5654  *
   5655  *	We do this by checking the amount of space before the end
   5656  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5657  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5658  *	the internal FIFO pointers to the beginning, and restart
   5659  *	transmission on the interface.
   5660  */
   5661 #define	WM_FIFO_HDR		0x10
   5662 #define	WM_82547_PAD_LEN	0x3e0
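         /*
          * Illustrative check (assumed FIFO geometry, not chip-verified
          * numbers): a 1514-byte packet rounds up to len = 1536.  If
          * txq_fifo_size were 8192 and txq_fifo_head 7168, space would be
          * 1024; 1536 < 0x3e0 (992) + 1024 = 2016, so the packet is sent and
          * the head wraps to 512.  With txq_fifo_head at 7680 (space 512),
          * 1536 >= 992 + 512 = 1504 and the queue stalls instead.
          */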
   5663 static int
   5664 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5665 {
   5666 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5667 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5668 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5669 
   5670 	/* Just return if already stalled. */
   5671 	if (txq->txq_fifo_stall)
   5672 		return 1;
   5673 
   5674 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5675 		/* Stall only occurs in half-duplex mode. */
   5676 		goto send_packet;
   5677 	}
   5678 
   5679 	if (len >= WM_82547_PAD_LEN + space) {
   5680 		txq->txq_fifo_stall = 1;
   5681 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5682 		return 1;
   5683 	}
   5684 
   5685  send_packet:
   5686 	txq->txq_fifo_head += len;
   5687 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5688 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5689 
   5690 	return 0;
   5691 }
   5692 
   5693 static int
   5694 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5695 {
   5696 	int error;
   5697 
   5698 	/*
   5699 	 * Allocate the control data structures, and create and load the
   5700 	 * DMA map for it.
   5701 	 *
   5702 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5703 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5704 	 * both sets within the same 4G segment.
   5705 	 */
   5706 	if (sc->sc_type < WM_T_82544)
   5707 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5708 	else
   5709 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5710 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5711 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5712 	else
   5713 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5714 
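         	/*
         	 * Note: the (bus_size_t)0x100000000ULL below is the boundary
         	 * argument of bus_dmamem_alloc(); it keeps the allocation from
         	 * crossing a 4GB boundary, satisfying the same-4G-segment
         	 * requirement noted above.
         	 */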
   5715 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5716 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5717 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5718 		aprint_error_dev(sc->sc_dev,
   5719 		    "unable to allocate TX control data, error = %d\n",
   5720 		    error);
   5721 		goto fail_0;
   5722 	}
   5723 
   5724 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5725 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5726 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5727 		aprint_error_dev(sc->sc_dev,
   5728 		    "unable to map TX control data, error = %d\n", error);
   5729 		goto fail_1;
   5730 	}
   5731 
   5732 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5733 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5734 		aprint_error_dev(sc->sc_dev,
   5735 		    "unable to create TX control data DMA map, error = %d\n",
   5736 		    error);
   5737 		goto fail_2;
   5738 	}
   5739 
   5740 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5741 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5742 		aprint_error_dev(sc->sc_dev,
   5743 		    "unable to load TX control data DMA map, error = %d\n",
   5744 		    error);
   5745 		goto fail_3;
   5746 	}
   5747 
   5748 	return 0;
   5749 
   5750  fail_3:
   5751 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5752  fail_2:
   5753 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5754 	    WM_TXDESCS_SIZE(txq));
   5755  fail_1:
   5756 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5757  fail_0:
   5758 	return error;
   5759 }
   5760 
   5761 static void
   5762 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5763 {
   5764 
   5765 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5766 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5767 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5768 	    WM_TXDESCS_SIZE(txq));
   5769 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5770 }
   5771 
   5772 static int
   5773 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5774 {
   5775 	int error;
   5776 	size_t rxq_descs_size;
   5777 
   5778 	/*
   5779 	 * Allocate the control data structures, and create and load the
   5780 	 * DMA map for it.
   5781 	 *
   5782 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5783 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5784 	 * both sets within the same 4G segment.
   5785 	 */
   5786 	rxq->rxq_ndesc = WM_NRXDESC;
   5787 	if (sc->sc_type == WM_T_82574)
   5788 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5789 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5790 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5791 	else
   5792 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5793 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5794 
   5795 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5796 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5797 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5798 		aprint_error_dev(sc->sc_dev,
   5799 		    "unable to allocate RX control data, error = %d\n",
   5800 		    error);
   5801 		goto fail_0;
   5802 	}
   5803 
   5804 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5805 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5806 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5807 		aprint_error_dev(sc->sc_dev,
   5808 		    "unable to map RX control data, error = %d\n", error);
   5809 		goto fail_1;
   5810 	}
   5811 
   5812 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5813 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5814 		aprint_error_dev(sc->sc_dev,
   5815 		    "unable to create RX control data DMA map, error = %d\n",
   5816 		    error);
   5817 		goto fail_2;
   5818 	}
   5819 
   5820 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5821 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5822 		aprint_error_dev(sc->sc_dev,
   5823 		    "unable to load RX control data DMA map, error = %d\n",
   5824 		    error);
   5825 		goto fail_3;
   5826 	}
   5827 
   5828 	return 0;
   5829 
   5830  fail_3:
   5831 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5832  fail_2:
   5833 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5834 	    rxq_descs_size);
   5835  fail_1:
   5836 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5837  fail_0:
   5838 	return error;
   5839 }
   5840 
   5841 static void
   5842 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5843 {
   5844 
   5845 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5846 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5847 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5848 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5849 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5850 }
   5851 
   5852 
   5853 static int
   5854 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5855 {
   5856 	int i, error;
   5857 
   5858 	/* Create the transmit buffer DMA maps. */
   5859 	WM_TXQUEUELEN(txq) =
   5860 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5861 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5862 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5863 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5864 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5865 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5866 			aprint_error_dev(sc->sc_dev,
   5867 			    "unable to create Tx DMA map %d, error = %d\n",
   5868 			    i, error);
   5869 			goto fail;
   5870 		}
   5871 	}
   5872 
   5873 	return 0;
   5874 
   5875  fail:
   5876 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5877 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5878 			bus_dmamap_destroy(sc->sc_dmat,
   5879 			    txq->txq_soft[i].txs_dmamap);
   5880 	}
   5881 	return error;
   5882 }
   5883 
   5884 static void
   5885 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5886 {
   5887 	int i;
   5888 
   5889 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5890 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5891 			bus_dmamap_destroy(sc->sc_dmat,
   5892 			    txq->txq_soft[i].txs_dmamap);
   5893 	}
   5894 }
   5895 
   5896 static int
   5897 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5898 {
   5899 	int i, error;
   5900 
   5901 	/* Create the receive buffer DMA maps. */
   5902 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5903 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5904 			    MCLBYTES, 0, 0,
   5905 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5906 			aprint_error_dev(sc->sc_dev,
    5907 			    "unable to create Rx DMA map %d, error = %d\n",
   5908 			    i, error);
   5909 			goto fail;
   5910 		}
   5911 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5912 	}
   5913 
   5914 	return 0;
   5915 
   5916  fail:
   5917 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5918 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5919 			bus_dmamap_destroy(sc->sc_dmat,
   5920 			    rxq->rxq_soft[i].rxs_dmamap);
   5921 	}
   5922 	return error;
   5923 }
   5924 
   5925 static void
   5926 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5927 {
   5928 	int i;
   5929 
   5930 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5931 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5932 			bus_dmamap_destroy(sc->sc_dmat,
   5933 			    rxq->rxq_soft[i].rxs_dmamap);
   5934 	}
   5935 }
   5936 
   5937 /*
    5938  * wm_alloc_txrx_queues:
    5939  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5940  */
   5941 static int
   5942 wm_alloc_txrx_queues(struct wm_softc *sc)
   5943 {
   5944 	int i, error, tx_done, rx_done;
   5945 
   5946 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5947 	    KM_SLEEP);
   5948 	if (sc->sc_queue == NULL) {
   5949 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5950 		error = ENOMEM;
   5951 		goto fail_0;
   5952 	}
   5953 
   5954 	/*
   5955 	 * For transmission
   5956 	 */
   5957 	error = 0;
   5958 	tx_done = 0;
   5959 	for (i = 0; i < sc->sc_nqueues; i++) {
   5960 #ifdef WM_EVENT_COUNTERS
   5961 		int j;
   5962 		const char *xname;
   5963 #endif
   5964 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5965 		txq->txq_sc = sc;
   5966 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5967 
   5968 		error = wm_alloc_tx_descs(sc, txq);
   5969 		if (error)
   5970 			break;
   5971 		error = wm_alloc_tx_buffer(sc, txq);
   5972 		if (error) {
   5973 			wm_free_tx_descs(sc, txq);
   5974 			break;
   5975 		}
   5976 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5977 		if (txq->txq_interq == NULL) {
   5978 			wm_free_tx_descs(sc, txq);
   5979 			wm_free_tx_buffer(sc, txq);
   5980 			error = ENOMEM;
   5981 			break;
   5982 		}
   5983 
   5984 #ifdef WM_EVENT_COUNTERS
   5985 		xname = device_xname(sc->sc_dev);
   5986 
   5987 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5988 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5989 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5990 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5991 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5992 
   5993 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5994 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5995 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5996 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5997 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5998 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5999 
   6000 		for (j = 0; j < WM_NTXSEGS; j++) {
   6001 			snprintf(txq->txq_txseg_evcnt_names[j],
   6002 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6003 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6004 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6005 		}
   6006 
   6007 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6008 
   6009 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6010 #endif /* WM_EVENT_COUNTERS */
   6011 
   6012 		tx_done++;
   6013 	}
   6014 	if (error)
   6015 		goto fail_1;
   6016 
   6017 	/*
    6018 	 * For receive
   6019 	 */
   6020 	error = 0;
   6021 	rx_done = 0;
   6022 	for (i = 0; i < sc->sc_nqueues; i++) {
   6023 #ifdef WM_EVENT_COUNTERS
   6024 		const char *xname;
   6025 #endif
   6026 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6027 		rxq->rxq_sc = sc;
   6028 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6029 
   6030 		error = wm_alloc_rx_descs(sc, rxq);
   6031 		if (error)
   6032 			break;
   6033 
   6034 		error = wm_alloc_rx_buffer(sc, rxq);
   6035 		if (error) {
   6036 			wm_free_rx_descs(sc, rxq);
   6037 			break;
   6038 		}
   6039 
   6040 #ifdef WM_EVENT_COUNTERS
   6041 		xname = device_xname(sc->sc_dev);
   6042 
   6043 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6044 
   6045 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6046 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6047 #endif /* WM_EVENT_COUNTERS */
   6048 
   6049 		rx_done++;
   6050 	}
   6051 	if (error)
   6052 		goto fail_2;
   6053 
   6054 	return 0;
   6055 
   6056  fail_2:
   6057 	for (i = 0; i < rx_done; i++) {
   6058 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6059 		wm_free_rx_buffer(sc, rxq);
   6060 		wm_free_rx_descs(sc, rxq);
   6061 		if (rxq->rxq_lock)
   6062 			mutex_obj_free(rxq->rxq_lock);
   6063 	}
   6064  fail_1:
   6065 	for (i = 0; i < tx_done; i++) {
   6066 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6067 		pcq_destroy(txq->txq_interq);
   6068 		wm_free_tx_buffer(sc, txq);
   6069 		wm_free_tx_descs(sc, txq);
   6070 		if (txq->txq_lock)
   6071 			mutex_obj_free(txq->txq_lock);
   6072 	}
   6073 
   6074 	kmem_free(sc->sc_queue,
   6075 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6076  fail_0:
   6077 	return error;
   6078 }
   6079 
   6080 /*
    6081  * wm_free_txrx_queues:
    6082  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6083  */
   6084 static void
   6085 wm_free_txrx_queues(struct wm_softc *sc)
   6086 {
   6087 	int i;
   6088 
   6089 	for (i = 0; i < sc->sc_nqueues; i++) {
   6090 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6091 
   6092 #ifdef WM_EVENT_COUNTERS
   6093 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6094 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6095 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6096 #endif /* WM_EVENT_COUNTERS */
   6097 
   6098 		wm_free_rx_buffer(sc, rxq);
   6099 		wm_free_rx_descs(sc, rxq);
   6100 		if (rxq->rxq_lock)
   6101 			mutex_obj_free(rxq->rxq_lock);
   6102 	}
   6103 
   6104 	for (i = 0; i < sc->sc_nqueues; i++) {
   6105 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6106 		struct mbuf *m;
   6107 #ifdef WM_EVENT_COUNTERS
   6108 		int j;
   6109 
   6110 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6111 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6112 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6113 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6114 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6115 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6116 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6117 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6118 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6119 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6120 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6121 
   6122 		for (j = 0; j < WM_NTXSEGS; j++)
   6123 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6124 
   6125 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6126 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6127 #endif /* WM_EVENT_COUNTERS */
   6128 
   6129 		/* drain txq_interq */
   6130 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6131 			m_freem(m);
   6132 		pcq_destroy(txq->txq_interq);
   6133 
   6134 		wm_free_tx_buffer(sc, txq);
   6135 		wm_free_tx_descs(sc, txq);
   6136 		if (txq->txq_lock)
   6137 			mutex_obj_free(txq->txq_lock);
   6138 	}
   6139 
   6140 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6141 }
   6142 
   6143 static void
   6144 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6145 {
   6146 
   6147 	KASSERT(mutex_owned(txq->txq_lock));
   6148 
   6149 	/* Initialize the transmit descriptor ring. */
   6150 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6151 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6152 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6153 	txq->txq_free = WM_NTXDESC(txq);
   6154 	txq->txq_next = 0;
   6155 }
   6156 
   6157 static void
   6158 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6159     struct wm_txqueue *txq)
   6160 {
   6161 
   6162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6163 		device_xname(sc->sc_dev), __func__));
   6164 	KASSERT(mutex_owned(txq->txq_lock));
   6165 
   6166 	if (sc->sc_type < WM_T_82543) {
   6167 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6168 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6169 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6170 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6171 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6172 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6173 	} else {
   6174 		int qid = wmq->wmq_id;
   6175 
   6176 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6177 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6178 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6179 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6180 
   6181 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6182 			/*
   6183 			 * Don't write TDT before TCTL.EN is set.
    6184 			 * See the documentation.
   6185 			 */
   6186 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6187 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6188 			    | TXDCTL_WTHRESH(0));
   6189 		else {
   6190 			/* XXX should update with AIM? */
   6191 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6192 			if (sc->sc_type >= WM_T_82540) {
   6193 				/* should be same */
   6194 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6195 			}
   6196 
   6197 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6198 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6199 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6200 		}
   6201 	}
   6202 }
   6203 
   6204 static void
   6205 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6206 {
   6207 	int i;
   6208 
   6209 	KASSERT(mutex_owned(txq->txq_lock));
   6210 
   6211 	/* Initialize the transmit job descriptors. */
   6212 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6213 		txq->txq_soft[i].txs_mbuf = NULL;
   6214 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6215 	txq->txq_snext = 0;
   6216 	txq->txq_sdirty = 0;
   6217 }
   6218 
   6219 static void
   6220 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6221     struct wm_txqueue *txq)
   6222 {
   6223 
   6224 	KASSERT(mutex_owned(txq->txq_lock));
   6225 
   6226 	/*
   6227 	 * Set up some register offsets that are different between
   6228 	 * the i82542 and the i82543 and later chips.
   6229 	 */
   6230 	if (sc->sc_type < WM_T_82543)
   6231 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6232 	else
   6233 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6234 
   6235 	wm_init_tx_descs(sc, txq);
   6236 	wm_init_tx_regs(sc, wmq, txq);
   6237 	wm_init_tx_buffer(sc, txq);
   6238 }
   6239 
   6240 static void
   6241 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6242     struct wm_rxqueue *rxq)
   6243 {
   6244 
   6245 	KASSERT(mutex_owned(rxq->rxq_lock));
   6246 
   6247 	/*
   6248 	 * Initialize the receive descriptor and receive job
   6249 	 * descriptor rings.
   6250 	 */
   6251 	if (sc->sc_type < WM_T_82543) {
   6252 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6253 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6254 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6255 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6256 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6257 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6258 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6259 
   6260 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6261 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6262 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6263 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6264 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6265 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6266 	} else {
   6267 		int qid = wmq->wmq_id;
   6268 
   6269 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6270 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6271 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6272 
   6273 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6274 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6275 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6276 
    6277 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
   6278 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6279 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6280 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6281 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6282 			    | RXDCTL_WTHRESH(1));
   6283 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6284 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6285 		} else {
   6286 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6287 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6288 			/* XXX should update with AIM? */
   6289 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6290 			/* MUST be the same value as RDTR. */
   6291 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6292 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6293 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6294 		}
   6295 	}
   6296 }
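
         /*
          * A worked example for the SRRCTL setup above, assuming the usual
          * MCLBYTES of 2048 and a SRRCTL_BSIZEPKT_SHIFT of 10 (the buffer
          * size field counts in 1 KB units): the alignment check passes
          * because 2048 is a multiple of 1 KB, and 2048 >> 10 == 2
          * programs 2 KB receive buffers.
          */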
   6297 
   6298 static int
   6299 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6300 {
   6301 	struct wm_rxsoft *rxs;
   6302 	int error, i;
   6303 
   6304 	KASSERT(mutex_owned(rxq->rxq_lock));
   6305 
   6306 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6307 		rxs = &rxq->rxq_soft[i];
   6308 		if (rxs->rxs_mbuf == NULL) {
   6309 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6310 				log(LOG_ERR, "%s: unable to allocate or map "
   6311 				    "rx buffer %d, error = %d\n",
   6312 				    device_xname(sc->sc_dev), i, error);
   6313 				/*
   6314 				 * XXX Should attempt to run with fewer receive
   6315 				 * XXX buffers instead of just failing.
   6316 				 */
   6317 				wm_rxdrain(rxq);
   6318 				return ENOMEM;
   6319 			}
   6320 		} else {
   6321 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6322 				wm_init_rxdesc(rxq, i);
   6323 			/*
    6324 			 * For 82575 and newer devices, the RX descriptors
    6325 			 * must be initialized after RCTL.EN is set in
    6326 			 * wm_set_filter().
   6327 			 */
   6328 		}
   6329 	}
   6330 	rxq->rxq_ptr = 0;
   6331 	rxq->rxq_discard = 0;
   6332 	WM_RXCHAIN_RESET(rxq);
   6333 
   6334 	return 0;
   6335 }
   6336 
   6337 static int
   6338 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6339     struct wm_rxqueue *rxq)
   6340 {
   6341 
   6342 	KASSERT(mutex_owned(rxq->rxq_lock));
   6343 
   6344 	/*
   6345 	 * Set up some register offsets that are different between
   6346 	 * the i82542 and the i82543 and later chips.
   6347 	 */
   6348 	if (sc->sc_type < WM_T_82543)
   6349 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6350 	else
   6351 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6352 
   6353 	wm_init_rx_regs(sc, wmq, rxq);
   6354 	return wm_init_rx_buffer(sc, rxq);
   6355 }
   6356 
   6357 /*
    6358  * wm_init_txrx_queues:
    6359  *	Initialize {tx,rx}descs and {tx,rx} buffers.
   6360  */
   6361 static int
   6362 wm_init_txrx_queues(struct wm_softc *sc)
   6363 {
   6364 	int i, error = 0;
   6365 
   6366 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6367 		device_xname(sc->sc_dev), __func__));
   6368 
   6369 	for (i = 0; i < sc->sc_nqueues; i++) {
   6370 		struct wm_queue *wmq = &sc->sc_queue[i];
   6371 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6372 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6373 
   6374 		/*
   6375 		 * TODO
    6376 		 * Currently, a constant value is used instead of AIM
    6377 		 * (adaptive interrupt moderation).  Furthermore, the interrupt
    6378 		 * interval for multiqueue, which uses polling mode, is set
    6379 		 * shorter than the default.  More tuning and AIM are required.
   6380 		 */
   6381 		if (sc->sc_nqueues > 1)
   6382 			wmq->wmq_itr = 50;
   6383 		else
   6384 			wmq->wmq_itr = sc->sc_itr_init;
   6385 		wmq->wmq_set_itr = true;
   6386 
   6387 		mutex_enter(txq->txq_lock);
   6388 		wm_init_tx_queue(sc, wmq, txq);
   6389 		mutex_exit(txq->txq_lock);
   6390 
   6391 		mutex_enter(rxq->rxq_lock);
   6392 		error = wm_init_rx_queue(sc, wmq, rxq);
   6393 		mutex_exit(rxq->rxq_lock);
   6394 		if (error)
   6395 			break;
   6396 	}
   6397 
   6398 	return error;
   6399 }
   6400 
   6401 /*
   6402  * wm_tx_offload:
   6403  *
   6404  *	Set up TCP/IP checksumming parameters for the
   6405  *	specified packet.
   6406  */
   6407 static int
   6408 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6409     uint8_t *fieldsp)
   6410 {
   6411 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6412 	struct mbuf *m0 = txs->txs_mbuf;
   6413 	struct livengood_tcpip_ctxdesc *t;
   6414 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6415 	uint32_t ipcse;
   6416 	struct ether_header *eh;
   6417 	int offset, iphl;
   6418 	uint8_t fields;
   6419 
   6420 	/*
   6421 	 * XXX It would be nice if the mbuf pkthdr had offset
   6422 	 * fields for the protocol headers.
   6423 	 */
   6424 
   6425 	eh = mtod(m0, struct ether_header *);
   6426 	switch (htons(eh->ether_type)) {
   6427 	case ETHERTYPE_IP:
   6428 	case ETHERTYPE_IPV6:
   6429 		offset = ETHER_HDR_LEN;
   6430 		break;
   6431 
   6432 	case ETHERTYPE_VLAN:
   6433 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6434 		break;
   6435 
   6436 	default:
   6437 		/*
   6438 		 * Don't support this protocol or encapsulation.
   6439 		 */
   6440 		*fieldsp = 0;
   6441 		*cmdp = 0;
   6442 		return 0;
   6443 	}
   6444 
   6445 	if ((m0->m_pkthdr.csum_flags &
   6446 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6447 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6448 	} else {
   6449 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6450 	}
   6451 	ipcse = offset + iphl - 1;
   6452 
   6453 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6454 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6455 	seg = 0;
   6456 	fields = 0;
   6457 
   6458 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6459 		int hlen = offset + iphl;
   6460 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6461 
   6462 		if (__predict_false(m0->m_len <
   6463 				    (hlen + sizeof(struct tcphdr)))) {
   6464 			/*
   6465 			 * TCP/IP headers are not in the first mbuf; we need
   6466 			 * to do this the slow and painful way.  Let's just
   6467 			 * hope this doesn't happen very often.
   6468 			 */
   6469 			struct tcphdr th;
   6470 
   6471 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6472 
   6473 			m_copydata(m0, hlen, sizeof(th), &th);
   6474 			if (v4) {
   6475 				struct ip ip;
   6476 
   6477 				m_copydata(m0, offset, sizeof(ip), &ip);
   6478 				ip.ip_len = 0;
   6479 				m_copyback(m0,
   6480 				    offset + offsetof(struct ip, ip_len),
   6481 				    sizeof(ip.ip_len), &ip.ip_len);
   6482 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6483 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6484 			} else {
   6485 				struct ip6_hdr ip6;
   6486 
   6487 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6488 				ip6.ip6_plen = 0;
   6489 				m_copyback(m0,
   6490 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6491 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6492 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6493 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6494 			}
   6495 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6496 			    sizeof(th.th_sum), &th.th_sum);
   6497 
   6498 			hlen += th.th_off << 2;
   6499 		} else {
   6500 			/*
   6501 			 * TCP/IP headers are in the first mbuf; we can do
   6502 			 * this the easy way.
   6503 			 */
   6504 			struct tcphdr *th;
   6505 
   6506 			if (v4) {
   6507 				struct ip *ip =
   6508 				    (void *)(mtod(m0, char *) + offset);
   6509 				th = (void *)(mtod(m0, char *) + hlen);
   6510 
   6511 				ip->ip_len = 0;
   6512 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6513 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6514 			} else {
   6515 				struct ip6_hdr *ip6 =
   6516 				    (void *)(mtod(m0, char *) + offset);
   6517 				th = (void *)(mtod(m0, char *) + hlen);
   6518 
   6519 				ip6->ip6_plen = 0;
   6520 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6521 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6522 			}
   6523 			hlen += th->th_off << 2;
   6524 		}
   6525 
   6526 		if (v4) {
   6527 			WM_Q_EVCNT_INCR(txq, txtso);
   6528 			cmdlen |= WTX_TCPIP_CMD_IP;
   6529 		} else {
   6530 			WM_Q_EVCNT_INCR(txq, txtso6);
   6531 			ipcse = 0;
   6532 		}
   6533 		cmd |= WTX_TCPIP_CMD_TSE;
   6534 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6535 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6536 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6537 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6538 	}
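
         	/*
         	 * To recap the TSO preparation above: ip_len/ip6_plen are
         	 * zeroed and th_sum is seeded with a pseudo-header checksum
         	 * covering only the addresses and protocol, so the hardware
         	 * can insert the per-segment length and complete the checksum
         	 * for every segment it emits.
         	 */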
   6539 
   6540 	/*
   6541 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6542 	 * offload feature, if we load the context descriptor, we
   6543 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6544 	 */
   6545 
   6546 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6547 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6548 	    WTX_TCPIP_IPCSE(ipcse);
   6549 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6550 		WM_Q_EVCNT_INCR(txq, txipsum);
   6551 		fields |= WTX_IXSM;
   6552 	}
   6553 
   6554 	offset += iphl;
   6555 
   6556 	if (m0->m_pkthdr.csum_flags &
   6557 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6558 		WM_Q_EVCNT_INCR(txq, txtusum);
   6559 		fields |= WTX_TXSM;
   6560 		tucs = WTX_TCPIP_TUCSS(offset) |
   6561 		    WTX_TCPIP_TUCSO(offset +
   6562 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6563 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6564 	} else if ((m0->m_pkthdr.csum_flags &
   6565 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6566 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6567 		fields |= WTX_TXSM;
   6568 		tucs = WTX_TCPIP_TUCSS(offset) |
   6569 		    WTX_TCPIP_TUCSO(offset +
   6570 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6571 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6572 	} else {
   6573 		/* Just initialize it to a valid TCP context. */
   6574 		tucs = WTX_TCPIP_TUCSS(offset) |
   6575 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6576 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6577 	}
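
         	/*
         	 * Worked example, assuming a plain IPv4 TCP packet with a
         	 * 20-byte IP header: offset starts at 14 and iphl = 20, so
         	 * IPCSS = 14, IPCSO = 14 + 10 = 24 (offsetof(struct ip,
         	 * ip_sum)), IPCSE = 33, and after offset += iphl, TUCSS = 34
         	 * and TUCSO = 34 + 16 = 50 (offsetof(struct tcphdr, th_sum)).
         	 */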
   6578 
   6579 	/* Fill in the context descriptor. */
   6580 	t = (struct livengood_tcpip_ctxdesc *)
   6581 	    &txq->txq_descs[txq->txq_next];
   6582 	t->tcpip_ipcs = htole32(ipcs);
   6583 	t->tcpip_tucs = htole32(tucs);
   6584 	t->tcpip_cmdlen = htole32(cmdlen);
   6585 	t->tcpip_seg = htole32(seg);
   6586 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6587 
   6588 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6589 	txs->txs_ndesc++;
   6590 
   6591 	*cmdp = cmd;
   6592 	*fieldsp = fields;
   6593 
   6594 	return 0;
   6595 }
   6596 
   6597 static inline int
   6598 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6599 {
   6600 	struct wm_softc *sc = ifp->if_softc;
   6601 	u_int cpuid = cpu_index(curcpu());
   6602 
   6603 	/*
    6604 	 * Currently, a simple CPU-based distribution strategy.
    6605 	 * TODO:
    6606 	 * Distribute by flow ID (i.e. the RSS hash value).
    6607 	 */
    6608 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6609 }
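
         /*
          * For example (hypothetical values): with ncpu = 4, sc_nqueues = 2
          * and sc_affinity_offset = 1, a packet sent from CPU 2 maps to queue
          * (2 + 4 - 1) % 2 == 1.  Adding ncpu before subtracting keeps the
          * dividend non-negative.
          */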
   6610 
   6611 /*
   6612  * wm_start:		[ifnet interface function]
   6613  *
   6614  *	Start packet transmission on the interface.
   6615  */
   6616 static void
   6617 wm_start(struct ifnet *ifp)
   6618 {
   6619 	struct wm_softc *sc = ifp->if_softc;
   6620 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6621 
   6622 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6623 
   6624 	/*
   6625 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6626 	 */
   6627 
   6628 	mutex_enter(txq->txq_lock);
   6629 	if (!txq->txq_stopping)
   6630 		wm_start_locked(ifp);
   6631 	mutex_exit(txq->txq_lock);
   6632 }
   6633 
   6634 static void
   6635 wm_start_locked(struct ifnet *ifp)
   6636 {
   6637 	struct wm_softc *sc = ifp->if_softc;
   6638 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6639 
   6640 	wm_send_common_locked(ifp, txq, false);
   6641 }
   6642 
   6643 static int
   6644 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6645 {
   6646 	int qid;
   6647 	struct wm_softc *sc = ifp->if_softc;
   6648 	struct wm_txqueue *txq;
   6649 
   6650 	qid = wm_select_txqueue(ifp, m);
   6651 	txq = &sc->sc_queue[qid].wmq_txq;
   6652 
   6653 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6654 		m_freem(m);
   6655 		WM_Q_EVCNT_INCR(txq, txdrop);
   6656 		return ENOBUFS;
   6657 	}
   6658 
   6659 	/*
   6660 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6661 	 */
   6662 	ifp->if_obytes += m->m_pkthdr.len;
   6663 	if (m->m_flags & M_MCAST)
   6664 		ifp->if_omcasts++;
   6665 
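         	/*
         	 * A failed mutex_tryenter() here is harmless for the same
         	 * reason described in wm_nq_transmit(): the packet just
         	 * enqueued to txq->txq_interq will be dequeued later by
         	 * wm_deferred_start_locked().
         	 */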
   6666 	if (mutex_tryenter(txq->txq_lock)) {
   6667 		if (!txq->txq_stopping)
   6668 			wm_transmit_locked(ifp, txq);
   6669 		mutex_exit(txq->txq_lock);
   6670 	}
   6671 
   6672 	return 0;
   6673 }
   6674 
   6675 static void
   6676 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6677 {
   6678 
   6679 	wm_send_common_locked(ifp, txq, true);
   6680 }
   6681 
   6682 static void
   6683 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6684     bool is_transmit)
   6685 {
   6686 	struct wm_softc *sc = ifp->if_softc;
   6687 	struct mbuf *m0;
   6688 	struct m_tag *mtag;
   6689 	struct wm_txsoft *txs;
   6690 	bus_dmamap_t dmamap;
   6691 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6692 	bus_addr_t curaddr;
   6693 	bus_size_t seglen, curlen;
   6694 	uint32_t cksumcmd;
   6695 	uint8_t cksumfields;
   6696 
   6697 	KASSERT(mutex_owned(txq->txq_lock));
   6698 
   6699 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6700 		return;
   6701 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6702 		return;
   6703 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6704 		return;
   6705 
   6706 	/* Remember the previous number of free descriptors. */
   6707 	ofree = txq->txq_free;
   6708 
   6709 	/*
   6710 	 * Loop through the send queue, setting up transmit descriptors
   6711 	 * until we drain the queue, or use up all available transmit
   6712 	 * descriptors.
   6713 	 */
   6714 	for (;;) {
   6715 		m0 = NULL;
   6716 
   6717 		/* Get a work queue entry. */
   6718 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6719 			wm_txeof(sc, txq);
   6720 			if (txq->txq_sfree == 0) {
   6721 				DPRINTF(WM_DEBUG_TX,
   6722 				    ("%s: TX: no free job descriptors\n",
   6723 					device_xname(sc->sc_dev)));
   6724 				WM_Q_EVCNT_INCR(txq, txsstall);
   6725 				break;
   6726 			}
   6727 		}
   6728 
   6729 		/* Grab a packet off the queue. */
   6730 		if (is_transmit)
   6731 			m0 = pcq_get(txq->txq_interq);
   6732 		else
   6733 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6734 		if (m0 == NULL)
   6735 			break;
   6736 
   6737 		DPRINTF(WM_DEBUG_TX,
   6738 		    ("%s: TX: have packet to transmit: %p\n",
   6739 		    device_xname(sc->sc_dev), m0));
   6740 
   6741 		txs = &txq->txq_soft[txq->txq_snext];
   6742 		dmamap = txs->txs_dmamap;
   6743 
   6744 		use_tso = (m0->m_pkthdr.csum_flags &
   6745 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6746 
   6747 		/*
   6748 		 * So says the Linux driver:
   6749 		 * The controller does a simple calculation to make sure
   6750 		 * there is enough room in the FIFO before initiating the
   6751 		 * DMA for each buffer.  The calc is:
   6752 		 *	4 = ceil(buffer len / MSS)
   6753 		 * To make sure we don't overrun the FIFO, adjust the max
   6754 		 * buffer len if the MSS drops.
   6755 		 */
   6756 		dmamap->dm_maxsegsz =
   6757 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6758 		    ? m0->m_pkthdr.segsz << 2
   6759 		    : WTX_MAX_LEN;
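
         		/*
         		 * E.g. with a TSO MSS (m0->m_pkthdr.segsz) of 1460, the
         		 * per-segment limit becomes 1460 << 2 == 5840 bytes,
         		 * matching the 4 * MSS FIFO headroom rule quoted above.
         		 */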
   6760 
   6761 		/*
   6762 		 * Load the DMA map.  If this fails, the packet either
   6763 		 * didn't fit in the allotted number of segments, or we
   6764 		 * were short on resources.  For the too-many-segments
   6765 		 * case, we simply report an error and drop the packet,
   6766 		 * since we can't sanely copy a jumbo packet to a single
   6767 		 * buffer.
   6768 		 */
   6769 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6770 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6771 		if (error) {
   6772 			if (error == EFBIG) {
   6773 				WM_Q_EVCNT_INCR(txq, txdrop);
   6774 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6775 				    "DMA segments, dropping...\n",
   6776 				    device_xname(sc->sc_dev));
   6777 				wm_dump_mbuf_chain(sc, m0);
   6778 				m_freem(m0);
   6779 				continue;
   6780 			}
   6781 			/*  Short on resources, just stop for now. */
   6782 			DPRINTF(WM_DEBUG_TX,
   6783 			    ("%s: TX: dmamap load failed: %d\n",
   6784 			    device_xname(sc->sc_dev), error));
   6785 			break;
   6786 		}
   6787 
   6788 		segs_needed = dmamap->dm_nsegs;
   6789 		if (use_tso) {
   6790 			/* For sentinel descriptor; see below. */
   6791 			segs_needed++;
   6792 		}
   6793 
   6794 		/*
   6795 		 * Ensure we have enough descriptors free to describe
   6796 		 * the packet.  Note, we always reserve one descriptor
   6797 		 * at the end of the ring due to the semantics of the
   6798 		 * TDT register, plus one more in the event we need
   6799 		 * to load offload context.
   6800 		 */
   6801 		if (segs_needed > txq->txq_free - 2) {
   6802 			/*
   6803 			 * Not enough free descriptors to transmit this
   6804 			 * packet.  We haven't committed anything yet,
   6805 			 * so just unload the DMA map, put the packet
    6806 			 * back on the queue, and punt.  Notify the upper
   6807 			 * layer that there are no more slots left.
   6808 			 */
   6809 			DPRINTF(WM_DEBUG_TX,
   6810 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6811 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6812 			    segs_needed, txq->txq_free - 1));
   6813 			if (!is_transmit)
   6814 				ifp->if_flags |= IFF_OACTIVE;
   6815 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6816 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6817 			WM_Q_EVCNT_INCR(txq, txdstall);
   6818 			break;
   6819 		}
   6820 
   6821 		/*
   6822 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6823 		 * once we know we can transmit the packet, since we
   6824 		 * do some internal FIFO space accounting here.
   6825 		 */
   6826 		if (sc->sc_type == WM_T_82547 &&
   6827 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6828 			DPRINTF(WM_DEBUG_TX,
   6829 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6830 			    device_xname(sc->sc_dev)));
   6831 			if (!is_transmit)
   6832 				ifp->if_flags |= IFF_OACTIVE;
   6833 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6834 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6835 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6836 			break;
   6837 		}
   6838 
   6839 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6840 
   6841 		DPRINTF(WM_DEBUG_TX,
   6842 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6843 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6844 
   6845 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6846 
   6847 		/*
   6848 		 * Store a pointer to the packet so that we can free it
   6849 		 * later.
   6850 		 *
   6851 		 * Initially, we consider the number of descriptors the
    6852 		 * packet uses to be the number of DMA segments.  This may be
   6853 		 * incremented by 1 if we do checksum offload (a descriptor
   6854 		 * is used to set the checksum context).
   6855 		 */
   6856 		txs->txs_mbuf = m0;
   6857 		txs->txs_firstdesc = txq->txq_next;
   6858 		txs->txs_ndesc = segs_needed;
   6859 
   6860 		/* Set up offload parameters for this packet. */
   6861 		if (m0->m_pkthdr.csum_flags &
   6862 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6863 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6864 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6865 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6866 					  &cksumfields) != 0) {
   6867 				/* Error message already displayed. */
   6868 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6869 				continue;
   6870 			}
   6871 		} else {
   6872 			cksumcmd = 0;
   6873 			cksumfields = 0;
   6874 		}
   6875 
   6876 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6877 
   6878 		/* Sync the DMA map. */
   6879 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6880 		    BUS_DMASYNC_PREWRITE);
   6881 
   6882 		/* Initialize the transmit descriptor. */
   6883 		for (nexttx = txq->txq_next, seg = 0;
   6884 		     seg < dmamap->dm_nsegs; seg++) {
   6885 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6886 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6887 			     seglen != 0;
   6888 			     curaddr += curlen, seglen -= curlen,
   6889 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6890 				curlen = seglen;
   6891 
   6892 				/*
   6893 				 * So says the Linux driver:
   6894 				 * Work around for premature descriptor
   6895 				 * write-backs in TSO mode.  Append a
   6896 				 * 4-byte sentinel descriptor.
   6897 				 */
   6898 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6899 				    curlen > 8)
   6900 					curlen -= 4;
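
         				/*
         				 * E.g. a final 1024-byte segment is
         				 * emitted as a 1020-byte descriptor,
         				 * leaving 4 bytes for the sentinel
         				 * descriptor on the next pass of this
         				 * loop.
         				 */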
   6901 
   6902 				wm_set_dma_addr(
   6903 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6904 				txq->txq_descs[nexttx].wtx_cmdlen
   6905 				    = htole32(cksumcmd | curlen);
   6906 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6907 				    = 0;
   6908 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6909 				    = cksumfields;
   6910 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6911 				lasttx = nexttx;
   6912 
   6913 				DPRINTF(WM_DEBUG_TX,
   6914 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6915 				     "len %#04zx\n",
   6916 				    device_xname(sc->sc_dev), nexttx,
   6917 				    (uint64_t)curaddr, curlen));
   6918 			}
   6919 		}
   6920 
   6921 		KASSERT(lasttx != -1);
   6922 
   6923 		/*
   6924 		 * Set up the command byte on the last descriptor of
   6925 		 * the packet.  If we're in the interrupt delay window,
   6926 		 * delay the interrupt.
   6927 		 */
   6928 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6929 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6930 
   6931 		/*
   6932 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6933 		 * up the descriptor to encapsulate the packet for us.
   6934 		 *
   6935 		 * This is only valid on the last descriptor of the packet.
   6936 		 */
   6937 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6938 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6939 			    htole32(WTX_CMD_VLE);
   6940 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6941 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6942 		}
   6943 
   6944 		txs->txs_lastdesc = lasttx;
   6945 
   6946 		DPRINTF(WM_DEBUG_TX,
   6947 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6948 		    device_xname(sc->sc_dev),
   6949 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6950 
   6951 		/* Sync the descriptors we're using. */
   6952 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6953 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6954 
   6955 		/* Give the packet to the chip. */
   6956 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6957 
   6958 		DPRINTF(WM_DEBUG_TX,
   6959 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6960 
   6961 		DPRINTF(WM_DEBUG_TX,
   6962 		    ("%s: TX: finished transmitting packet, job %d\n",
   6963 		    device_xname(sc->sc_dev), txq->txq_snext));
   6964 
   6965 		/* Advance the tx pointer. */
   6966 		txq->txq_free -= txs->txs_ndesc;
   6967 		txq->txq_next = nexttx;
   6968 
   6969 		txq->txq_sfree--;
   6970 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6971 
   6972 		/* Pass the packet to any BPF listeners. */
   6973 		bpf_mtap(ifp, m0);
   6974 	}
   6975 
   6976 	if (m0 != NULL) {
   6977 		if (!is_transmit)
   6978 			ifp->if_flags |= IFF_OACTIVE;
   6979 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6980 		WM_Q_EVCNT_INCR(txq, txdrop);
   6981 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6982 			__func__));
   6983 		m_freem(m0);
   6984 	}
   6985 
   6986 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6987 		/* No more slots; notify upper layer. */
   6988 		if (!is_transmit)
   6989 			ifp->if_flags |= IFF_OACTIVE;
   6990 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6991 	}
   6992 
   6993 	if (txq->txq_free != ofree) {
   6994 		/* Set a watchdog timer in case the chip flakes out. */
   6995 		ifp->if_timer = 5;
   6996 	}
   6997 }
   6998 
   6999 /*
   7000  * wm_nq_tx_offload:
   7001  *
   7002  *	Set up TCP/IP checksumming parameters for the
   7003  *	specified packet, for NEWQUEUE devices
   7004  */
   7005 static int
   7006 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7007     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7008 {
   7009 	struct mbuf *m0 = txs->txs_mbuf;
   7010 	struct m_tag *mtag;
   7011 	uint32_t vl_len, mssidx, cmdc;
   7012 	struct ether_header *eh;
   7013 	int offset, iphl;
   7014 
   7015 	/*
   7016 	 * XXX It would be nice if the mbuf pkthdr had offset
   7017 	 * fields for the protocol headers.
   7018 	 */
   7019 	*cmdlenp = 0;
   7020 	*fieldsp = 0;
   7021 
   7022 	eh = mtod(m0, struct ether_header *);
   7023 	switch (htons(eh->ether_type)) {
   7024 	case ETHERTYPE_IP:
   7025 	case ETHERTYPE_IPV6:
   7026 		offset = ETHER_HDR_LEN;
   7027 		break;
   7028 
   7029 	case ETHERTYPE_VLAN:
   7030 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7031 		break;
   7032 
   7033 	default:
   7034 		/* Don't support this protocol or encapsulation. */
   7035 		*do_csum = false;
   7036 		return 0;
   7037 	}
   7038 	*do_csum = true;
   7039 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7040 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7041 
   7042 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7043 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7044 
   7045 	if ((m0->m_pkthdr.csum_flags &
   7046 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7047 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7048 	} else {
   7049 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7050 	}
   7051 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7052 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7053 
   7054 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7055 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7056 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7057 		*cmdlenp |= NQTX_CMD_VLE;
   7058 	}
   7059 
   7060 	mssidx = 0;
   7061 
   7062 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7063 		int hlen = offset + iphl;
   7064 		int tcp_hlen;
   7065 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7066 
   7067 		if (__predict_false(m0->m_len <
   7068 				    (hlen + sizeof(struct tcphdr)))) {
   7069 			/*
   7070 			 * TCP/IP headers are not in the first mbuf; we need
   7071 			 * to do this the slow and painful way.  Let's just
   7072 			 * hope this doesn't happen very often.
   7073 			 */
   7074 			struct tcphdr th;
   7075 
   7076 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7077 
   7078 			m_copydata(m0, hlen, sizeof(th), &th);
   7079 			if (v4) {
   7080 				struct ip ip;
   7081 
   7082 				m_copydata(m0, offset, sizeof(ip), &ip);
   7083 				ip.ip_len = 0;
   7084 				m_copyback(m0,
   7085 				    offset + offsetof(struct ip, ip_len),
   7086 				    sizeof(ip.ip_len), &ip.ip_len);
   7087 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7088 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7089 			} else {
   7090 				struct ip6_hdr ip6;
   7091 
   7092 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7093 				ip6.ip6_plen = 0;
   7094 				m_copyback(m0,
   7095 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7096 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7097 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7098 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7099 			}
   7100 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7101 			    sizeof(th.th_sum), &th.th_sum);
   7102 
   7103 			tcp_hlen = th.th_off << 2;
   7104 		} else {
   7105 			/*
   7106 			 * TCP/IP headers are in the first mbuf; we can do
   7107 			 * this the easy way.
   7108 			 */
   7109 			struct tcphdr *th;
   7110 
   7111 			if (v4) {
   7112 				struct ip *ip =
   7113 				    (void *)(mtod(m0, char *) + offset);
   7114 				th = (void *)(mtod(m0, char *) + hlen);
   7115 
   7116 				ip->ip_len = 0;
   7117 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7118 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7119 			} else {
   7120 				struct ip6_hdr *ip6 =
   7121 				    (void *)(mtod(m0, char *) + offset);
   7122 				th = (void *)(mtod(m0, char *) + hlen);
   7123 
   7124 				ip6->ip6_plen = 0;
   7125 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7126 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7127 			}
   7128 			tcp_hlen = th->th_off << 2;
   7129 		}
   7130 		hlen += tcp_hlen;
   7131 		*cmdlenp |= NQTX_CMD_TSE;
   7132 
   7133 		if (v4) {
   7134 			WM_Q_EVCNT_INCR(txq, txtso);
   7135 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7136 		} else {
   7137 			WM_Q_EVCNT_INCR(txq, txtso6);
   7138 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7139 		}
   7140 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7141 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7142 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7143 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7144 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7145 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7146 	} else {
   7147 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7148 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7149 	}
   7150 
   7151 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7152 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7153 		cmdc |= NQTXC_CMD_IP4;
   7154 	}
   7155 
   7156 	if (m0->m_pkthdr.csum_flags &
   7157 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7158 		WM_Q_EVCNT_INCR(txq, txtusum);
   7159 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7160 			cmdc |= NQTXC_CMD_TCP;
   7161 		} else {
   7162 			cmdc |= NQTXC_CMD_UDP;
   7163 		}
   7164 		cmdc |= NQTXC_CMD_IP4;
   7165 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7166 	}
   7167 	if (m0->m_pkthdr.csum_flags &
   7168 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7169 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7170 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7171 			cmdc |= NQTXC_CMD_TCP;
   7172 		} else {
   7173 			cmdc |= NQTXC_CMD_UDP;
   7174 		}
   7175 		cmdc |= NQTXC_CMD_IP6;
   7176 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7177 	}
   7178 
   7179 	/* Fill in the context descriptor. */
   7180 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7181 	    htole32(vl_len);
   7182 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7183 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7184 	    htole32(cmdc);
   7185 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7186 	    htole32(mssidx);
   7187 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7188 	DPRINTF(WM_DEBUG_TX,
   7189 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7190 	    txq->txq_next, 0, vl_len));
   7191 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7192 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7193 	txs->txs_ndesc++;
   7194 	return 0;
   7195 }
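
         /*
          * For reference, with an untagged IPv4 TCP packet and a 20-byte IP
          * header, the context descriptor built above carries MACLEN = 14 and
          * IPLEN = 20 in vl_len (the VLAN field stays zero), while mssidx is
          * non-zero only for TSO, where it holds the MSS and the TCP header
          * length.
          */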
   7196 
   7197 /*
   7198  * wm_nq_start:		[ifnet interface function]
   7199  *
   7200  *	Start packet transmission on the interface for NEWQUEUE devices
   7201  */
   7202 static void
   7203 wm_nq_start(struct ifnet *ifp)
   7204 {
   7205 	struct wm_softc *sc = ifp->if_softc;
   7206 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7207 
   7208 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7209 
   7210 	/*
   7211 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7212 	 */
   7213 
   7214 	mutex_enter(txq->txq_lock);
   7215 	if (!txq->txq_stopping)
   7216 		wm_nq_start_locked(ifp);
   7217 	mutex_exit(txq->txq_lock);
   7218 }
   7219 
   7220 static void
   7221 wm_nq_start_locked(struct ifnet *ifp)
   7222 {
   7223 	struct wm_softc *sc = ifp->if_softc;
   7224 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7225 
   7226 	wm_nq_send_common_locked(ifp, txq, false);
   7227 }
   7228 
   7229 static int
   7230 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7231 {
   7232 	int qid;
   7233 	struct wm_softc *sc = ifp->if_softc;
   7234 	struct wm_txqueue *txq;
   7235 
   7236 	qid = wm_select_txqueue(ifp, m);
   7237 	txq = &sc->sc_queue[qid].wmq_txq;
   7238 
   7239 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7240 		m_freem(m);
   7241 		WM_Q_EVCNT_INCR(txq, txdrop);
   7242 		return ENOBUFS;
   7243 	}
   7244 
   7245 	/*
   7246 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7247 	 */
   7248 	ifp->if_obytes += m->m_pkthdr.len;
   7249 	if (m->m_flags & M_MCAST)
   7250 		ifp->if_omcasts++;
   7251 
   7252 	/*
    7253 	 * This mutex_tryenter() can fail at run time in two situations:
    7254 	 *     (1) contention with the interrupt handler
    7255 	 *         (wm_txrxintr_msix())
    7256 	 *     (2) contention with the deferred if_start softint
    7257 	 *         (wm_handle_queue())
    7258 	 * In either case, the last packet enqueued to txq->txq_interq is
    7259 	 * dequeued later by wm_deferred_start_locked(), so it does not
    7260 	 * get stuck.
   7261 	 */
   7262 	if (mutex_tryenter(txq->txq_lock)) {
   7263 		if (!txq->txq_stopping)
   7264 			wm_nq_transmit_locked(ifp, txq);
   7265 		mutex_exit(txq->txq_lock);
   7266 	}
   7267 
   7268 	return 0;
   7269 }
   7270 
   7271 static void
   7272 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7273 {
   7274 
   7275 	wm_nq_send_common_locked(ifp, txq, true);
   7276 }
   7277 
   7278 static void
   7279 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7280     bool is_transmit)
   7281 {
   7282 	struct wm_softc *sc = ifp->if_softc;
   7283 	struct mbuf *m0;
   7284 	struct m_tag *mtag;
   7285 	struct wm_txsoft *txs;
   7286 	bus_dmamap_t dmamap;
   7287 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7288 	bool do_csum, sent;
   7289 
   7290 	KASSERT(mutex_owned(txq->txq_lock));
   7291 
   7292 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7293 		return;
   7294 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7295 		return;
   7296 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7297 		return;
   7298 
   7299 	sent = false;
   7300 
   7301 	/*
   7302 	 * Loop through the send queue, setting up transmit descriptors
   7303 	 * until we drain the queue, or use up all available transmit
   7304 	 * descriptors.
   7305 	 */
   7306 	for (;;) {
   7307 		m0 = NULL;
   7308 
   7309 		/* Get a work queue entry. */
   7310 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7311 			wm_txeof(sc, txq);
   7312 			if (txq->txq_sfree == 0) {
   7313 				DPRINTF(WM_DEBUG_TX,
   7314 				    ("%s: TX: no free job descriptors\n",
   7315 					device_xname(sc->sc_dev)));
   7316 				WM_Q_EVCNT_INCR(txq, txsstall);
   7317 				break;
   7318 			}
   7319 		}
   7320 
   7321 		/* Grab a packet off the queue. */
   7322 		if (is_transmit)
   7323 			m0 = pcq_get(txq->txq_interq);
   7324 		else
   7325 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7326 		if (m0 == NULL)
   7327 			break;
   7328 
   7329 		DPRINTF(WM_DEBUG_TX,
   7330 		    ("%s: TX: have packet to transmit: %p\n",
   7331 		    device_xname(sc->sc_dev), m0));
   7332 
   7333 		txs = &txq->txq_soft[txq->txq_snext];
   7334 		dmamap = txs->txs_dmamap;
   7335 
   7336 		/*
   7337 		 * Load the DMA map.  If this fails, the packet either
   7338 		 * didn't fit in the allotted number of segments, or we
   7339 		 * were short on resources.  For the too-many-segments
   7340 		 * case, we simply report an error and drop the packet,
   7341 		 * since we can't sanely copy a jumbo packet to a single
   7342 		 * buffer.
   7343 		 */
   7344 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7345 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7346 		if (error) {
   7347 			if (error == EFBIG) {
   7348 				WM_Q_EVCNT_INCR(txq, txdrop);
   7349 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7350 				    "DMA segments, dropping...\n",
   7351 				    device_xname(sc->sc_dev));
   7352 				wm_dump_mbuf_chain(sc, m0);
   7353 				m_freem(m0);
   7354 				continue;
   7355 			}
   7356 			/* Short on resources, just stop for now. */
   7357 			DPRINTF(WM_DEBUG_TX,
   7358 			    ("%s: TX: dmamap load failed: %d\n",
   7359 			    device_xname(sc->sc_dev), error));
   7360 			break;
   7361 		}
   7362 
   7363 		segs_needed = dmamap->dm_nsegs;
   7364 
   7365 		/*
   7366 		 * Ensure we have enough descriptors free to describe
   7367 		 * the packet.  Note, we always reserve one descriptor
   7368 		 * at the end of the ring due to the semantics of the
   7369 		 * TDT register, plus one more in the event we need
   7370 		 * to load offload context.
   7371 		 */
   7372 		if (segs_needed > txq->txq_free - 2) {
   7373 			/*
   7374 			 * Not enough free descriptors to transmit this
   7375 			 * packet.  We haven't committed anything yet,
   7376 			 * so just unload the DMA map, put the packet
    7377 			 * back on the queue, and punt.  Notify the upper
   7378 			 * layer that there are no more slots left.
   7379 			 */
   7380 			DPRINTF(WM_DEBUG_TX,
   7381 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7382 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7383 			    segs_needed, txq->txq_free - 1));
   7384 			if (!is_transmit)
   7385 				ifp->if_flags |= IFF_OACTIVE;
   7386 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7387 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7388 			WM_Q_EVCNT_INCR(txq, txdstall);
   7389 			break;
   7390 		}
   7391 
   7392 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7393 
   7394 		DPRINTF(WM_DEBUG_TX,
   7395 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7396 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7397 
   7398 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7399 
   7400 		/*
   7401 		 * Store a pointer to the packet so that we can free it
   7402 		 * later.
   7403 		 *
   7404 		 * Initially, we consider the number of descriptors the
    7405 		 * packet uses to be the number of DMA segments.  This may be
   7406 		 * incremented by 1 if we do checksum offload (a descriptor
   7407 		 * is used to set the checksum context).
   7408 		 */
   7409 		txs->txs_mbuf = m0;
   7410 		txs->txs_firstdesc = txq->txq_next;
   7411 		txs->txs_ndesc = segs_needed;
   7412 
   7413 		/* Set up offload parameters for this packet. */
   7414 		uint32_t cmdlen, fields, dcmdlen;
   7415 		if (m0->m_pkthdr.csum_flags &
   7416 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7417 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7418 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7419 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7420 			    &do_csum) != 0) {
   7421 				/* Error message already displayed. */
   7422 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7423 				continue;
   7424 			}
   7425 		} else {
   7426 			do_csum = false;
   7427 			cmdlen = 0;
   7428 			fields = 0;
   7429 		}
   7430 
   7431 		/* Sync the DMA map. */
   7432 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7433 		    BUS_DMASYNC_PREWRITE);
   7434 
   7435 		/* Initialize the first transmit descriptor. */
   7436 		nexttx = txq->txq_next;
   7437 		if (!do_csum) {
    7438 			/* Set up a legacy descriptor. */
   7439 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7440 			    dmamap->dm_segs[0].ds_addr);
   7441 			txq->txq_descs[nexttx].wtx_cmdlen =
   7442 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7443 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7444 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7445 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7446 			    NULL) {
   7447 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7448 				    htole32(WTX_CMD_VLE);
   7449 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7450 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7451 			} else {
   7452 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7453 			}
   7454 			dcmdlen = 0;
   7455 		} else {
    7456 			/* Set up an advanced data descriptor. */
   7457 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7458 			    htole64(dmamap->dm_segs[0].ds_addr);
   7459 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7460 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7461 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7462 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7463 			    htole32(fields);
   7464 			DPRINTF(WM_DEBUG_TX,
   7465 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7466 			    device_xname(sc->sc_dev), nexttx,
   7467 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7468 			DPRINTF(WM_DEBUG_TX,
   7469 			    ("\t 0x%08x%08x\n", fields,
   7470 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7471 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7472 		}
   7473 
   7474 		lasttx = nexttx;
   7475 		nexttx = WM_NEXTTX(txq, nexttx);
   7476 		/*
    7477 		 * Fill in the next descriptors.  The legacy and advanced
    7478 		 * formats are the same here.
   7479 		 */
   7480 		for (seg = 1; seg < dmamap->dm_nsegs;
   7481 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7482 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7483 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7484 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7485 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7486 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7487 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7488 			lasttx = nexttx;
   7489 
   7490 			DPRINTF(WM_DEBUG_TX,
   7491 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7492 			     "len %#04zx\n",
   7493 			    device_xname(sc->sc_dev), nexttx,
   7494 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7495 			    dmamap->dm_segs[seg].ds_len));
   7496 		}
   7497 
   7498 		KASSERT(lasttx != -1);
   7499 
   7500 		/*
   7501 		 * Set up the command byte on the last descriptor of
   7502 		 * the packet.  If we're in the interrupt delay window,
   7503 		 * delay the interrupt.
   7504 		 */
   7505 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7506 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7507 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7508 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7509 
   7510 		txs->txs_lastdesc = lasttx;
   7511 
   7512 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7513 		    device_xname(sc->sc_dev),
   7514 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7515 
   7516 		/* Sync the descriptors we're using. */
   7517 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7518 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7519 
   7520 		/* Give the packet to the chip. */
   7521 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7522 		sent = true;
   7523 
   7524 		DPRINTF(WM_DEBUG_TX,
   7525 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7526 
   7527 		DPRINTF(WM_DEBUG_TX,
   7528 		    ("%s: TX: finished transmitting packet, job %d\n",
   7529 		    device_xname(sc->sc_dev), txq->txq_snext));
   7530 
   7531 		/* Advance the tx pointer. */
   7532 		txq->txq_free -= txs->txs_ndesc;
   7533 		txq->txq_next = nexttx;
   7534 
   7535 		txq->txq_sfree--;
   7536 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7537 
   7538 		/* Pass the packet to any BPF listeners. */
   7539 		bpf_mtap(ifp, m0);
   7540 	}
   7541 
   7542 	if (m0 != NULL) {
   7543 		if (!is_transmit)
   7544 			ifp->if_flags |= IFF_OACTIVE;
   7545 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7546 		WM_Q_EVCNT_INCR(txq, txdrop);
   7547 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7548 			__func__));
   7549 		m_freem(m0);
   7550 	}
   7551 
   7552 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7553 		/* No more slots; notify upper layer. */
   7554 		if (!is_transmit)
   7555 			ifp->if_flags |= IFF_OACTIVE;
   7556 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7557 	}
   7558 
   7559 	if (sent) {
   7560 		/* Set a watchdog timer in case the chip flakes out. */
   7561 		ifp->if_timer = 5;
   7562 	}
   7563 }
   7564 
   7565 static void
   7566 wm_deferred_start_locked(struct wm_txqueue *txq)
   7567 {
   7568 	struct wm_softc *sc = txq->txq_sc;
   7569 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7570 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7571 	int qid = wmq->wmq_id;
   7572 
   7573 	KASSERT(mutex_owned(txq->txq_lock));
   7574 
   7575 	if (txq->txq_stopping) {
   7576 		mutex_exit(txq->txq_lock);
   7577 		return;
   7578 	}
   7579 
   7580 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7581 		/* XXX needed for ALTQ */
   7582 		if (qid == 0)
   7583 			wm_nq_start_locked(ifp);
   7584 		wm_nq_transmit_locked(ifp, txq);
   7585 	} else {
    7586 		/* XXX needed for ALTQ */
   7587 		if (qid == 0)
   7588 			wm_start_locked(ifp);
   7589 		wm_transmit_locked(ifp, txq);
   7590 	}
   7591 }
   7592 
   7593 /* Interrupt */
   7594 
   7595 /*
   7596  * wm_txeof:
   7597  *
   7598  *	Helper; handle transmit interrupts.
   7599  */
   7600 static int
   7601 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7602 {
   7603 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7604 	struct wm_txsoft *txs;
   7605 	bool processed = false;
   7606 	int count = 0;
   7607 	int i;
   7608 	uint8_t status;
   7609 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7610 
   7611 	KASSERT(mutex_owned(txq->txq_lock));
   7612 
   7613 	if (txq->txq_stopping)
   7614 		return 0;
   7615 
   7616 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7617 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   7618 	if (wmq->wmq_id == 0)
   7619 		ifp->if_flags &= ~IFF_OACTIVE;
   7620 
   7621 	/*
   7622 	 * Go through the Tx list and free mbufs for those
   7623 	 * frames which have been transmitted.
   7624 	 */
   7625 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7626 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7627 		txs = &txq->txq_soft[i];
   7628 
   7629 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7630 			device_xname(sc->sc_dev), i));
   7631 
   7632 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7633 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7634 
   7635 		status =
   7636 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7637 		if ((status & WTX_ST_DD) == 0) {
   7638 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7639 			    BUS_DMASYNC_PREREAD);
   7640 			break;
   7641 		}
   7642 
   7643 		processed = true;
   7644 		count++;
   7645 		DPRINTF(WM_DEBUG_TX,
   7646 		    ("%s: TX: job %d done: descs %d..%d\n",
   7647 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7648 		    txs->txs_lastdesc));
   7649 
   7650 		/*
   7651 		 * XXX We should probably be using the statistics
   7652 		 * XXX registers, but I don't know if they exist
   7653 		 * XXX on chips before the i82544.
   7654 		 */
   7655 
   7656 #ifdef WM_EVENT_COUNTERS
   7657 		if (status & WTX_ST_TU)
   7658 			WM_Q_EVCNT_INCR(txq, tu);
   7659 #endif /* WM_EVENT_COUNTERS */
   7660 
   7661 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7662 			ifp->if_oerrors++;
   7663 			if (status & WTX_ST_LC)
   7664 				log(LOG_WARNING, "%s: late collision\n",
   7665 				    device_xname(sc->sc_dev));
   7666 			else if (status & WTX_ST_EC) {
   7667 				ifp->if_collisions += 16;
   7668 				log(LOG_WARNING, "%s: excessive collisions\n",
   7669 				    device_xname(sc->sc_dev));
   7670 			}
   7671 		} else
   7672 			ifp->if_opackets++;
   7673 
   7674 		txq->txq_packets++;
   7675 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7676 
   7677 		txq->txq_free += txs->txs_ndesc;
   7678 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7679 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7680 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7681 		m_freem(txs->txs_mbuf);
   7682 		txs->txs_mbuf = NULL;
   7683 	}
   7684 
   7685 	/* Update the dirty transmit buffer pointer. */
   7686 	txq->txq_sdirty = i;
   7687 	DPRINTF(WM_DEBUG_TX,
   7688 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7689 
   7690 	if (count != 0)
   7691 		rnd_add_uint32(&sc->rnd_source, count);
   7692 
   7693 	/*
   7694 	 * If there are no more pending transmissions, cancel the watchdog
   7695 	 * timer.
   7696 	 */
   7697 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7698 		ifp->if_timer = 0;
   7699 
   7700 	return processed;
   7701 }
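
         /*
          * The wm_rxdesc_* helpers below hide the three receive descriptor
          * layouts from wm_rxeof(): the legacy format (wrx_*), the 82574
          * extended format (erx_*), and the advanced format (nqrx_*) used by
          * WM_F_NEWQUEUE (82575 and newer) devices.
          */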
   7702 
   7703 static inline uint32_t
   7704 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7705 {
   7706 	struct wm_softc *sc = rxq->rxq_sc;
   7707 
   7708 	if (sc->sc_type == WM_T_82574)
   7709 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7710 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7711 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7712 	else
   7713 		return rxq->rxq_descs[idx].wrx_status;
   7714 }
   7715 
   7716 static inline uint32_t
   7717 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7718 {
   7719 	struct wm_softc *sc = rxq->rxq_sc;
   7720 
   7721 	if (sc->sc_type == WM_T_82574)
   7722 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7723 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7724 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7725 	else
   7726 		return rxq->rxq_descs[idx].wrx_errors;
   7727 }
   7728 
   7729 static inline uint16_t
   7730 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7731 {
   7732 	struct wm_softc *sc = rxq->rxq_sc;
   7733 
   7734 	if (sc->sc_type == WM_T_82574)
   7735 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7736 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7737 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7738 	else
   7739 		return rxq->rxq_descs[idx].wrx_special;
   7740 }
   7741 
   7742 static inline int
   7743 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7744 {
   7745 	struct wm_softc *sc = rxq->rxq_sc;
   7746 
   7747 	if (sc->sc_type == WM_T_82574)
   7748 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7749 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7750 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7751 	else
   7752 		return rxq->rxq_descs[idx].wrx_len;
   7753 }
   7754 
   7755 #ifdef WM_DEBUG
   7756 static inline uint32_t
   7757 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7758 {
   7759 	struct wm_softc *sc = rxq->rxq_sc;
   7760 
   7761 	if (sc->sc_type == WM_T_82574)
   7762 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7763 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7764 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7765 	else
   7766 		return 0;
   7767 }
   7768 
   7769 static inline uint8_t
   7770 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7771 {
   7772 	struct wm_softc *sc = rxq->rxq_sc;
   7773 
   7774 	if (sc->sc_type == WM_T_82574)
   7775 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7776 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7777 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7778 	else
   7779 		return 0;
   7780 }
   7781 #endif /* WM_DEBUG */
   7782 
   7783 static inline bool
   7784 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7785     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7786 {
   7787 
   7788 	if (sc->sc_type == WM_T_82574)
   7789 		return (status & ext_bit) != 0;
   7790 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7791 		return (status & nq_bit) != 0;
   7792 	else
   7793 		return (status & legacy_bit) != 0;
   7794 }
   7795 
   7796 static inline bool
   7797 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7798     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7799 {
   7800 
   7801 	if (sc->sc_type == WM_T_82574)
   7802 		return (error & ext_bit) != 0;
   7803 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7804 		return (error & nq_bit) != 0;
   7805 	else
   7806 		return (error & legacy_bit) != 0;
   7807 }
   7808 
   7809 static inline bool
   7810 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7811 {
   7812 
   7813 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7814 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7815 		return true;
   7816 	else
   7817 		return false;
   7818 }
   7819 
   7820 static inline bool
   7821 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7822 {
   7823 	struct wm_softc *sc = rxq->rxq_sc;
   7824 
   7825 	/* XXXX missing error bit for newqueue? */
   7826 	if (wm_rxdesc_is_set_error(sc, errors,
   7827 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7828 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7829 		NQRXC_ERROR_RXE)) {
   7830 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7831 			log(LOG_WARNING, "%s: symbol error\n",
   7832 			    device_xname(sc->sc_dev));
   7833 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7834 			log(LOG_WARNING, "%s: receive sequence error\n",
   7835 			    device_xname(sc->sc_dev));
   7836 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7837 			log(LOG_WARNING, "%s: CRC error\n",
   7838 			    device_xname(sc->sc_dev));
   7839 		return true;
   7840 	}
   7841 
   7842 	return false;
   7843 }
   7844 
   7845 static inline bool
   7846 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7847 {
   7848 	struct wm_softc *sc = rxq->rxq_sc;
   7849 
   7850 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7851 		NQRXC_STATUS_DD)) {
   7852 		/* We have processed all of the receive descriptors. */
   7853 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7854 		return false;
   7855 	}
   7856 
   7857 	return true;
   7858 }
   7859 
   7860 static inline bool
   7861 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7862     struct mbuf *m)
   7863 {
   7864 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7865 
   7866 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7867 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7868 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7869 	}
   7870 
   7871 	return true;
   7872 }
   7873 
   7874 static inline void
   7875 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7876     uint32_t errors, struct mbuf *m)
   7877 {
   7878 	struct wm_softc *sc = rxq->rxq_sc;
   7879 
   7880 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7881 		if (wm_rxdesc_is_set_status(sc, status,
   7882 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7883 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7884 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7885 			if (wm_rxdesc_is_set_error(sc, errors,
   7886 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7887 				m->m_pkthdr.csum_flags |=
   7888 					M_CSUM_IPv4_BAD;
   7889 		}
   7890 		if (wm_rxdesc_is_set_status(sc, status,
   7891 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7892 			/*
   7893 			 * Note: we don't know if this was TCP or UDP,
   7894 			 * so we just set both bits, and expect the
   7895 			 * upper layers to deal.
   7896 			 */
   7897 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7898 			m->m_pkthdr.csum_flags |=
   7899 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7900 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7901 			if (wm_rxdesc_is_set_error(sc, errors,
   7902 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7903 				m->m_pkthdr.csum_flags |=
   7904 					M_CSUM_TCP_UDP_BAD;
   7905 		}
   7906 	}
   7907 }
   7908 
   7909 /*
   7910  * wm_rxeof:
   7911  *
   7912  *	Helper; handle receive interrupts.
   7913  */
   7914 static void
   7915 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7916 {
   7917 	struct wm_softc *sc = rxq->rxq_sc;
   7918 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7919 	struct wm_rxsoft *rxs;
   7920 	struct mbuf *m;
   7921 	int i, len;
   7922 	int count = 0;
   7923 	uint32_t status, errors;
   7924 	uint16_t vlantag;
   7925 
   7926 	KASSERT(mutex_owned(rxq->rxq_lock));
   7927 
   7928 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7929 		if (limit-- == 0) {
   7930 			rxq->rxq_ptr = i;
   7931 			break;
   7932 		}
   7933 
   7934 		rxs = &rxq->rxq_soft[i];
   7935 
   7936 		DPRINTF(WM_DEBUG_RX,
   7937 		    ("%s: RX: checking descriptor %d\n",
   7938 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7940 
   7941 		status = wm_rxdesc_get_status(rxq, i);
   7942 		errors = wm_rxdesc_get_errors(rxq, i);
   7943 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7944 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7945 #ifdef WM_DEBUG
   7946 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7947 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7948 #endif
   7949 
   7950 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while holding
			 * rxq_lock, keeping it consistent with the
			 * incremented counters.
			 */
   7955 			rxq->rxq_ptr = i;
   7956 			break;
   7957 		}
   7958 
   7959 		count++;
   7960 		if (__predict_false(rxq->rxq_discard)) {
   7961 			DPRINTF(WM_DEBUG_RX,
   7962 			    ("%s: RX: discarding contents of descriptor %d\n",
   7963 			    device_xname(sc->sc_dev), i));
   7964 			wm_init_rxdesc(rxq, i);
   7965 			if (wm_rxdesc_is_eop(rxq, status)) {
   7966 				/* Reset our state. */
   7967 				DPRINTF(WM_DEBUG_RX,
   7968 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7969 				    device_xname(sc->sc_dev)));
   7970 				rxq->rxq_discard = 0;
   7971 			}
   7972 			continue;
   7973 		}
   7974 
   7975 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7976 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7977 
   7978 		m = rxs->rxs_mbuf;
   7979 
   7980 		/*
   7981 		 * Add a new receive buffer to the ring, unless of
   7982 		 * course the length is zero. Treat the latter as a
   7983 		 * failed mapping.
   7984 		 */
   7985 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7986 			/*
   7987 			 * Failed, throw away what we've done so
   7988 			 * far, and discard the rest of the packet.
   7989 			 */
   7990 			ifp->if_ierrors++;
   7991 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7992 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7993 			wm_init_rxdesc(rxq, i);
   7994 			if (!wm_rxdesc_is_eop(rxq, status))
   7995 				rxq->rxq_discard = 1;
   7996 			if (rxq->rxq_head != NULL)
   7997 				m_freem(rxq->rxq_head);
   7998 			WM_RXCHAIN_RESET(rxq);
   7999 			DPRINTF(WM_DEBUG_RX,
   8000 			    ("%s: RX: Rx buffer allocation failed, "
   8001 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8002 			    rxq->rxq_discard ? " (discard)" : ""));
   8003 			continue;
   8004 		}
   8005 
   8006 		m->m_len = len;
   8007 		rxq->rxq_len += len;
   8008 		DPRINTF(WM_DEBUG_RX,
   8009 		    ("%s: RX: buffer at %p len %d\n",
   8010 		    device_xname(sc->sc_dev), m->m_data, len));
   8011 
   8012 		/* If this is not the end of the packet, keep looking. */
   8013 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8014 			WM_RXCHAIN_LINK(rxq, m);
   8015 			DPRINTF(WM_DEBUG_RX,
   8016 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8017 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8018 			continue;
   8019 		}
   8020 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I350, I354
		 * and I21[01] (not all chips can be configured to
		 * strip it), so we need to trim it.  We may also need
		 * to adjust the length of the previous mbuf in the
		 * chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so the FCS is
		 * already stripped there and we don't trim it.
		 */
   8031 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8032 		    && (sc->sc_type != WM_T_I210)
   8033 		    && (sc->sc_type != WM_T_I211)) {
   8034 			if (m->m_len < ETHER_CRC_LEN) {
   8035 				rxq->rxq_tail->m_len
   8036 				    -= (ETHER_CRC_LEN - m->m_len);
   8037 				m->m_len = 0;
   8038 			} else
   8039 				m->m_len -= ETHER_CRC_LEN;
   8040 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8041 		} else
   8042 			len = rxq->rxq_len;
   8043 
   8044 		WM_RXCHAIN_LINK(rxq, m);
   8045 
   8046 		*rxq->rxq_tailp = NULL;
   8047 		m = rxq->rxq_head;
   8048 
   8049 		WM_RXCHAIN_RESET(rxq);
   8050 
   8051 		DPRINTF(WM_DEBUG_RX,
   8052 		    ("%s: RX: have entire packet, len -> %d\n",
   8053 		    device_xname(sc->sc_dev), len));
   8054 
   8055 		/* If an error occurred, update stats and drop the packet. */
   8056 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8057 			m_freem(m);
   8058 			continue;
   8059 		}
   8060 
   8061 		/* No errors.  Receive the packet. */
   8062 		m_set_rcvif(m, ifp);
   8063 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * The rsshash and rsstype should be saved to this mbuf.
		 */
   8068 		DPRINTF(WM_DEBUG_RX,
   8069 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8070 			device_xname(sc->sc_dev), rsstype, rsshash));
   8071 
   8072 		/*
   8073 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8074 		 * for us.  Associate the tag with the packet.
   8075 		 */
   8076 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8077 			continue;
   8078 
   8079 		/* Set up checksum info for this packet. */
   8080 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while holding rxq_lock,
		 * keeping it consistent with the incremented counters.
		 */
   8085 		rxq->rxq_ptr = i;
   8086 		rxq->rxq_packets++;
   8087 		rxq->rxq_bytes += len;
   8088 		mutex_exit(rxq->rxq_lock);
   8089 
   8090 		/* Pass it on. */
   8091 		if_percpuq_enqueue(sc->sc_ipq, m);
   8092 
   8093 		mutex_enter(rxq->rxq_lock);
   8094 
   8095 		if (rxq->rxq_stopping)
   8096 			break;
   8097 	}
   8098 
   8099 	if (count != 0)
   8100 		rnd_add_uint32(&sc->rnd_source, count);
   8101 
   8102 	DPRINTF(WM_DEBUG_RX,
   8103 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8104 }
   8105 
   8106 /*
   8107  * wm_linkintr_gmii:
   8108  *
   8109  *	Helper; handle link interrupts for GMII.
   8110  */
   8111 static void
   8112 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8113 {
   8114 
   8115 	KASSERT(WM_CORE_LOCKED(sc));
   8116 
   8117 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8118 		__func__));
   8119 
   8120 	if (icr & ICR_LSC) {
   8121 		uint32_t reg;
   8122 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8123 
   8124 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8125 			wm_gig_downshift_workaround_ich8lan(sc);
   8126 
   8127 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8128 			device_xname(sc->sc_dev)));
   8129 		mii_pollstat(&sc->sc_mii);
   8130 		if (sc->sc_type == WM_T_82543) {
   8131 			int miistatus, active;
   8132 
   8133 			/*
   8134 			 * With 82543, we need to force speed and
   8135 			 * duplex on the MAC equal to what the PHY
   8136 			 * speed and duplex configuration is.
   8137 			 */
   8138 			miistatus = sc->sc_mii.mii_media_status;
   8139 
   8140 			if (miistatus & IFM_ACTIVE) {
   8141 				active = sc->sc_mii.mii_media_active;
   8142 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8143 				switch (IFM_SUBTYPE(active)) {
   8144 				case IFM_10_T:
   8145 					sc->sc_ctrl |= CTRL_SPEED_10;
   8146 					break;
   8147 				case IFM_100_TX:
   8148 					sc->sc_ctrl |= CTRL_SPEED_100;
   8149 					break;
   8150 				case IFM_1000_T:
   8151 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8152 					break;
   8153 				default:
   8154 					/*
   8155 					 * fiber?
					 * Should not enter here.
   8157 					 */
   8158 					printf("unknown media (%x)\n", active);
   8159 					break;
   8160 				}
   8161 				if (active & IFM_FDX)
   8162 					sc->sc_ctrl |= CTRL_FD;
   8163 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8164 			}
   8165 		} else if ((sc->sc_type == WM_T_ICH8)
   8166 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8167 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8168 		} else if (sc->sc_type == WM_T_PCH) {
   8169 			wm_k1_gig_workaround_hv(sc,
   8170 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8171 		}
   8172 
   8173 		if ((sc->sc_phytype == WMPHY_82578)
   8174 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8175 			== IFM_1000_T)) {
   8176 
   8177 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8178 				delay(200*1000); /* XXX too big */
   8179 
   8180 				/* Link stall fix for link up */
   8181 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8182 				    HV_MUX_DATA_CTRL,
   8183 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8184 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8185 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8186 				    HV_MUX_DATA_CTRL,
   8187 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8188 			}
   8189 		}
   8190 		/*
   8191 		 * I217 Packet Loss issue:
   8192 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8193 		 * on power up.
   8194 		 * Set the Beacon Duration for I217 to 8 usec
   8195 		 */
   8196 		if ((sc->sc_type == WM_T_PCH_LPT)
   8197 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8198 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8199 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8200 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8201 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8202 		}
   8203 
   8204 		/* XXX Work-around I218 hang issue */
   8205 		/* e1000_k1_workaround_lpt_lp() */
   8206 
   8207 		if ((sc->sc_type == WM_T_PCH_LPT)
   8208 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8209 			/*
   8210 			 * Set platform power management values for Latency
   8211 			 * Tolerance Reporting (LTR)
   8212 			 */
   8213 			wm_platform_pm_pch_lpt(sc,
   8214 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8215 				    != 0));
   8216 		}
   8217 
   8218 		/* FEXTNVM6 K1-off workaround */
   8219 		if (sc->sc_type == WM_T_PCH_SPT) {
   8220 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8221 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8222 			    & FEXTNVM6_K1_OFF_ENABLE)
   8223 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8224 			else
   8225 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8226 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8227 		}
   8228 	} else if (icr & ICR_RXSEQ) {
   8229 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8230 			device_xname(sc->sc_dev)));
   8231 	}
   8232 }
   8233 
   8234 /*
   8235  * wm_linkintr_tbi:
   8236  *
   8237  *	Helper; handle link interrupts for TBI mode.
   8238  */
   8239 static void
   8240 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8241 {
   8242 	uint32_t status;
   8243 
   8244 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8245 		__func__));
   8246 
   8247 	status = CSR_READ(sc, WMREG_STATUS);
   8248 	if (icr & ICR_LSC) {
   8249 		if (status & STATUS_LU) {
   8250 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8251 			    device_xname(sc->sc_dev),
   8252 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8253 			/*
   8254 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8255 			 * so we should update sc->sc_ctrl
   8256 			 */
   8257 
   8258 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8259 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8260 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8261 			if (status & STATUS_FD)
   8262 				sc->sc_tctl |=
   8263 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8264 			else
   8265 				sc->sc_tctl |=
   8266 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8267 			if (sc->sc_ctrl & CTRL_TFCE)
   8268 				sc->sc_fcrtl |= FCRTL_XONE;
   8269 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8270 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8271 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8272 				      sc->sc_fcrtl);
   8273 			sc->sc_tbi_linkup = 1;
   8274 		} else {
   8275 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8276 			    device_xname(sc->sc_dev)));
   8277 			sc->sc_tbi_linkup = 0;
   8278 		}
   8279 		/* Update LED */
   8280 		wm_tbi_serdes_set_linkled(sc);
   8281 	} else if (icr & ICR_RXSEQ) {
   8282 		DPRINTF(WM_DEBUG_LINK,
   8283 		    ("%s: LINK: Receive sequence error\n",
   8284 		    device_xname(sc->sc_dev)));
   8285 	}
   8286 }
   8287 
   8288 /*
   8289  * wm_linkintr_serdes:
   8290  *
 *	Helper; handle link interrupts for SERDES mode.
   8292  */
   8293 static void
   8294 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8295 {
   8296 	struct mii_data *mii = &sc->sc_mii;
   8297 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8298 	uint32_t pcs_adv, pcs_lpab, reg;
   8299 
   8300 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8301 		__func__));
   8302 
   8303 	if (icr & ICR_LSC) {
   8304 		/* Check PCS */
   8305 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8306 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8307 			mii->mii_media_status |= IFM_ACTIVE;
   8308 			sc->sc_tbi_linkup = 1;
   8309 		} else {
			mii->mii_media_active |= IFM_NONE;
   8311 			sc->sc_tbi_linkup = 0;
   8312 			wm_tbi_serdes_set_linkled(sc);
   8313 			return;
   8314 		}
   8315 		mii->mii_media_active |= IFM_1000_SX;
   8316 		if ((reg & PCS_LSTS_FDX) != 0)
   8317 			mii->mii_media_active |= IFM_FDX;
   8318 		else
   8319 			mii->mii_media_active |= IFM_HDX;
   8320 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8321 			/* Check flow */
   8322 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8323 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8324 				DPRINTF(WM_DEBUG_LINK,
   8325 				    ("XXX LINKOK but not ACOMP\n"));
   8326 				return;
   8327 			}
   8328 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8329 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8330 			DPRINTF(WM_DEBUG_LINK,
   8331 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8332 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8333 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8334 				mii->mii_media_active |= IFM_FLOW
   8335 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8336 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8337 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8338 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8339 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8340 				mii->mii_media_active |= IFM_FLOW
   8341 				    | IFM_ETH_TXPAUSE;
   8342 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8343 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8344 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8345 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8346 				mii->mii_media_active |= IFM_FLOW
   8347 				    | IFM_ETH_RXPAUSE;
   8348 		}
   8349 		/* Update LED */
   8350 		wm_tbi_serdes_set_linkled(sc);
   8351 	} else {
   8352 		DPRINTF(WM_DEBUG_LINK,
   8353 		    ("%s: LINK: Receive sequence error\n",
   8354 		    device_xname(sc->sc_dev)));
   8355 	}
   8356 }
   8357 
   8358 /*
   8359  * wm_linkintr:
   8360  *
   8361  *	Helper; handle link interrupts.
   8362  */
   8363 static void
   8364 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8365 {
   8366 
   8367 	KASSERT(WM_CORE_LOCKED(sc));
   8368 
   8369 	if (sc->sc_flags & WM_F_HAS_MII)
   8370 		wm_linkintr_gmii(sc, icr);
   8371 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8372 	    && (sc->sc_type >= WM_T_82575))
   8373 		wm_linkintr_serdes(sc, icr);
   8374 	else
   8375 		wm_linkintr_tbi(sc, icr);
   8376 }
   8377 
   8378 /*
   8379  * wm_intr_legacy:
   8380  *
   8381  *	Interrupt service routine for INTx and MSI.
   8382  */
   8383 static int
   8384 wm_intr_legacy(void *arg)
   8385 {
   8386 	struct wm_softc *sc = arg;
   8387 	struct wm_queue *wmq = &sc->sc_queue[0];
   8388 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8389 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8390 	uint32_t icr, rndval = 0;
   8391 	int handled = 0;
   8392 
   8393 	DPRINTF(WM_DEBUG_TX,
   8394 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8395 	while (1 /* CONSTCOND */) {
   8396 		icr = CSR_READ(sc, WMREG_ICR);
   8397 		if ((icr & sc->sc_icr) == 0)
   8398 			break;
   8399 		if (rndval == 0)
   8400 			rndval = icr;
   8401 
   8402 		mutex_enter(rxq->rxq_lock);
   8403 
   8404 		if (rxq->rxq_stopping) {
   8405 			mutex_exit(rxq->rxq_lock);
   8406 			break;
   8407 		}
   8408 
   8409 		handled = 1;
   8410 
   8411 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8412 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8413 			DPRINTF(WM_DEBUG_RX,
   8414 			    ("%s: RX: got Rx intr 0x%08x\n",
   8415 			    device_xname(sc->sc_dev),
   8416 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8417 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8418 		}
   8419 #endif
   8420 		wm_rxeof(rxq, UINT_MAX);
   8421 
   8422 		mutex_exit(rxq->rxq_lock);
   8423 		mutex_enter(txq->txq_lock);
   8424 
   8425 		if (txq->txq_stopping) {
   8426 			mutex_exit(txq->txq_lock);
   8427 			break;
   8428 		}
   8429 
   8430 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8431 		if (icr & ICR_TXDW) {
   8432 			DPRINTF(WM_DEBUG_TX,
   8433 			    ("%s: TX: got TXDW interrupt\n",
   8434 			    device_xname(sc->sc_dev)));
   8435 			WM_Q_EVCNT_INCR(txq, txdw);
   8436 		}
   8437 #endif
   8438 		wm_txeof(sc, txq);
   8439 
   8440 		mutex_exit(txq->txq_lock);
   8441 		WM_CORE_LOCK(sc);
   8442 
   8443 		if (sc->sc_core_stopping) {
   8444 			WM_CORE_UNLOCK(sc);
   8445 			break;
   8446 		}
   8447 
   8448 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8449 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8450 			wm_linkintr(sc, icr);
   8451 		}
   8452 
   8453 		WM_CORE_UNLOCK(sc);
   8454 
   8455 		if (icr & ICR_RXO) {
   8456 #if defined(WM_DEBUG)
   8457 			log(LOG_WARNING, "%s: Receive overrun\n",
   8458 			    device_xname(sc->sc_dev));
   8459 #endif /* defined(WM_DEBUG) */
   8460 		}
   8461 	}
   8462 
   8463 	rnd_add_uint32(&sc->rnd_source, rndval);
   8464 
   8465 	if (handled) {
   8466 		/* Try to get more packets going. */
   8467 		softint_schedule(wmq->wmq_si);
   8468 	}
   8469 
   8470 	return handled;
   8471 }
   8472 
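/*
 * wm_txrxintr_disable:
 *
 *	Helper; mask the Tx/Rx interrupts of a queue while it is being
 *	serviced. The mask register and bit layout depend on the MAC
 *	type.
 */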
   8473 static inline void
   8474 wm_txrxintr_disable(struct wm_queue *wmq)
   8475 {
   8476 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8477 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8484 }
   8485 
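/*
 * wm_txrxintr_enable:
 *
 *	Helper; recalculate the interrupt throttling rate and unmask
 *	the Tx/Rx interrupts of a queue.
 */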
   8486 static inline void
   8487 wm_txrxintr_enable(struct wm_queue *wmq)
   8488 {
   8489 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8490 
   8491 	wm_itrs_calculate(sc, wmq);
   8492 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8499 }
   8500 
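/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for Tx and Rx of a queue pair for MSI-X.
 */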
   8501 static int
   8502 wm_txrxintr_msix(void *arg)
   8503 {
   8504 	struct wm_queue *wmq = arg;
   8505 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8506 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8507 	struct wm_softc *sc = txq->txq_sc;
   8508 	u_int limit = sc->sc_rx_intr_process_limit;
   8509 
   8510 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8511 
   8512 	DPRINTF(WM_DEBUG_TX,
   8513 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8514 
   8515 	wm_txrxintr_disable(wmq);
   8516 
   8517 	mutex_enter(txq->txq_lock);
   8518 
   8519 	if (txq->txq_stopping) {
   8520 		mutex_exit(txq->txq_lock);
   8521 		return 0;
   8522 	}
   8523 
   8524 	WM_Q_EVCNT_INCR(txq, txdw);
   8525 	wm_txeof(sc, txq);
   8526 	/* wm_deferred start() is done in wm_handle_queue(). */
   8527 	mutex_exit(txq->txq_lock);
   8528 
   8529 	DPRINTF(WM_DEBUG_RX,
   8530 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8531 	mutex_enter(rxq->rxq_lock);
   8532 
   8533 	if (rxq->rxq_stopping) {
   8534 		mutex_exit(rxq->rxq_lock);
   8535 		return 0;
   8536 	}
   8537 
   8538 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8539 	wm_rxeof(rxq, limit);
   8540 	mutex_exit(rxq->rxq_lock);
   8541 
   8542 	wm_itrs_writereg(sc, wmq);
   8543 
   8544 	softint_schedule(wmq->wmq_si);
   8545 
   8546 	return 1;
   8547 }
   8548 
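/*
 * wm_handle_queue:
 *
 *	Softint handler; complete Tx processing, run the deferred start
 *	routine, process received packets and then re-enable this
 *	queue's interrupts.
 */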
   8549 static void
   8550 wm_handle_queue(void *arg)
   8551 {
   8552 	struct wm_queue *wmq = arg;
   8553 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8554 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8555 	struct wm_softc *sc = txq->txq_sc;
   8556 	u_int limit = sc->sc_rx_process_limit;
   8557 
   8558 	mutex_enter(txq->txq_lock);
   8559 	if (txq->txq_stopping) {
   8560 		mutex_exit(txq->txq_lock);
   8561 		return;
   8562 	}
   8563 	wm_txeof(sc, txq);
   8564 	wm_deferred_start_locked(txq);
   8565 	mutex_exit(txq->txq_lock);
   8566 
   8567 	mutex_enter(rxq->rxq_lock);
   8568 	if (rxq->rxq_stopping) {
   8569 		mutex_exit(rxq->rxq_lock);
   8570 		return;
   8571 	}
   8572 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8573 	wm_rxeof(rxq, limit);
   8574 	mutex_exit(rxq->rxq_lock);
   8575 
   8576 	wm_txrxintr_enable(wmq);
   8577 }
   8578 
   8579 /*
   8580  * wm_linkintr_msix:
   8581  *
   8582  *	Interrupt service routine for link status change for MSI-X.
   8583  */
   8584 static int
   8585 wm_linkintr_msix(void *arg)
   8586 {
   8587 	struct wm_softc *sc = arg;
   8588 	uint32_t reg;
   8589 
   8590 	DPRINTF(WM_DEBUG_LINK,
   8591 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8592 
   8593 	reg = CSR_READ(sc, WMREG_ICR);
   8594 	WM_CORE_LOCK(sc);
   8595 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8596 		goto out;
   8597 
   8598 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8599 	wm_linkintr(sc, ICR_LSC);
   8600 
   8601 out:
   8602 	WM_CORE_UNLOCK(sc);
   8603 
   8604 	if (sc->sc_type == WM_T_82574)
   8605 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8606 	else if (sc->sc_type == WM_T_82575)
   8607 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8608 	else
   8609 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8610 
   8611 	return 1;
   8612 }
   8613 
   8614 /*
   8615  * Media related.
   8616  * GMII, SGMII, TBI (and SERDES)
   8617  */
   8618 
   8619 /* Common */
   8620 
   8621 /*
   8622  * wm_tbi_serdes_set_linkled:
   8623  *
   8624  *	Update the link LED on TBI and SERDES devices.
   8625  */
   8626 static void
   8627 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8628 {
   8629 
   8630 	if (sc->sc_tbi_linkup)
   8631 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8632 	else
   8633 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8634 
   8635 	/* 82540 or newer devices are active low */
   8636 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8637 
   8638 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8639 }
   8640 
   8641 /* GMII related */
   8642 
   8643 /*
   8644  * wm_gmii_reset:
   8645  *
   8646  *	Reset the PHY.
   8647  */
   8648 static void
   8649 wm_gmii_reset(struct wm_softc *sc)
   8650 {
   8651 	uint32_t reg;
   8652 	int rv;
   8653 
   8654 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8655 		device_xname(sc->sc_dev), __func__));
   8656 
   8657 	rv = sc->phy.acquire(sc);
   8658 	if (rv != 0) {
   8659 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8660 		    __func__);
   8661 		return;
   8662 	}
   8663 
   8664 	switch (sc->sc_type) {
   8665 	case WM_T_82542_2_0:
   8666 	case WM_T_82542_2_1:
   8667 		/* null */
   8668 		break;
   8669 	case WM_T_82543:
   8670 		/*
   8671 		 * With 82543, we need to force speed and duplex on the MAC
   8672 		 * equal to what the PHY speed and duplex configuration is.
   8673 		 * In addition, we need to perform a hardware reset on the PHY
   8674 		 * to take it out of reset.
   8675 		 */
   8676 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8677 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8678 
   8679 		/* The PHY reset pin is active-low. */
   8680 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8681 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8682 		    CTRL_EXT_SWDPIN(4));
   8683 		reg |= CTRL_EXT_SWDPIO(4);
   8684 
   8685 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8686 		CSR_WRITE_FLUSH(sc);
   8687 		delay(10*1000);
   8688 
   8689 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8690 		CSR_WRITE_FLUSH(sc);
   8691 		delay(150);
   8692 #if 0
   8693 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8694 #endif
   8695 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8696 		break;
   8697 	case WM_T_82544:	/* reset 10000us */
   8698 	case WM_T_82540:
   8699 	case WM_T_82545:
   8700 	case WM_T_82545_3:
   8701 	case WM_T_82546:
   8702 	case WM_T_82546_3:
   8703 	case WM_T_82541:
   8704 	case WM_T_82541_2:
   8705 	case WM_T_82547:
   8706 	case WM_T_82547_2:
   8707 	case WM_T_82571:	/* reset 100us */
   8708 	case WM_T_82572:
   8709 	case WM_T_82573:
   8710 	case WM_T_82574:
   8711 	case WM_T_82575:
   8712 	case WM_T_82576:
   8713 	case WM_T_82580:
   8714 	case WM_T_I350:
   8715 	case WM_T_I354:
   8716 	case WM_T_I210:
   8717 	case WM_T_I211:
   8718 	case WM_T_82583:
   8719 	case WM_T_80003:
   8720 		/* generic reset */
   8721 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8722 		CSR_WRITE_FLUSH(sc);
   8723 		delay(20000);
   8724 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8725 		CSR_WRITE_FLUSH(sc);
   8726 		delay(20000);
   8727 
   8728 		if ((sc->sc_type == WM_T_82541)
   8729 		    || (sc->sc_type == WM_T_82541_2)
   8730 		    || (sc->sc_type == WM_T_82547)
   8731 		    || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for IGP are done in igp_reset() */
   8733 			/* XXX add code to set LED after phy reset */
   8734 		}
   8735 		break;
   8736 	case WM_T_ICH8:
   8737 	case WM_T_ICH9:
   8738 	case WM_T_ICH10:
   8739 	case WM_T_PCH:
   8740 	case WM_T_PCH2:
   8741 	case WM_T_PCH_LPT:
   8742 	case WM_T_PCH_SPT:
   8743 		/* generic reset */
   8744 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8745 		CSR_WRITE_FLUSH(sc);
   8746 		delay(100);
   8747 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8748 		CSR_WRITE_FLUSH(sc);
   8749 		delay(150);
   8750 		break;
   8751 	default:
   8752 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8753 		    __func__);
   8754 		break;
   8755 	}
   8756 
   8757 	sc->phy.release(sc);
   8758 
   8759 	/* get_cfg_done */
   8760 	wm_get_cfg_done(sc);
   8761 
   8762 	/* extra setup */
   8763 	switch (sc->sc_type) {
   8764 	case WM_T_82542_2_0:
   8765 	case WM_T_82542_2_1:
   8766 	case WM_T_82543:
   8767 	case WM_T_82544:
   8768 	case WM_T_82540:
   8769 	case WM_T_82545:
   8770 	case WM_T_82545_3:
   8771 	case WM_T_82546:
   8772 	case WM_T_82546_3:
   8773 	case WM_T_82541_2:
   8774 	case WM_T_82547_2:
   8775 	case WM_T_82571:
   8776 	case WM_T_82572:
   8777 	case WM_T_82573:
   8778 	case WM_T_82575:
   8779 	case WM_T_82576:
   8780 	case WM_T_82580:
   8781 	case WM_T_I350:
   8782 	case WM_T_I354:
   8783 	case WM_T_I210:
   8784 	case WM_T_I211:
   8785 	case WM_T_80003:
   8786 		/* null */
   8787 		break;
   8788 	case WM_T_82574:
   8789 	case WM_T_82583:
   8790 		wm_lplu_d0_disable(sc);
   8791 		break;
   8792 	case WM_T_82541:
   8793 	case WM_T_82547:
   8794 		/* XXX Configure actively LED after PHY reset */
   8795 		break;
   8796 	case WM_T_ICH8:
   8797 	case WM_T_ICH9:
   8798 	case WM_T_ICH10:
   8799 	case WM_T_PCH:
   8800 	case WM_T_PCH2:
   8801 	case WM_T_PCH_LPT:
   8802 	case WM_T_PCH_SPT:
		/* Allow time for h/w to reach a quiescent state after reset */
   8804 		delay(10*1000);
   8805 
   8806 		if (sc->sc_type == WM_T_PCH)
   8807 			wm_hv_phy_workaround_ich8lan(sc);
   8808 
   8809 		if (sc->sc_type == WM_T_PCH2)
   8810 			wm_lv_phy_workaround_ich8lan(sc);
   8811 
   8812 		/* Clear the host wakeup bit after lcd reset */
   8813 		if (sc->sc_type >= WM_T_PCH) {
   8814 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8815 			    BM_PORT_GEN_CFG);
   8816 			reg &= ~BM_WUC_HOST_WU_BIT;
   8817 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8818 			    BM_PORT_GEN_CFG, reg);
   8819 		}
   8820 
   8821 		/*
		 * XXX Configure the LCD with the extended configuration region
   8823 		 * in NVM
   8824 		 */
   8825 
   8826 		/* Disable D0 LPLU. */
   8827 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8828 			wm_lplu_d0_disable_pch(sc);
   8829 		else
   8830 			wm_lplu_d0_disable(sc);	/* ICH* */
   8831 		break;
   8832 	default:
   8833 		panic("%s: unknown type\n", __func__);
   8834 		break;
   8835 	}
   8836 }
   8837 
/*
 * Setup sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select them the PCI ID or MAC type must be used
 * without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. The result might still not be perfect because of missing
 * table entries, but it should be better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   8856 static void
   8857 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8858     uint16_t phy_model)
   8859 {
   8860 	device_t dev = sc->sc_dev;
   8861 	struct mii_data *mii = &sc->sc_mii;
   8862 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8863 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8864 	mii_readreg_t new_readreg;
   8865 	mii_writereg_t new_writereg;
   8866 
   8867 	if (mii->mii_readreg == NULL) {
   8868 		/*
   8869 		 *  This is the first call of this function. For ICH and PCH
   8870 		 * variants, it's difficult to determine the PHY access method
   8871 		 * by sc_type, so use the PCI product ID for some devices.
   8872 		 */
   8873 
   8874 		switch (sc->sc_pcidevid) {
   8875 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8876 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8877 			/* 82577 */
   8878 			new_phytype = WMPHY_82577;
   8879 			break;
   8880 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8881 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8882 			/* 82578 */
   8883 			new_phytype = WMPHY_82578;
   8884 			break;
   8885 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8886 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8887 			/* 82579 */
   8888 			new_phytype = WMPHY_82579;
   8889 			break;
   8890 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8891 		case PCI_PRODUCT_INTEL_82801I_BM:
   8892 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8893 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8894 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8895 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8896 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8897 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8898 			/* ICH8, 9, 10 with 82567 */
   8899 			new_phytype = WMPHY_BM;
   8900 			break;
   8901 		default:
   8902 			break;
   8903 		}
   8904 	} else {
   8905 		/* It's not the first call. Use PHY OUI and model */
   8906 		switch (phy_oui) {
   8907 		case MII_OUI_ATHEROS: /* XXX ??? */
   8908 			switch (phy_model) {
   8909 			case 0x0004: /* XXX */
   8910 				new_phytype = WMPHY_82578;
   8911 				break;
   8912 			default:
   8913 				break;
   8914 			}
   8915 			break;
   8916 		case MII_OUI_xxMARVELL:
   8917 			switch (phy_model) {
   8918 			case MII_MODEL_xxMARVELL_I210:
   8919 				new_phytype = WMPHY_I210;
   8920 				break;
   8921 			case MII_MODEL_xxMARVELL_E1011:
   8922 			case MII_MODEL_xxMARVELL_E1000_3:
   8923 			case MII_MODEL_xxMARVELL_E1000_5:
   8924 			case MII_MODEL_xxMARVELL_E1112:
   8925 				new_phytype = WMPHY_M88;
   8926 				break;
   8927 			case MII_MODEL_xxMARVELL_E1149:
   8928 				new_phytype = WMPHY_BM;
   8929 				break;
   8930 			case MII_MODEL_xxMARVELL_E1111:
   8931 			case MII_MODEL_xxMARVELL_I347:
   8932 			case MII_MODEL_xxMARVELL_E1512:
   8933 			case MII_MODEL_xxMARVELL_E1340M:
   8934 			case MII_MODEL_xxMARVELL_E1543:
   8935 				new_phytype = WMPHY_M88;
   8936 				break;
   8937 			case MII_MODEL_xxMARVELL_I82563:
   8938 				new_phytype = WMPHY_GG82563;
   8939 				break;
   8940 			default:
   8941 				break;
   8942 			}
   8943 			break;
   8944 		case MII_OUI_INTEL:
   8945 			switch (phy_model) {
   8946 			case MII_MODEL_INTEL_I82577:
   8947 				new_phytype = WMPHY_82577;
   8948 				break;
   8949 			case MII_MODEL_INTEL_I82579:
   8950 				new_phytype = WMPHY_82579;
   8951 				break;
   8952 			case MII_MODEL_INTEL_I217:
   8953 				new_phytype = WMPHY_I217;
   8954 				break;
   8955 			case MII_MODEL_INTEL_I82580:
   8956 			case MII_MODEL_INTEL_I350:
   8957 				new_phytype = WMPHY_82580;
   8958 				break;
   8959 			default:
   8960 				break;
   8961 			}
   8962 			break;
   8963 		case MII_OUI_yyINTEL:
   8964 			switch (phy_model) {
   8965 			case MII_MODEL_yyINTEL_I82562G:
   8966 			case MII_MODEL_yyINTEL_I82562EM:
   8967 			case MII_MODEL_yyINTEL_I82562ET:
   8968 				new_phytype = WMPHY_IFE;
   8969 				break;
   8970 			case MII_MODEL_yyINTEL_IGP01E1000:
   8971 				new_phytype = WMPHY_IGP;
   8972 				break;
   8973 			case MII_MODEL_yyINTEL_I82566:
   8974 				new_phytype = WMPHY_IGP_3;
   8975 				break;
   8976 			default:
   8977 				break;
   8978 			}
   8979 			break;
   8980 		default:
   8981 			break;
   8982 		}
   8983 		if (new_phytype == WMPHY_UNKNOWN)
   8984 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8985 			    __func__);
   8986 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
   8992 		}
   8993 	}
   8994 
   8995 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8996 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8997 		/* SGMII */
   8998 		new_readreg = wm_sgmii_readreg;
   8999 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   9001 		/* BM2 (phyaddr == 1) */
   9002 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9003 		    && (new_phytype != WMPHY_BM)
   9004 		    && (new_phytype != WMPHY_UNKNOWN))
   9005 			doubt_phytype = new_phytype;
   9006 		new_phytype = WMPHY_BM;
   9007 		new_readreg = wm_gmii_bm_readreg;
   9008 		new_writereg = wm_gmii_bm_writereg;
   9009 	} else if (sc->sc_type >= WM_T_PCH) {
   9010 		/* All PCH* use _hv_ */
   9011 		new_readreg = wm_gmii_hv_readreg;
   9012 		new_writereg = wm_gmii_hv_writereg;
   9013 	} else if (sc->sc_type >= WM_T_ICH8) {
   9014 		/* non-82567 ICH8, 9 and 10 */
   9015 		new_readreg = wm_gmii_i82544_readreg;
   9016 		new_writereg = wm_gmii_i82544_writereg;
   9017 	} else if (sc->sc_type >= WM_T_80003) {
   9018 		/* 80003 */
   9019 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9020 		    && (new_phytype != WMPHY_GG82563)
   9021 		    && (new_phytype != WMPHY_UNKNOWN))
   9022 			doubt_phytype = new_phytype;
   9023 		new_phytype = WMPHY_GG82563;
   9024 		new_readreg = wm_gmii_i80003_readreg;
   9025 		new_writereg = wm_gmii_i80003_writereg;
   9026 	} else if (sc->sc_type >= WM_T_I210) {
   9027 		/* I210 and I211 */
   9028 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9029 		    && (new_phytype != WMPHY_I210)
   9030 		    && (new_phytype != WMPHY_UNKNOWN))
   9031 			doubt_phytype = new_phytype;
   9032 		new_phytype = WMPHY_I210;
   9033 		new_readreg = wm_gmii_gs40g_readreg;
   9034 		new_writereg = wm_gmii_gs40g_writereg;
   9035 	} else if (sc->sc_type >= WM_T_82580) {
   9036 		/* 82580, I350 and I354 */
   9037 		new_readreg = wm_gmii_82580_readreg;
   9038 		new_writereg = wm_gmii_82580_writereg;
   9039 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9041 		new_readreg = wm_gmii_i82544_readreg;
   9042 		new_writereg = wm_gmii_i82544_writereg;
   9043 	} else {
   9044 		new_readreg = wm_gmii_i82543_readreg;
   9045 		new_writereg = wm_gmii_i82543_writereg;
   9046 	}
   9047 
   9048 	if (new_phytype == WMPHY_BM) {
   9049 		/* All BM use _bm_ */
   9050 		new_readreg = wm_gmii_bm_readreg;
   9051 		new_writereg = wm_gmii_bm_writereg;
   9052 	}
   9053 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9054 		/* All PCH* use _hv_ */
   9055 		new_readreg = wm_gmii_hv_readreg;
   9056 		new_writereg = wm_gmii_hv_writereg;
   9057 	}
   9058 
   9059 	/* Diag output */
   9060 	if (doubt_phytype != WMPHY_UNKNOWN)
   9061 		aprint_error_dev(dev, "Assumed new PHY type was "
   9062 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9063 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type "
		    "(%u) was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   9069 
   9070 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9071 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9072 
   9073 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9074 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9075 		    "function was incorrect.\n");
   9076 
   9077 	/* Update now */
   9078 	sc->sc_phytype = new_phytype;
   9079 	mii->mii_readreg = new_readreg;
   9080 	mii->mii_writereg = new_writereg;
   9081 }
   9082 
   9083 /*
   9084  * wm_get_phy_id_82575:
   9085  *
   9086  * Return PHY ID. Return -1 if it failed.
   9087  */
   9088 static int
   9089 wm_get_phy_id_82575(struct wm_softc *sc)
   9090 {
   9091 	uint32_t reg;
   9092 	int phyid = -1;
   9093 
   9094 	/* XXX */
   9095 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9096 		return -1;
   9097 
   9098 	if (wm_sgmii_uses_mdio(sc)) {
   9099 		switch (sc->sc_type) {
   9100 		case WM_T_82575:
   9101 		case WM_T_82576:
   9102 			reg = CSR_READ(sc, WMREG_MDIC);
   9103 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9104 			break;
   9105 		case WM_T_82580:
   9106 		case WM_T_I350:
   9107 		case WM_T_I354:
   9108 		case WM_T_I210:
   9109 		case WM_T_I211:
   9110 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9111 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9112 			break;
   9113 		default:
   9114 			return -1;
   9115 		}
   9116 	}
   9117 
   9118 	return phyid;
   9119 }
   9120 
   9121 
   9122 /*
   9123  * wm_gmii_mediainit:
   9124  *
   9125  *	Initialize media for use on 1000BASE-T devices.
   9126  */
   9127 static void
   9128 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9129 {
   9130 	device_t dev = sc->sc_dev;
   9131 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9132 	struct mii_data *mii = &sc->sc_mii;
   9133 	uint32_t reg;
   9134 
   9135 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9136 		device_xname(sc->sc_dev), __func__));
   9137 
   9138 	/* We have GMII. */
   9139 	sc->sc_flags |= WM_F_HAS_MII;
   9140 
   9141 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9143 	else
   9144 		sc->sc_tipg = TIPG_1000T_DFLT;
   9145 
   9146 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9147 	if ((sc->sc_type == WM_T_82580)
   9148 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9149 	    || (sc->sc_type == WM_T_I211)) {
   9150 		reg = CSR_READ(sc, WMREG_PHPM);
   9151 		reg &= ~PHPM_GO_LINK_D;
   9152 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9153 	}
   9154 
   9155 	/*
   9156 	 * Let the chip set speed/duplex on its own based on
   9157 	 * signals from the PHY.
   9158 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9159 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9160 	 */
   9161 	sc->sc_ctrl |= CTRL_SLU;
   9162 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9163 
   9164 	/* Initialize our media structures and probe the GMII. */
   9165 	mii->mii_ifp = ifp;
   9166 
	/*
	 * The first call of wm_gmii_setup_phytype. The result might be
	 * incorrect.
	 */
   9171 	wm_gmii_setup_phytype(sc, 0, 0);
   9172 
   9173 	mii->mii_statchg = wm_gmii_statchg;
   9174 
   9175 	/* get PHY control from SMBus to PCIe */
   9176 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9177 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9178 		wm_smbustopci(sc);
   9179 
   9180 	wm_gmii_reset(sc);
   9181 
   9182 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9183 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9184 	    wm_gmii_mediastatus);
   9185 
   9186 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9187 	    || (sc->sc_type == WM_T_82580)
   9188 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9189 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9190 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9191 			/* Attach only one port */
   9192 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9193 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9194 		} else {
   9195 			int i, id;
   9196 			uint32_t ctrl_ext;
   9197 
   9198 			id = wm_get_phy_id_82575(sc);
   9199 			if (id != -1) {
   9200 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9201 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9202 			}
   9203 			if ((id == -1)
   9204 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9205 				/* Power on sgmii phy if it is disabled */
   9206 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9207 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9209 				CSR_WRITE_FLUSH(sc);
   9210 				delay(300*1000); /* XXX too long */
   9211 
				/* Try PHY addresses from 1 to 7 */
   9213 				for (i = 1; i < 8; i++)
   9214 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9215 					    0xffffffff, i, MII_OFFSET_ANY,
   9216 					    MIIF_DOPAUSE);
   9217 
   9218 				/* restore previous sfp cage power state */
   9219 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9220 			}
   9221 		}
   9222 	} else {
   9223 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9224 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9225 	}
   9226 
   9227 	/*
   9228 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9229 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9230 	 */
   9231 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9232 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9233 		wm_set_mdio_slow_mode_hv(sc);
   9234 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9235 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9236 	}
   9237 
   9238 	/*
   9239 	 * (For ICH8 variants)
   9240 	 * If PHY detection failed, use BM's r/w function and retry.
   9241 	 */
   9242 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9243 		/* if failed, retry with *_bm_* */
   9244 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9245 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9246 		    sc->sc_phytype);
   9247 		sc->sc_phytype = WMPHY_BM;
   9248 		mii->mii_readreg = wm_gmii_bm_readreg;
   9249 		mii->mii_writereg = wm_gmii_bm_writereg;
   9250 
   9251 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9252 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9253 	}
   9254 
   9255 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9257 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9258 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9259 		sc->sc_phytype = WMPHY_NONE;
   9260 	} else {
   9261 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9262 
		/*
		 * PHY found! Check the PHY type again, with the second
		 * call of wm_gmii_setup_phytype.
		 */
   9267 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9268 		    child->mii_mpd_model);
   9269 
   9270 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9271 	}
   9272 }
   9273 
   9274 /*
   9275  * wm_gmii_mediachange:	[ifmedia interface function]
   9276  *
   9277  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9278  */
   9279 static int
   9280 wm_gmii_mediachange(struct ifnet *ifp)
   9281 {
   9282 	struct wm_softc *sc = ifp->if_softc;
   9283 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9284 	int rc;
   9285 
   9286 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9287 		device_xname(sc->sc_dev), __func__));
   9288 	if ((ifp->if_flags & IFF_UP) == 0)
   9289 		return 0;
   9290 
   9291 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9292 	sc->sc_ctrl |= CTRL_SLU;
   9293 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9294 	    || (sc->sc_type > WM_T_82543)) {
   9295 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9296 	} else {
   9297 		sc->sc_ctrl &= ~CTRL_ASDE;
   9298 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9299 		if (ife->ifm_media & IFM_FDX)
   9300 			sc->sc_ctrl |= CTRL_FD;
   9301 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9302 		case IFM_10_T:
   9303 			sc->sc_ctrl |= CTRL_SPEED_10;
   9304 			break;
   9305 		case IFM_100_TX:
   9306 			sc->sc_ctrl |= CTRL_SPEED_100;
   9307 			break;
   9308 		case IFM_1000_T:
   9309 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9310 			break;
   9311 		default:
   9312 			panic("wm_gmii_mediachange: bad media 0x%x",
   9313 			    ife->ifm_media);
   9314 		}
   9315 	}
   9316 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9317 	if (sc->sc_type <= WM_T_82543)
   9318 		wm_gmii_reset(sc);
   9319 
   9320 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9321 		return 0;
   9322 	return rc;
   9323 }
   9324 
   9325 /*
   9326  * wm_gmii_mediastatus:	[ifmedia interface function]
   9327  *
   9328  *	Get the current interface media status on a 1000BASE-T device.
   9329  */
   9330 static void
   9331 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9332 {
   9333 	struct wm_softc *sc = ifp->if_softc;
   9334 
   9335 	ether_mediastatus(ifp, ifmr);
   9336 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9337 	    | sc->sc_flowflags;
   9338 }
   9339 
   9340 #define	MDI_IO		CTRL_SWDPIN(2)
   9341 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9342 #define	MDI_CLK		CTRL_SWDPIN(3)
   9343 
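/*
 * wm_i82543_mii_sendbits:
 *
 *	Helper; clock out the `nbits' most significant bits of `data'
 *	on the bit-banged MII interface of the i82543.
 */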
   9344 static void
   9345 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9346 {
   9347 	uint32_t i, v;
   9348 
   9349 	v = CSR_READ(sc, WMREG_CTRL);
   9350 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9351 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9352 
   9353 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9354 		if (data & i)
   9355 			v |= MDI_IO;
   9356 		else
   9357 			v &= ~MDI_IO;
   9358 		CSR_WRITE(sc, WMREG_CTRL, v);
   9359 		CSR_WRITE_FLUSH(sc);
   9360 		delay(10);
   9361 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9362 		CSR_WRITE_FLUSH(sc);
   9363 		delay(10);
   9364 		CSR_WRITE(sc, WMREG_CTRL, v);
   9365 		CSR_WRITE_FLUSH(sc);
   9366 		delay(10);
   9367 	}
   9368 }
   9369 
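/*
 * wm_i82543_mii_recvbits:
 *
 *	Helper; clock in a 16-bit value from the bit-banged MII
 *	interface of the i82543.
 */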
   9370 static uint32_t
   9371 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9372 {
   9373 	uint32_t v, i, data = 0;
   9374 
   9375 	v = CSR_READ(sc, WMREG_CTRL);
   9376 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9377 	v |= CTRL_SWDPIO(3);
   9378 
   9379 	CSR_WRITE(sc, WMREG_CTRL, v);
   9380 	CSR_WRITE_FLUSH(sc);
   9381 	delay(10);
   9382 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9383 	CSR_WRITE_FLUSH(sc);
   9384 	delay(10);
   9385 	CSR_WRITE(sc, WMREG_CTRL, v);
   9386 	CSR_WRITE_FLUSH(sc);
   9387 	delay(10);
   9388 
   9389 	for (i = 0; i < 16; i++) {
   9390 		data <<= 1;
   9391 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9392 		CSR_WRITE_FLUSH(sc);
   9393 		delay(10);
   9394 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9395 			data |= 1;
   9396 		CSR_WRITE(sc, WMREG_CTRL, v);
   9397 		CSR_WRITE_FLUSH(sc);
   9398 		delay(10);
   9399 	}
   9400 
   9401 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9402 	CSR_WRITE_FLUSH(sc);
   9403 	delay(10);
   9404 	CSR_WRITE(sc, WMREG_CTRL, v);
   9405 	CSR_WRITE_FLUSH(sc);
   9406 	delay(10);
   9407 
   9408 	return data;
   9409 }
   9410 
   9411 #undef MDI_IO
   9412 #undef MDI_DIR
   9413 #undef MDI_CLK
   9414 
   9415 /*
   9416  * wm_gmii_i82543_readreg:	[mii interface function]
   9417  *
   9418  *	Read a PHY register on the GMII (i82543 version).
   9419  */
   9420 static int
   9421 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9422 {
   9423 	struct wm_softc *sc = device_private(self);
   9424 	int rv;
   9425 
   9426 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9427 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9428 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9429 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9430 
   9431 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9432 	    device_xname(sc->sc_dev), phy, reg, rv));
   9433 
   9434 	return rv;
   9435 }
   9436 
   9437 /*
   9438  * wm_gmii_i82543_writereg:	[mii interface function]
   9439  *
   9440  *	Write a PHY register on the GMII (i82543 version).
   9441  */
   9442 static void
   9443 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9444 {
   9445 	struct wm_softc *sc = device_private(self);
   9446 
   9447 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9448 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9449 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9450 	    (MII_COMMAND_START << 30), 32);
   9451 }
   9452 
   9453 /*
   9454  * wm_gmii_mdic_readreg:	[mii interface function]
   9455  *
   9456  *	Read a PHY register on the GMII.
   9457  */
   9458 static int
   9459 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9460 {
   9461 	struct wm_softc *sc = device_private(self);
   9462 	uint32_t mdic = 0;
   9463 	int i, rv;
   9464 
   9465 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9466 	    MDIC_REGADD(reg));
   9467 
   9468 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9469 		mdic = CSR_READ(sc, WMREG_MDIC);
   9470 		if (mdic & MDIC_READY)
   9471 			break;
   9472 		delay(50);
   9473 	}
   9474 
   9475 	if ((mdic & MDIC_READY) == 0) {
   9476 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9477 		    device_xname(sc->sc_dev), phy, reg);
   9478 		rv = 0;
   9479 	} else if (mdic & MDIC_E) {
   9480 #if 0 /* This is normal if no PHY is present. */
   9481 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9482 		    device_xname(sc->sc_dev), phy, reg);
   9483 #endif
   9484 		rv = 0;
   9485 	} else {
   9486 		rv = MDIC_DATA(mdic);
   9487 		if (rv == 0xffff)
   9488 			rv = 0;
   9489 	}
   9490 
   9491 	return rv;
   9492 }
   9493 
   9494 /*
   9495  * wm_gmii_mdic_writereg:	[mii interface function]
   9496  *
   9497  *	Write a PHY register on the GMII.
   9498  */
   9499 static void
   9500 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9501 {
   9502 	struct wm_softc *sc = device_private(self);
   9503 	uint32_t mdic = 0;
   9504 	int i;
   9505 
   9506 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9507 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9508 
   9509 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9510 		mdic = CSR_READ(sc, WMREG_MDIC);
   9511 		if (mdic & MDIC_READY)
   9512 			break;
   9513 		delay(50);
   9514 	}
   9515 
   9516 	if ((mdic & MDIC_READY) == 0)
   9517 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9518 		    device_xname(sc->sc_dev), phy, reg);
   9519 	else if (mdic & MDIC_E)
   9520 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9521 		    device_xname(sc->sc_dev), phy, reg);
   9522 }
   9523 
   9524 /*
   9525  * wm_gmii_i82544_readreg:	[mii interface function]
   9526  *
   9527  *	Read a PHY register on the GMII.
   9528  */
   9529 static int
   9530 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9531 {
   9532 	struct wm_softc *sc = device_private(self);
   9533 	int rv;
   9534 
   9535 	if (sc->phy.acquire(sc)) {
   9536 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9537 		    __func__);
   9538 		return 0;
   9539 	}
   9540 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9541 	sc->phy.release(sc);
   9542 
   9543 	return rv;
   9544 }
   9545 
   9546 /*
   9547  * wm_gmii_i82544_writereg:	[mii interface function]
   9548  *
   9549  *	Write a PHY register on the GMII.
   9550  */
   9551 static void
   9552 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9553 {
   9554 	struct wm_softc *sc = device_private(self);
   9555 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9560 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9561 	sc->phy.release(sc);
   9562 }
   9563 
   9564 /*
   9565  * wm_gmii_i80003_readreg:	[mii interface function]
   9566  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9570  */
   9571 static int
   9572 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9573 {
   9574 	struct wm_softc *sc = device_private(self);
   9575 	int rv;
   9576 
   9577 	if (phy != 1) /* only one PHY on kumeran bus */
   9578 		return 0;
   9579 
   9580 	if (sc->phy.acquire(sc)) {
   9581 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9582 		    __func__);
   9583 		return 0;
   9584 	}
   9585 
   9586 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9587 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9588 		    reg >> GG82563_PAGE_SHIFT);
   9589 	} else {
   9590 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9591 		    reg >> GG82563_PAGE_SHIFT);
   9592 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9594 	delay(200);
   9595 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9596 	delay(200);
   9597 	sc->phy.release(sc);
   9598 
   9599 	return rv;
   9600 }
   9601 
   9602 /*
   9603  * wm_gmii_i80003_writereg:	[mii interface function]
   9604  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9608  */
   9609 static void
   9610 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9611 {
   9612 	struct wm_softc *sc = device_private(self);
   9613 
   9614 	if (phy != 1) /* only one PHY on kumeran bus */
   9615 		return;
   9616 
   9617 	if (sc->phy.acquire(sc)) {
   9618 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9619 		    __func__);
   9620 		return;
   9621 	}
   9622 
   9623 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9624 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9625 		    reg >> GG82563_PAGE_SHIFT);
   9626 	} else {
   9627 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9628 		    reg >> GG82563_PAGE_SHIFT);
   9629 	}
   9630 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9631 	delay(200);
   9632 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9633 	delay(200);
   9634 
   9635 	sc->phy.release(sc);
   9636 }
   9637 
   9638 /*
   9639  * wm_gmii_bm_readreg:	[mii interface function]
   9640  *
   9641  *	Read a PHY register on the BM PHY.
   9642  * This could be handled by the PHY layer if we didn't have to lock the
   9643  * resource ...
   9644  */
   9645 static int
   9646 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9647 {
   9648 	struct wm_softc *sc = device_private(self);
   9649 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9650 	uint16_t val;
   9651 	int rv;
   9652 
   9653 	if (sc->phy.acquire(sc)) {
   9654 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9655 		    __func__);
   9656 		return 0;
   9657 	}
   9658 
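        	/*
        	 * On parts other than the 82574/82583, registers on pages >= 768
        	 * and a few special registers (page 0 register 25, register 31)
        	 * are only reachable through PHY address 1, so remap the address.
        	 */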
   9659 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9660 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9661 		    || (reg == 31)) ? 1 : phy;
   9662 	/* Page 800 works differently than the rest so it has its own func */
   9663 	if (page == BM_WUC_PAGE) {
   9664 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9665 		rv = val;
   9666 		goto release;
   9667 	}
   9668 
   9669 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9670 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9671 		    && (sc->sc_type != WM_T_82583))
   9672 			wm_gmii_mdic_writereg(self, phy,
   9673 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9674 		else
   9675 			wm_gmii_mdic_writereg(self, phy,
   9676 			    BME1000_PHY_PAGE_SELECT, page);
   9677 	}
   9678 
   9679 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9680 
   9681 release:
   9682 	sc->phy.release(sc);
   9683 	return rv;
   9684 }
   9685 
   9686 /*
   9687  * wm_gmii_bm_writereg:	[mii interface function]
   9688  *
   9689  *	Write a PHY register on the BM PHY.
   9690  * This could be handled by the PHY layer if we didn't have to lock the
   9691  * resource ...
   9692  */
   9693 static void
   9694 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9695 {
   9696 	struct wm_softc *sc = device_private(self);
   9697 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9698 
   9699 	if (sc->phy.acquire(sc)) {
   9700 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9701 		    __func__);
   9702 		return;
   9703 	}
   9704 
   9705 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9706 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9707 		    || (reg == 31)) ? 1 : phy;
   9708 	/* Page 800 works differently than the rest so it has its own func */
   9709 	if (page == BM_WUC_PAGE) {
   9710 		uint16_t tmp;
   9711 
   9712 		tmp = val;
   9713 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9714 		goto release;
   9715 	}
   9716 
   9717 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9718 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9719 		    && (sc->sc_type != WM_T_82583))
   9720 			wm_gmii_mdic_writereg(self, phy,
   9721 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9722 		else
   9723 			wm_gmii_mdic_writereg(self, phy,
   9724 			    BME1000_PHY_PAGE_SELECT, page);
   9725 	}
   9726 
   9727 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9728 
   9729 release:
   9730 	sc->phy.release(sc);
   9731 }
   9732 
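        /*
         * wm_access_phy_wakeup_reg_bm:
         *
         *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
         *	page 800, using the enable/access/disable sequence described
         *	in the numbered step comments below.
         */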
   9733 static void
   9734 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9735 {
   9736 	struct wm_softc *sc = device_private(self);
   9737 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9738 	uint16_t wuce, reg;
   9739 
   9740 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9741 		device_xname(sc->sc_dev), __func__));
   9742 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9743 	if (sc->sc_type == WM_T_PCH) {
   9744 		/* XXX the e1000 driver does nothing here... why? */
   9745 	}
   9746 
   9747 	/*
   9748 	 * 1) Enable PHY wakeup register first.
   9749 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9750 	 */
   9751 
   9752 	/* Set page 769 */
   9753 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9754 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9755 
   9756 	/* Read WUCE and save it */
   9757 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9758 
   9759 	reg = wuce | BM_WUC_ENABLE_BIT;
   9760 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9761 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9762 
   9763 	/* Select page 800 */
   9764 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9765 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9766 
   9767 	/*
   9768 	 * 2) Access PHY wakeup register.
   9769 	 * See e1000_access_phy_wakeup_reg_bm.
   9770 	 */
   9771 
   9772 	/* Write the wakeup register's address via the address opcode */
   9773 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9774 
   9775 	if (rd)
   9776 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9777 	else
   9778 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9779 
   9780 	/*
   9781 	 * 3) Disable PHY wakeup register.
   9782 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9783 	 */
   9784 	/* Set page 769 */
   9785 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9786 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9787 
   9788 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9789 }
   9790 
   9791 /*
   9792  * wm_gmii_hv_readreg:	[mii interface function]
   9793  *
   9794  *	Read a PHY register on the HV PHY.
   9795  * This could be handled by the PHY layer if we didn't have to lock the
   9796  * resource ...
   9797  */
   9798 static int
   9799 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9800 {
   9801 	struct wm_softc *sc = device_private(self);
   9802 	int rv;
   9803 
   9804 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9805 		device_xname(sc->sc_dev), __func__));
   9806 	if (sc->phy.acquire(sc)) {
   9807 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9808 		    __func__);
   9809 		return 0;
   9810 	}
   9811 
   9812 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9813 	sc->phy.release(sc);
   9814 	return rv;
   9815 }
   9816 
   9817 static int
   9818 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9819 {
   9820 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9821 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9822 	uint16_t val;
   9823 	int rv;
   9824 
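        	/*
        	 * 'reg' packs a page number and an in-page register number;
        	 * BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() unpack them.  Pages at or
        	 * above HV_INTC_FC_PAGE_START are only reachable via PHY
        	 * address 1.
        	 */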
   9825 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9826 
   9827 	/* Page 800 works differently than the rest so it has its own func */
   9828 	if (page == BM_WUC_PAGE) {
   9829 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9830 		return val;
   9831 	}
   9832 
   9833 	/*
   9834 	 * Pages below 768 work differently from the rest and would need
   9835 	 * their own handling; not implemented here.
   9836 	 */
   9837 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9838 		printf("gmii_hv_readreg!!!\n");
   9839 		return 0;
   9840 	}
   9841 
   9842 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9843 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9844 		    page << BME1000_PAGE_SHIFT);
   9845 	}
   9846 
   9847 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9848 	return rv;
   9849 }
   9850 
   9851 /*
   9852  * wm_gmii_hv_writereg:	[mii interface function]
   9853  *
   9854  *	Write a PHY register on the HV PHY.
   9855  * This could be handled by the PHY layer if we didn't have to lock the
   9856  * resource ...
   9857  */
   9858 static void
   9859 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9860 {
   9861 	struct wm_softc *sc = device_private(self);
   9862 
   9863 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9864 		device_xname(sc->sc_dev), __func__));
   9865 
   9866 	if (sc->phy.acquire(sc)) {
   9867 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9868 		    __func__);
   9869 		return;
   9870 	}
   9871 
   9872 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9873 	sc->phy.release(sc);
   9874 }
   9875 
   9876 static void
   9877 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9878 {
   9879 	struct wm_softc *sc = device_private(self);
   9880 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9881 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9882 
   9883 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9884 
   9885 	/* Page 800 works differently than the rest so it has its own func */
   9886 	if (page == BM_WUC_PAGE) {
   9887 		uint16_t tmp;
   9888 
   9889 		tmp = val;
   9890 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9891 		return;
   9892 	}
   9893 
   9894 	/*
   9895 	 * Pages below 768 work differently from the rest and would need
   9896 	 * their own handling; not implemented here.
   9897 	 */
   9898 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9899 		printf("gmii_hv_writereg!!!\n");
   9900 		return;
   9901 	}
   9902 
   9903 	{
   9904 		/*
   9905 		 * XXX Workaround MDIO accesses being disabled after entering
   9906 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9907 		 * register is set)
   9908 		 */
   9909 		if (sc->sc_phytype == WMPHY_82578) {
   9910 			struct mii_softc *child;
   9911 
   9912 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9913 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9914 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9915 			    && ((val & (1 << 11)) != 0)) {
   9916 				printf("XXX need workaround\n");
   9917 			}
   9918 		}
   9919 
   9920 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9921 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9922 			    page << BME1000_PAGE_SHIFT);
   9923 		}
   9924 	}
   9925 
   9926 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9927 }
   9928 
   9929 /*
   9930  * wm_gmii_82580_readreg:	[mii interface function]
   9931  *
   9932  *	Read a PHY register on the 82580 and I350.
   9933  * This could be handled by the PHY layer if we didn't have to lock the
   9934  * resource ...
   9935  */
   9936 static int
   9937 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9938 {
   9939 	struct wm_softc *sc = device_private(self);
   9940 	int rv;
   9941 
   9942 	if (sc->phy.acquire(sc) != 0) {
   9943 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9944 		    __func__);
   9945 		return 0;
   9946 	}
   9947 
   9948 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9949 
   9950 	sc->phy.release(sc);
   9951 	return rv;
   9952 }
   9953 
   9954 /*
   9955  * wm_gmii_82580_writereg:	[mii interface function]
   9956  *
   9957  *	Write a PHY register on the 82580 and I350.
   9958  * This could be handled by the PHY layer if we didn't have to lock the
   9959  * resource ...
   9960  */
   9961 static void
   9962 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9963 {
   9964 	struct wm_softc *sc = device_private(self);
   9965 
   9966 	if (sc->phy.acquire(sc) != 0) {
   9967 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9968 		    __func__);
   9969 		return;
   9970 	}
   9971 
   9972 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9973 
   9974 	sc->phy.release(sc);
   9975 }
   9976 
   9977 /*
   9978  * wm_gmii_gs40g_readreg:	[mii interface function]
   9979  *
   9980  *	Read a PHY register on the I210 and I211.
   9981  * This could be handled by the PHY layer if we didn't have to lock the
   9982  * resource ...
   9983  */
   9984 static int
   9985 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9986 {
   9987 	struct wm_softc *sc = device_private(self);
   9988 	int page, offset;
   9989 	int rv;
   9990 
   9991 	/* Acquire semaphore */
   9992 	if (sc->phy.acquire(sc)) {
   9993 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9994 		    __func__);
   9995 		return 0;
   9996 	}
   9997 
   9998 	/* Page select */
   9999 	page = reg >> GS40G_PAGE_SHIFT;
   10000 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10001 
   10002 	/* Read reg */
   10003 	offset = reg & GS40G_OFFSET_MASK;
   10004 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10005 
   10006 	sc->phy.release(sc);
   10007 	return rv;
   10008 }
   10009 
   10010 /*
   10011  * wm_gmii_gs40g_writereg:	[mii interface function]
   10012  *
   10013  *	Write a PHY register on the I210 and I211.
   10014  * This could be handled by the PHY layer if we didn't have to lock the
   10015  * resource ...
   10016  */
   10017 static void
   10018 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10019 {
   10020 	struct wm_softc *sc = device_private(self);
   10021 	int page, offset;
   10022 
   10023 	/* Acquire semaphore */
   10024 	if (sc->phy.acquire(sc)) {
   10025 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10026 		    __func__);
   10027 		return;
   10028 	}
   10029 
   10030 	/* Page select */
   10031 	page = reg >> GS40G_PAGE_SHIFT;
   10032 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10033 
   10034 	/* Write reg */
   10035 	offset = reg & GS40G_OFFSET_MASK;
   10036 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10037 
   10038 	/* Release semaphore */
   10039 	sc->phy.release(sc);
   10040 }
   10041 
   10042 /*
   10043  * wm_gmii_statchg:	[mii interface function]
   10044  *
   10045  *	Callback from MII layer when media changes.
   10046  */
   10047 static void
   10048 wm_gmii_statchg(struct ifnet *ifp)
   10049 {
   10050 	struct wm_softc *sc = ifp->if_softc;
   10051 	struct mii_data *mii = &sc->sc_mii;
   10052 
   10053 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10054 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10055 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10056 
   10057 	/*
   10058 	 * Get flow control negotiation result.
   10059 	 */
   10060 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10061 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10062 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10063 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10064 	}
   10065 
   10066 	if (sc->sc_flowflags & IFM_FLOW) {
   10067 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10068 			sc->sc_ctrl |= CTRL_TFCE;
   10069 			sc->sc_fcrtl |= FCRTL_XONE;
   10070 		}
   10071 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10072 			sc->sc_ctrl |= CTRL_RFCE;
   10073 	}
   10074 
   10075 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10076 		DPRINTF(WM_DEBUG_LINK,
   10077 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10078 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10079 	} else {
   10080 		DPRINTF(WM_DEBUG_LINK,
   10081 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10082 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10083 	}
   10084 
   10085 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10086 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10087 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10088 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10089 	if (sc->sc_type == WM_T_80003) {
   10090 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10091 		case IFM_1000_T:
   10092 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10093 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10094 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10095 			break;
   10096 		default:
   10097 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10098 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10099 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10100 			break;
   10101 		}
   10102 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10103 	}
   10104 }
   10105 
   10106 /* kumeran related (80003, ICH* and PCH*) */
   10107 
   10108 /*
   10109  * wm_kmrn_readreg:
   10110  *
   10111  *	Read a kumeran register
   10112  */
   10113 static int
   10114 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10115 {
   10116 	int rv;
   10117 
   10118 	if (sc->sc_type == WM_T_80003)
   10119 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10120 	else
   10121 		rv = sc->phy.acquire(sc);
   10122 	if (rv != 0) {
   10123 		aprint_error_dev(sc->sc_dev,
   10124 		    "%s: failed to get semaphore\n", __func__);
   10125 		return 0;
   10126 	}
   10127 
   10128 	rv = wm_kmrn_readreg_locked(sc, reg);
   10129 
   10130 	if (sc->sc_type == WM_T_80003)
   10131 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10132 	else
   10133 		sc->phy.release(sc);
   10134 
   10135 	return rv;
   10136 }
   10137 
   10138 static int
   10139 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10140 {
   10141 	int rv;
   10142 
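         	/*
         	 * A kumeran read is started by writing the register offset with
         	 * the read-enable (REN) bit set; the data is then returned in
         	 * the low bits (KUMCTRLSTA_MASK) of the same register.
         	 */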
   10143 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10144 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10145 	    KUMCTRLSTA_REN);
   10146 	CSR_WRITE_FLUSH(sc);
   10147 	delay(2);
   10148 
   10149 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10150 
   10151 	return rv;
   10152 }
   10153 
   10154 /*
   10155  * wm_kmrn_writereg:
   10156  *
   10157  *	Write a kumeran register
   10158  */
   10159 static void
   10160 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10161 {
   10162 	int rv;
   10163 
   10164 	if (sc->sc_type == WM_T_80003)
   10165 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10166 	else
   10167 		rv = sc->phy.acquire(sc);
   10168 	if (rv != 0) {
   10169 		aprint_error_dev(sc->sc_dev,
   10170 		    "%s: failed to get semaphore\n", __func__);
   10171 		return;
   10172 	}
   10173 
   10174 	wm_kmrn_writereg_locked(sc, reg, val);
   10175 
   10176 	if (sc->sc_type == WM_T_80003)
   10177 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10178 	else
   10179 		sc->phy.release(sc);
   10180 }
   10181 
   10182 static void
   10183 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10184 {
   10185 
   10186 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10187 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10188 	    (val & KUMCTRLSTA_MASK));
   10189 }
   10190 
   10191 /* SGMII related */
   10192 
   10193 /*
   10194  * wm_sgmii_uses_mdio
   10195  *
   10196  * Check whether the transaction is to the internal PHY or the external
   10197  * MDIO interface. Return true if it's MDIO.
   10198  */
   10199 static bool
   10200 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10201 {
   10202 	uint32_t reg;
   10203 	bool ismdio = false;
   10204 
   10205 	switch (sc->sc_type) {
   10206 	case WM_T_82575:
   10207 	case WM_T_82576:
   10208 		reg = CSR_READ(sc, WMREG_MDIC);
   10209 		ismdio = ((reg & MDIC_DEST) != 0);
   10210 		break;
   10211 	case WM_T_82580:
   10212 	case WM_T_I350:
   10213 	case WM_T_I354:
   10214 	case WM_T_I210:
   10215 	case WM_T_I211:
   10216 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10217 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10218 		break;
   10219 	default:
   10220 		break;
   10221 	}
   10222 
   10223 	return ismdio;
   10224 }
   10225 
   10226 /*
   10227  * wm_sgmii_readreg:	[mii interface function]
   10228  *
   10229  *	Read a PHY register on the SGMII.
   10230  * This could be handled by the PHY layer if we didn't have to lock the
   10231  * resource ...
   10232  */
   10233 static int
   10234 wm_sgmii_readreg(device_t self, int phy, int reg)
   10235 {
   10236 	struct wm_softc *sc = device_private(self);
   10237 	uint32_t i2ccmd;
   10238 	int i, rv;
   10239 
   10240 	if (sc->phy.acquire(sc)) {
   10241 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10242 		    __func__);
   10243 		return 0;
   10244 	}
   10245 
   10246 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10247 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10248 	    | I2CCMD_OPCODE_READ;
   10249 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10250 
   10251 	/* Poll the ready bit */
   10252 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10253 		delay(50);
   10254 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10255 		if (i2ccmd & I2CCMD_READY)
   10256 			break;
   10257 	}
   10258 	if ((i2ccmd & I2CCMD_READY) == 0)
   10259 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10260 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10261 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10262 
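         	/* The data comes back byte-swapped from the I2C interface */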
   10263 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10264 
   10265 	sc->phy.release(sc);
   10266 	return rv;
   10267 }
   10268 
   10269 /*
   10270  * wm_sgmii_writereg:	[mii interface function]
   10271  *
   10272  *	Write a PHY register on the SGMII.
   10273  * This could be handled by the PHY layer if we didn't have to lock the
   10274  * resource ...
   10275  */
   10276 static void
   10277 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10278 {
   10279 	struct wm_softc *sc = device_private(self);
   10280 	uint32_t i2ccmd;
   10281 	int i;
   10282 	int val_swapped;
   10283 
   10284 	if (sc->phy.acquire(sc) != 0) {
   10285 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10286 		    __func__);
   10287 		return;
   10288 	}
   10289 	/* Swap the data bytes for the I2C interface */
   10290 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10291 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10292 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10293 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10294 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10295 
   10296 	/* Poll the ready bit */
   10297 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10298 		delay(50);
   10299 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10300 		if (i2ccmd & I2CCMD_READY)
   10301 			break;
   10302 	}
   10303 	if ((i2ccmd & I2CCMD_READY) == 0)
   10304 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10305 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10306 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10307 
   10308 	sc->phy.release(sc);
   10309 }
   10310 
   10311 /* TBI related */
   10312 
   10313 /*
   10314  * wm_tbi_mediainit:
   10315  *
   10316  *	Initialize media for use on 1000BASE-X devices.
   10317  */
   10318 static void
   10319 wm_tbi_mediainit(struct wm_softc *sc)
   10320 {
   10321 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10322 	const char *sep = "";
   10323 
   10324 	if (sc->sc_type < WM_T_82543)
   10325 		sc->sc_tipg = TIPG_WM_DFLT;
   10326 	else
   10327 		sc->sc_tipg = TIPG_LG_DFLT;
   10328 
   10329 	sc->sc_tbi_serdes_anegticks = 5;
   10330 
   10331 	/* Initialize our media structures */
   10332 	sc->sc_mii.mii_ifp = ifp;
   10333 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10334 
   10335 	if ((sc->sc_type >= WM_T_82575)
   10336 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10337 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10338 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10339 	else
   10340 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10341 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10342 
   10343 	/*
   10344 	 * SWD Pins:
   10345 	 *
   10346 	 *	0 = Link LED (output)
   10347 	 *	1 = Loss Of Signal (input)
   10348 	 */
   10349 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10350 
   10351 	/* XXX Perhaps this is only for TBI */
   10352 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10353 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10354 
   10355 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10356 		sc->sc_ctrl &= ~CTRL_LRST;
   10357 
   10358 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10359 
   10360 #define	ADD(ss, mm, dd)							\
   10361 do {									\
   10362 	aprint_normal("%s%s", sep, ss);					\
   10363 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10364 	sep = ", ";							\
   10365 } while (/*CONSTCOND*/0)
   10366 
   10367 	aprint_normal_dev(sc->sc_dev, "");
   10368 
   10369 	if (sc->sc_type == WM_T_I354) {
   10370 		uint32_t status;
   10371 
   10372 		status = CSR_READ(sc, WMREG_STATUS);
   10373 		if (((status & STATUS_2P5_SKU) != 0)
   10374 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10375 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10376 		} else
   10377 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10378 	} else if (sc->sc_type == WM_T_82545) {
   10379 		/* Only 82545 is LX (XXX except SFP) */
   10380 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10381 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10382 	} else {
   10383 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10384 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10385 	}
   10386 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10387 	aprint_normal("\n");
   10388 
   10389 #undef ADD
   10390 
   10391 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10392 }
   10393 
   10394 /*
   10395  * wm_tbi_mediachange:	[ifmedia interface function]
   10396  *
   10397  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10398  */
   10399 static int
   10400 wm_tbi_mediachange(struct ifnet *ifp)
   10401 {
   10402 	struct wm_softc *sc = ifp->if_softc;
   10403 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10404 	uint32_t status;
   10405 	int i;
   10406 
   10407 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10408 		/* XXX need some work for >= 82571 and < 82575 */
   10409 		if (sc->sc_type < WM_T_82575)
   10410 			return 0;
   10411 	}
   10412 
   10413 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10414 	    || (sc->sc_type >= WM_T_82575))
   10415 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10416 
   10417 	sc->sc_ctrl &= ~CTRL_LRST;
   10418 	sc->sc_txcw = TXCW_ANE;
   10419 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10420 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10421 	else if (ife->ifm_media & IFM_FDX)
   10422 		sc->sc_txcw |= TXCW_FD;
   10423 	else
   10424 		sc->sc_txcw |= TXCW_HD;
   10425 
   10426 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10427 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10428 
   10429 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10430 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10431 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10432 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10433 	CSR_WRITE_FLUSH(sc);
   10434 	delay(1000);
   10435 
   10436 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10437 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10438 
   10439 	/*
   10440 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
   10441 	 * if the optics detect a signal; on older chips the sense is inverted.
   10442 	 */
   10443 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10444 		/* Have signal; wait for the link to come up. */
   10445 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10446 			delay(10000);
   10447 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10448 				break;
   10449 		}
   10450 
   10451 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10452 			    device_xname(sc->sc_dev),i));
   10453 
   10454 		status = CSR_READ(sc, WMREG_STATUS);
   10455 		DPRINTF(WM_DEBUG_LINK,
   10456 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10457 			device_xname(sc->sc_dev),status, STATUS_LU));
   10458 		if (status & STATUS_LU) {
   10459 			/* Link is up. */
   10460 			DPRINTF(WM_DEBUG_LINK,
   10461 			    ("%s: LINK: set media -> link up %s\n",
   10462 			    device_xname(sc->sc_dev),
   10463 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10464 
   10465 			/*
   10466 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10467 			 * so we should update sc->sc_ctrl
   10468 			 */
   10469 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10470 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10471 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10472 			if (status & STATUS_FD)
   10473 				sc->sc_tctl |=
   10474 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10475 			else
   10476 				sc->sc_tctl |=
   10477 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10478 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10479 				sc->sc_fcrtl |= FCRTL_XONE;
   10480 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10481 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10482 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10483 				      sc->sc_fcrtl);
   10484 			sc->sc_tbi_linkup = 1;
   10485 		} else {
   10486 			if (i == WM_LINKUP_TIMEOUT)
   10487 				wm_check_for_link(sc);
   10488 			/* Link is down. */
   10489 			DPRINTF(WM_DEBUG_LINK,
   10490 			    ("%s: LINK: set media -> link down\n",
   10491 			    device_xname(sc->sc_dev)));
   10492 			sc->sc_tbi_linkup = 0;
   10493 		}
   10494 	} else {
   10495 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10496 		    device_xname(sc->sc_dev)));
   10497 		sc->sc_tbi_linkup = 0;
   10498 	}
   10499 
   10500 	wm_tbi_serdes_set_linkled(sc);
   10501 
   10502 	return 0;
   10503 }
   10504 
   10505 /*
   10506  * wm_tbi_mediastatus:	[ifmedia interface function]
   10507  *
   10508  *	Get the current interface media status on a 1000BASE-X device.
   10509  */
   10510 static void
   10511 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10512 {
   10513 	struct wm_softc *sc = ifp->if_softc;
   10514 	uint32_t ctrl, status;
   10515 
   10516 	ifmr->ifm_status = IFM_AVALID;
   10517 	ifmr->ifm_active = IFM_ETHER;
   10518 
   10519 	status = CSR_READ(sc, WMREG_STATUS);
   10520 	if ((status & STATUS_LU) == 0) {
   10521 		ifmr->ifm_active |= IFM_NONE;
   10522 		return;
   10523 	}
   10524 
   10525 	ifmr->ifm_status |= IFM_ACTIVE;
   10526 	/* Only 82545 is LX */
   10527 	if (sc->sc_type == WM_T_82545)
   10528 		ifmr->ifm_active |= IFM_1000_LX;
   10529 	else
   10530 		ifmr->ifm_active |= IFM_1000_SX;
   10531 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10532 		ifmr->ifm_active |= IFM_FDX;
   10533 	else
   10534 		ifmr->ifm_active |= IFM_HDX;
   10535 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10536 	if (ctrl & CTRL_RFCE)
   10537 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10538 	if (ctrl & CTRL_TFCE)
   10539 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10540 }
   10541 
   10542 /* XXX TBI only */
   10543 static int
   10544 wm_check_for_link(struct wm_softc *sc)
   10545 {
   10546 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10547 	uint32_t rxcw;
   10548 	uint32_t ctrl;
   10549 	uint32_t status;
   10550 	uint32_t sig;
   10551 
   10552 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10553 		/* XXX need some work for >= 82571 */
   10554 		if (sc->sc_type >= WM_T_82571) {
   10555 			sc->sc_tbi_linkup = 1;
   10556 			return 0;
   10557 		}
   10558 	}
   10559 
   10560 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10561 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10562 	status = CSR_READ(sc, WMREG_STATUS);
   10563 
   10564 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10565 
   10566 	DPRINTF(WM_DEBUG_LINK,
   10567 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10568 		device_xname(sc->sc_dev), __func__,
   10569 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10570 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10571 
   10572 	/*
   10573 	 * SWDPIN   LU RXCW
   10574 	 *      0    0    0
   10575 	 *      0    0    1	(should not happen)
   10576 	 *      0    1    0	(should not happen)
   10577 	 *      0    1    1	(should not happen)
   10578 	 *      1    0    0	Disable autonego and force linkup
   10579 	 *      1    0    1	got /C/ but not linkup yet
   10580 	 *      1    1    0	(linkup)
   10581 	 *      1    1    1	If IFM_AUTO, back to autonego
   10582 	 *
   10583 	 */
   10584 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10585 	    && ((status & STATUS_LU) == 0)
   10586 	    && ((rxcw & RXCW_C) == 0)) {
   10587 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10588 			__func__));
   10589 		sc->sc_tbi_linkup = 0;
   10590 		/* Disable auto-negotiation in the TXCW register */
   10591 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10592 
   10593 		/*
   10594 		 * Force link-up and also force full-duplex.
   10595 		 *
   10596 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   10597 			 * automatically, so keep sc->sc_ctrl in sync.
   10598 		 */
   10599 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10600 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10601 	} else if (((status & STATUS_LU) != 0)
   10602 	    && ((rxcw & RXCW_C) != 0)
   10603 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10604 		sc->sc_tbi_linkup = 1;
   10605 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10606 			__func__));
   10607 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10608 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10609 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10610 	    && ((rxcw & RXCW_C) != 0)) {
   10611 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10612 	} else {
   10613 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10614 			status));
   10615 	}
   10616 
   10617 	return 0;
   10618 }
   10619 
   10620 /*
   10621  * wm_tbi_tick:
   10622  *
   10623  *	Check the link on TBI devices.
   10624  *	This function acts as mii_tick().
   10625  */
   10626 static void
   10627 wm_tbi_tick(struct wm_softc *sc)
   10628 {
   10629 	struct mii_data *mii = &sc->sc_mii;
   10630 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10631 	uint32_t status;
   10632 
   10633 	KASSERT(WM_CORE_LOCKED(sc));
   10634 
   10635 	status = CSR_READ(sc, WMREG_STATUS);
   10636 
   10637 	/* XXX is this needed? */
   10638 	(void)CSR_READ(sc, WMREG_RXCW);
   10639 	(void)CSR_READ(sc, WMREG_CTRL);
   10640 
   10641 	/* set link status */
   10642 	if ((status & STATUS_LU) == 0) {
   10643 		DPRINTF(WM_DEBUG_LINK,
   10644 		    ("%s: LINK: checklink -> down\n",
   10645 			device_xname(sc->sc_dev)));
   10646 		sc->sc_tbi_linkup = 0;
   10647 	} else if (sc->sc_tbi_linkup == 0) {
   10648 		DPRINTF(WM_DEBUG_LINK,
   10649 		    ("%s: LINK: checklink -> up %s\n",
   10650 			device_xname(sc->sc_dev),
   10651 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10652 		sc->sc_tbi_linkup = 1;
   10653 		sc->sc_tbi_serdes_ticks = 0;
   10654 	}
   10655 
   10656 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10657 		goto setled;
   10658 
   10659 	if ((status & STATUS_LU) == 0) {
   10660 		sc->sc_tbi_linkup = 0;
   10661 		/* If the timer expired, retry autonegotiation */
   10662 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10663 		    && (++sc->sc_tbi_serdes_ticks
   10664 			>= sc->sc_tbi_serdes_anegticks)) {
   10665 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10666 			sc->sc_tbi_serdes_ticks = 0;
   10667 			/*
   10668 			 * Reset the link, and let autonegotiation do
   10669 			 * its thing
   10670 			 */
   10671 			sc->sc_ctrl |= CTRL_LRST;
   10672 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10673 			CSR_WRITE_FLUSH(sc);
   10674 			delay(1000);
   10675 			sc->sc_ctrl &= ~CTRL_LRST;
   10676 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10677 			CSR_WRITE_FLUSH(sc);
   10678 			delay(1000);
   10679 			CSR_WRITE(sc, WMREG_TXCW,
   10680 			    sc->sc_txcw & ~TXCW_ANE);
   10681 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10682 		}
   10683 	}
   10684 
   10685 setled:
   10686 	wm_tbi_serdes_set_linkled(sc);
   10687 }
   10688 
   10689 /* SERDES related */
   10690 static void
   10691 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10692 {
   10693 	uint32_t reg;
   10694 
   10695 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10696 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10697 		return;
   10698 
   10699 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10700 	reg |= PCS_CFG_PCS_EN;
   10701 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10702 
   10703 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10704 	reg &= ~CTRL_EXT_SWDPIN(3);
   10705 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10706 	CSR_WRITE_FLUSH(sc);
   10707 }
   10708 
   10709 static int
   10710 wm_serdes_mediachange(struct ifnet *ifp)
   10711 {
   10712 	struct wm_softc *sc = ifp->if_softc;
   10713 	bool pcs_autoneg = true; /* XXX */
   10714 	uint32_t ctrl_ext, pcs_lctl, reg;
   10715 
   10716 	/* XXX Currently, this function is not called on 8257[12] */
   10717 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10718 	    || (sc->sc_type >= WM_T_82575))
   10719 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10720 
   10721 	wm_serdes_power_up_link_82575(sc);
   10722 
   10723 	sc->sc_ctrl |= CTRL_SLU;
   10724 
   10725 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10726 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10727 
   10728 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10729 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10730 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10731 	case CTRL_EXT_LINK_MODE_SGMII:
   10732 		pcs_autoneg = true;
   10733 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10734 		break;
   10735 	case CTRL_EXT_LINK_MODE_1000KX:
   10736 		pcs_autoneg = false;
   10737 		/* FALLTHROUGH */
   10738 	default:
   10739 		if ((sc->sc_type == WM_T_82575)
   10740 		    || (sc->sc_type == WM_T_82576)) {
   10741 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10742 				pcs_autoneg = false;
   10743 		}
   10744 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10745 		    | CTRL_FRCFDX;
   10746 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10747 	}
   10748 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10749 
   10750 	if (pcs_autoneg) {
   10751 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10752 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10753 
   10754 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10755 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10756 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10757 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10758 	} else
   10759 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10760 
   10761 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10762 
   10764 	return 0;
   10765 }
   10766 
   10767 static void
   10768 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10769 {
   10770 	struct wm_softc *sc = ifp->if_softc;
   10771 	struct mii_data *mii = &sc->sc_mii;
   10772 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10773 	uint32_t pcs_adv, pcs_lpab, reg;
   10774 
   10775 	ifmr->ifm_status = IFM_AVALID;
   10776 	ifmr->ifm_active = IFM_ETHER;
   10777 
   10778 	/* Check PCS */
   10779 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10780 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10781 		ifmr->ifm_active |= IFM_NONE;
   10782 		sc->sc_tbi_linkup = 0;
   10783 		goto setled;
   10784 	}
   10785 
   10786 	sc->sc_tbi_linkup = 1;
   10787 	ifmr->ifm_status |= IFM_ACTIVE;
   10788 	if (sc->sc_type == WM_T_I354) {
   10789 		uint32_t status;
   10790 
   10791 		status = CSR_READ(sc, WMREG_STATUS);
   10792 		if (((status & STATUS_2P5_SKU) != 0)
   10793 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10794 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10795 		} else
   10796 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10797 	} else {
   10798 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10799 		case PCS_LSTS_SPEED_10:
   10800 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10801 			break;
   10802 		case PCS_LSTS_SPEED_100:
   10803 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10804 			break;
   10805 		case PCS_LSTS_SPEED_1000:
   10806 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10807 			break;
   10808 		default:
   10809 			device_printf(sc->sc_dev, "Unknown speed\n");
   10810 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10811 			break;
   10812 		}
   10813 	}
   10814 	if ((reg & PCS_LSTS_FDX) != 0)
   10815 		ifmr->ifm_active |= IFM_FDX;
   10816 	else
   10817 		ifmr->ifm_active |= IFM_HDX;
   10818 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10819 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10820 		/* Check flow */
   10821 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10822 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10823 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10824 			goto setled;
   10825 		}
   10826 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10827 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10828 		DPRINTF(WM_DEBUG_LINK,
   10829 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
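         		/*
         		 * Resolve flow control following the usual 802.3x pause
         		 * resolution rules: symmetric pause on both sides enables
         		 * both directions; otherwise the asymmetric-pause bits
         		 * select TX-only or RX-only pause.
         		 */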
   10830 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10831 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10832 			mii->mii_media_active |= IFM_FLOW
   10833 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10834 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10835 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10836 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10837 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10838 			mii->mii_media_active |= IFM_FLOW
   10839 			    | IFM_ETH_TXPAUSE;
   10840 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10841 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10842 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10843 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10844 			mii->mii_media_active |= IFM_FLOW
   10845 			    | IFM_ETH_RXPAUSE;
   10846 		}
   10847 	}
   10848 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10849 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10850 setled:
   10851 	wm_tbi_serdes_set_linkled(sc);
   10852 }
   10853 
   10854 /*
   10855  * wm_serdes_tick:
   10856  *
   10857  *	Check the link on serdes devices.
   10858  */
   10859 static void
   10860 wm_serdes_tick(struct wm_softc *sc)
   10861 {
   10862 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10863 	struct mii_data *mii = &sc->sc_mii;
   10864 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10865 	uint32_t reg;
   10866 
   10867 	KASSERT(WM_CORE_LOCKED(sc));
   10868 
   10869 	mii->mii_media_status = IFM_AVALID;
   10870 	mii->mii_media_active = IFM_ETHER;
   10871 
   10872 	/* Check PCS */
   10873 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10874 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10875 		mii->mii_media_status |= IFM_ACTIVE;
   10876 		sc->sc_tbi_linkup = 1;
   10877 		sc->sc_tbi_serdes_ticks = 0;
   10878 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10879 		if ((reg & PCS_LSTS_FDX) != 0)
   10880 			mii->mii_media_active |= IFM_FDX;
   10881 		else
   10882 			mii->mii_media_active |= IFM_HDX;
   10883 	} else {
   10884 		mii->mii_media_status |= IFM_NONE;
   10885 		sc->sc_tbi_linkup = 0;
   10886 		/* If the timer expired, retry autonegotiation */
   10887 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10888 		    && (++sc->sc_tbi_serdes_ticks
   10889 			>= sc->sc_tbi_serdes_anegticks)) {
   10890 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10891 			sc->sc_tbi_serdes_ticks = 0;
   10892 			/* XXX */
   10893 			wm_serdes_mediachange(ifp);
   10894 		}
   10895 	}
   10896 
   10897 	wm_tbi_serdes_set_linkled(sc);
   10898 }
   10899 
   10900 /* SFP related */
   10901 
   10902 static int
   10903 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10904 {
   10905 	uint32_t i2ccmd;
   10906 	int i;
   10907 
   10908 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10909 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10910 
   10911 	/* Poll the ready bit */
   10912 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10913 		delay(50);
   10914 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10915 		if (i2ccmd & I2CCMD_READY)
   10916 			break;
   10917 	}
   10918 	if ((i2ccmd & I2CCMD_READY) == 0)
   10919 		return -1;
   10920 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10921 		return -1;
   10922 
   10923 	*data = i2ccmd & 0x00ff;
   10924 
   10925 	return 0;
   10926 }
   10927 
   10928 static uint32_t
   10929 wm_sfp_get_media_type(struct wm_softc *sc)
   10930 {
   10931 	uint32_t ctrl_ext;
   10932 	uint8_t val = 0;
   10933 	int timeout = 3;
   10934 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10935 	int rv = -1;
   10936 
   10937 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10938 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10939 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10940 	CSR_WRITE_FLUSH(sc);
   10941 
   10942 	/* Read SFP module data */
   10943 	while (timeout) {
   10944 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10945 		if (rv == 0)
   10946 			break;
   10947 		delay(100*1000); /* XXX too big */
   10948 		timeout--;
   10949 	}
   10950 	if (rv != 0)
   10951 		goto out;
   10952 	switch (val) {
   10953 	case SFF_SFP_ID_SFF:
   10954 		aprint_normal_dev(sc->sc_dev,
   10955 		    "Module/Connector soldered to board\n");
   10956 		break;
   10957 	case SFF_SFP_ID_SFP:
   10958 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10959 		break;
   10960 	case SFF_SFP_ID_UNKNOWN:
   10961 		goto out;
   10962 	default:
   10963 		break;
   10964 	}
   10965 
   10966 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10967 	if (rv != 0) {
   10968 		goto out;
   10969 	}
   10970 
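         	/*
         	 * Decode the SFF ethernet compliance flags: 1000BASE-SX/LX
         	 * modules are driven as SERDES; 1000BASE-T and 100BASE-FX
         	 * modules go through the SGMII interface.
         	 */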
   10971 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10972 		mediatype = WM_MEDIATYPE_SERDES;
   10973 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10974 		sc->sc_flags |= WM_F_SGMII;
   10975 		mediatype = WM_MEDIATYPE_COPPER;
   10976 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10977 		sc->sc_flags |= WM_F_SGMII;
   10978 		mediatype = WM_MEDIATYPE_SERDES;
   10979 	}
   10980 
   10981 out:
   10982 	/* Restore I2C interface setting */
   10983 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10984 
   10985 	return mediatype;
   10986 }
   10987 
   10988 /*
   10989  * NVM related.
   10990  * Microwire, SPI (w/wo EERD) and Flash.
   10991  */
   10992 
   10993 /* Both spi and uwire */
   10994 
   10995 /*
   10996  * wm_eeprom_sendbits:
   10997  *
   10998  *	Send a series of bits to the EEPROM.
   10999  */
   11000 static void
   11001 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11002 {
   11003 	uint32_t reg;
   11004 	int x;
   11005 
   11006 	reg = CSR_READ(sc, WMREG_EECD);
   11007 
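         	/*
         	 * Bit-bang the value out MSB first: present each bit on DI,
         	 * then pulse SK high and low to clock it into the EEPROM.
         	 */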
   11008 	for (x = nbits; x > 0; x--) {
   11009 		if (bits & (1U << (x - 1)))
   11010 			reg |= EECD_DI;
   11011 		else
   11012 			reg &= ~EECD_DI;
   11013 		CSR_WRITE(sc, WMREG_EECD, reg);
   11014 		CSR_WRITE_FLUSH(sc);
   11015 		delay(2);
   11016 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11017 		CSR_WRITE_FLUSH(sc);
   11018 		delay(2);
   11019 		CSR_WRITE(sc, WMREG_EECD, reg);
   11020 		CSR_WRITE_FLUSH(sc);
   11021 		delay(2);
   11022 	}
   11023 }
   11024 
   11025 /*
   11026  * wm_eeprom_recvbits:
   11027  *
   11028  *	Receive a series of bits from the EEPROM.
   11029  */
   11030 static void
   11031 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11032 {
   11033 	uint32_t reg, val;
   11034 	int x;
   11035 
   11036 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11037 
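         	/*
         	 * Clock each bit in MSB first: raise SK, read the bit from DO,
         	 * then lower SK again.
         	 */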
   11038 	val = 0;
   11039 	for (x = nbits; x > 0; x--) {
   11040 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11041 		CSR_WRITE_FLUSH(sc);
   11042 		delay(2);
   11043 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11044 			val |= (1U << (x - 1));
   11045 		CSR_WRITE(sc, WMREG_EECD, reg);
   11046 		CSR_WRITE_FLUSH(sc);
   11047 		delay(2);
   11048 	}
   11049 	*valp = val;
   11050 }
   11051 
   11052 /* Microwire */
   11053 
   11054 /*
   11055  * wm_nvm_read_uwire:
   11056  *
   11057  *	Read a word from the EEPROM using the MicroWire protocol.
   11058  */
   11059 static int
   11060 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11061 {
   11062 	uint32_t reg, val;
   11063 	int i;
   11064 
   11065 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11066 		device_xname(sc->sc_dev), __func__));
   11067 
   11068 	for (i = 0; i < wordcnt; i++) {
   11069 		/* Clear SK and DI. */
   11070 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11071 		CSR_WRITE(sc, WMREG_EECD, reg);
   11072 
   11073 		/*
   11074 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11075 		 * and Xen.
   11076 		 *
   11077 		 * We use this workaround only for 82540 because qemu's
   11078 		 * e1000 act as 82540.
   11079 		 */
   11080 		if (sc->sc_type == WM_T_82540) {
   11081 			reg |= EECD_SK;
   11082 			CSR_WRITE(sc, WMREG_EECD, reg);
   11083 			reg &= ~EECD_SK;
   11084 			CSR_WRITE(sc, WMREG_EECD, reg);
   11085 			CSR_WRITE_FLUSH(sc);
   11086 			delay(2);
   11087 		}
   11088 		/* XXX: end of workaround */
   11089 
   11090 		/* Set CHIP SELECT. */
   11091 		reg |= EECD_CS;
   11092 		CSR_WRITE(sc, WMREG_EECD, reg);
   11093 		CSR_WRITE_FLUSH(sc);
   11094 		delay(2);
   11095 
   11096 		/* Shift in the READ command. */
   11097 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11098 
   11099 		/* Shift in address. */
   11100 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11101 
   11102 		/* Shift out the data. */
   11103 		wm_eeprom_recvbits(sc, &val, 16);
   11104 		data[i] = val & 0xffff;
   11105 
   11106 		/* Clear CHIP SELECT. */
   11107 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11108 		CSR_WRITE(sc, WMREG_EECD, reg);
   11109 		CSR_WRITE_FLUSH(sc);
   11110 		delay(2);
   11111 	}
   11112 
   11113 	return 0;
   11114 }
   11115 
   11116 /* SPI */
   11117 
   11118 /*
   11119  * Set SPI and FLASH related information from the EECD register.
   11120  * For 82541 and 82547, the word size is taken from EEPROM.
   11121  */
   11122 static int
   11123 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11124 {
   11125 	int size;
   11126 	uint32_t reg;
   11127 	uint16_t data;
   11128 
   11129 	reg = CSR_READ(sc, WMREG_EECD);
   11130 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11131 
   11132 	/* Read the size of NVM from EECD by default */
   11133 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11134 	switch (sc->sc_type) {
   11135 	case WM_T_82541:
   11136 	case WM_T_82541_2:
   11137 	case WM_T_82547:
   11138 	case WM_T_82547_2:
   11139 		/* Set dummy value to access EEPROM */
   11140 		sc->sc_nvm_wordsize = 64;
   11141 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11142 		reg = data;
   11143 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11144 		if (size == 0)
   11145 			size = 6; /* 64 word size */
   11146 		else
   11147 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11148 		break;
   11149 	case WM_T_80003:
   11150 	case WM_T_82571:
   11151 	case WM_T_82572:
   11152 	case WM_T_82573: /* SPI case */
   11153 	case WM_T_82574: /* SPI case */
   11154 	case WM_T_82583: /* SPI case */
   11155 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11156 		if (size > 14)
   11157 			size = 14;
   11158 		break;
   11159 	case WM_T_82575:
   11160 	case WM_T_82576:
   11161 	case WM_T_82580:
   11162 	case WM_T_I350:
   11163 	case WM_T_I354:
   11164 	case WM_T_I210:
   11165 	case WM_T_I211:
   11166 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11167 		if (size > 15)
   11168 			size = 15;
   11169 		break;
   11170 	default:
   11171 		aprint_error_dev(sc->sc_dev,
   11172 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11173 		return -1;
   11174 		break;
   11175 	}
   11176 
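         	/* 'size' now holds log2 of the NVM word count */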
   11177 	sc->sc_nvm_wordsize = 1 << size;
   11178 
   11179 	return 0;
   11180 }
   11181 
   11182 /*
   11183  * wm_nvm_ready_spi:
   11184  *
   11185  *	Wait for a SPI EEPROM to be ready for commands.
   11186  */
   11187 static int
   11188 wm_nvm_ready_spi(struct wm_softc *sc)
   11189 {
   11190 	uint32_t val;
   11191 	int usec;
   11192 
   11193 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11194 		device_xname(sc->sc_dev), __func__));
   11195 
   11196 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11197 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11198 		wm_eeprom_recvbits(sc, &val, 8);
   11199 		if ((val & SPI_SR_RDY) == 0)
   11200 			break;
   11201 	}
   11202 	if (usec >= SPI_MAX_RETRIES) {
   11203 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11204 		return 1;
   11205 	}
   11206 	return 0;
   11207 }
   11208 
   11209 /*
   11210  * wm_nvm_read_spi:
   11211  *
   11212  *	Read a word from the EEPROM using the SPI protocol.
   11213  */
   11214 static int
   11215 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11216 {
   11217 	uint32_t reg, val;
   11218 	int i;
   11219 	uint8_t opc;
   11220 
   11221 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11222 		device_xname(sc->sc_dev), __func__));
   11223 
   11224 	/* Clear SK and CS. */
   11225 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11226 	CSR_WRITE(sc, WMREG_EECD, reg);
   11227 	CSR_WRITE_FLUSH(sc);
   11228 	delay(2);
   11229 
   11230 	if (wm_nvm_ready_spi(sc))
   11231 		return 1;
   11232 
   11233 	/* Toggle CS to flush commands. */
   11234 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11235 	CSR_WRITE_FLUSH(sc);
   11236 	delay(2);
   11237 	CSR_WRITE(sc, WMREG_EECD, reg);
   11238 	CSR_WRITE_FLUSH(sc);
   11239 	delay(2);
   11240 
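         	/*
         	 * 8-bit-address parts are sent a byte address (word << 1), so
         	 * for words at or above 128 the ninth address bit is carried in
         	 * the opcode (SPI_OPC_A8).
         	 */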
   11241 	opc = SPI_OPC_READ;
   11242 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11243 		opc |= SPI_OPC_A8;
   11244 
   11245 	wm_eeprom_sendbits(sc, opc, 8);
   11246 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11247 
   11248 	for (i = 0; i < wordcnt; i++) {
   11249 		wm_eeprom_recvbits(sc, &val, 16);
   11250 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11251 	}
   11252 
   11253 	/* Raise CS and clear SK. */
   11254 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11255 	CSR_WRITE(sc, WMREG_EECD, reg);
   11256 	CSR_WRITE_FLUSH(sc);
   11257 	delay(2);
   11258 
   11259 	return 0;
   11260 }
   11261 
   11262 /* Using with EERD */
   11263 
   11264 static int
   11265 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11266 {
   11267 	uint32_t attempts = 100000;
   11268 	uint32_t i, reg = 0;
   11269 	int32_t done = -1;
   11270 
   11271 	for (i = 0; i < attempts; i++) {
   11272 		reg = CSR_READ(sc, rw);
   11273 
   11274 		if (reg & EERD_DONE) {
   11275 			done = 0;
   11276 			break;
   11277 		}
   11278 		delay(5);
   11279 	}
   11280 
   11281 	return done;
   11282 }
   11283 
   11284 static int
   11285 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11286     uint16_t *data)
   11287 {
   11288 	int i, eerd = 0;
   11289 	int error = 0;
   11290 
   11291 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11292 		device_xname(sc->sc_dev), __func__));
   11293 
   11294 	for (i = 0; i < wordcnt; i++) {
   11295 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11296 
   11297 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11298 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11299 		if (error != 0)
   11300 			break;
   11301 
   11302 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11303 	}
   11304 
   11305 	return error;
   11306 }
   11307 
   11308 /* Flash */
   11309 
   11310 static int
   11311 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11312 {
   11313 	uint32_t eecd;
   11314 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11315 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11316 	uint8_t sig_byte = 0;
   11317 
   11318 	switch (sc->sc_type) {
   11319 	case WM_T_PCH_SPT:
   11320 		/*
   11321 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11322 		 * sector valid bits from the NVM.
   11323 		 */
   11324 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11325 		if ((*bank == 0) || (*bank == 1)) {
   11326 			aprint_error_dev(sc->sc_dev,
   11327 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11328 				*bank);
   11329 			return -1;
   11330 		} else {
   11331 			*bank = *bank - 2;
   11332 			return 0;
   11333 		}
   11334 	case WM_T_ICH8:
   11335 	case WM_T_ICH9:
   11336 		eecd = CSR_READ(sc, WMREG_EECD);
   11337 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11338 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11339 			return 0;
   11340 		}
   11341 		/* FALLTHROUGH */
   11342 	default:
   11343 		/* Default to 0 */
   11344 		*bank = 0;
   11345 
   11346 		/* Check bank 0 */
   11347 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11348 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11349 			*bank = 0;
   11350 			return 0;
   11351 		}
   11352 
   11353 		/* Check bank 1 */
   11354 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11355 		    &sig_byte);
   11356 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11357 			*bank = 1;
   11358 			return 0;
   11359 		}
   11360 	}
   11361 
   11362 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11363 		device_xname(sc->sc_dev)));
   11364 	return -1;
   11365 }
   11366 
   11367 /******************************************************************************
   11368  * This function does initial flash setup so that a new read/write/erase cycle
   11369  * can be started.
   11370  *
   11371  * sc - The pointer to the hw structure
   11372  ****************************************************************************/
   11373 static int32_t
   11374 wm_ich8_cycle_init(struct wm_softc *sc)
   11375 {
   11376 	uint16_t hsfsts;
   11377 	int32_t error = 1;
   11378 	int32_t i     = 0;
   11379 
   11380 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11381 
	/* Check that the Flash Descriptor Valid bit is set in HW status. */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
   11386 
	/* Clear FCERR and DAEL in HW status by writing 1s. */
   11389 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11390 
   11391 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11392 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have a software
	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
	 * so that two threads' accesses to those bits are serialized and
	 * two threads cannot start a cycle at the same time.
	 */
   11403 
   11404 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11405 		/*
   11406 		 * There is no cycle running at present, so we can start a
   11407 		 * cycle
   11408 		 */
   11409 
   11410 		/* Begin by setting Flash Cycle Done. */
   11411 		hsfsts |= HSFSTS_DONE;
   11412 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11413 		error = 0;
   11414 	} else {
		/*
		 * Otherwise, poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
   11419 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11420 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11421 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11422 				error = 0;
   11423 				break;
   11424 			}
   11425 			delay(1);
   11426 		}
   11427 		if (error == 0) {
			/*
			 * The previous cycle completed within the timeout;
			 * now set the Flash Cycle Done bit.
			 */
   11432 			hsfsts |= HSFSTS_DONE;
   11433 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11434 		}
   11435 	}
   11436 	return error;
   11437 }
   11438 
   11439 /******************************************************************************
   11440  * This function starts a flash cycle and waits for its completion
   11441  *
   11442  * sc - The pointer to the hw structure
   11443  ****************************************************************************/
   11444 static int32_t
   11445 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11446 {
   11447 	uint16_t hsflctl;
   11448 	uint16_t hsfsts;
   11449 	int32_t error = 1;
   11450 	uint32_t i = 0;
   11451 
   11452 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11453 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11454 	hsflctl |= HSFCTL_GO;
   11455 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11456 
   11457 	/* Wait till FDONE bit is set to 1 */
   11458 	do {
   11459 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11460 		if (hsfsts & HSFSTS_DONE)
   11461 			break;
   11462 		delay(1);
   11463 		i++;
   11464 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11466 		error = 0;
   11467 
   11468 	return error;
   11469 }
   11470 
   11471 /******************************************************************************
   11472  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11473  *
   11474  * sc - The pointer to the hw structure
   11475  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   11477  * data - Pointer to the word to store the value read.
   11478  *****************************************************************************/
   11479 static int32_t
   11480 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11481     uint32_t size, uint32_t *data)
   11482 {
   11483 	uint16_t hsfsts;
   11484 	uint16_t hsflctl;
   11485 	uint32_t flash_linear_address;
   11486 	uint32_t flash_data = 0;
   11487 	int32_t error = 1;
   11488 	int32_t count = 0;
   11489 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11492 		return error;
   11493 
   11494 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11495 	    sc->sc_ich8_flash_base;
   11496 
   11497 	do {
   11498 		delay(1);
   11499 		/* Steps */
   11500 		error = wm_ich8_cycle_init(sc);
   11501 		if (error)
   11502 			break;
   11503 
   11504 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds size - 1 (0/1/3 for 1/2/4 bytes). */
   11506 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11507 		    & HSFCTL_BCOUNT_MASK;
   11508 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11509 		if (sc->sc_type == WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32-bit access is
			 * supported.
			 */
   11514 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11515 			    (uint32_t)hsflctl);
   11516 		} else
   11517 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11518 
		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
		 */
		/* TODO: maybe check the index against the size of the flash */
   11524 
   11525 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11526 
   11527 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11528 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the data from the Flash
		 * Data0 register, least significant byte first.
		 */
   11535 		if (error == 0) {
   11536 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11537 			if (size == 1)
   11538 				*data = (uint8_t)(flash_data & 0x000000FF);
   11539 			else if (size == 2)
   11540 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11541 			else if (size == 4)
   11542 				*data = (uint32_t)flash_data;
   11543 			break;
   11544 		} else {
			/*
			 * If we've gotten here, things are probably
			 * completely hosed, but if the error condition is
			 * detected it won't hurt to retry, up to
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
   11551 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11552 			if (hsfsts & HSFSTS_ERR) {
   11553 				/* Repeat for some time before giving up. */
   11554 				continue;
   11555 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11556 				break;
   11557 		}
   11558 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11559 
   11560 	return error;
   11561 }
   11562 
   11563 /******************************************************************************
   11564  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11565  *
   11566  * sc - pointer to wm_hw structure
   11567  * index - The index of the byte to read.
   11568  * data - Pointer to a byte to store the value read.
   11569  *****************************************************************************/
   11570 static int32_t
   11571 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11572 {
   11573 	int32_t status;
   11574 	uint32_t word = 0;
   11575 
   11576 	status = wm_read_ich8_data(sc, index, 1, &word);
   11577 	if (status == 0)
   11578 		*data = (uint8_t)word;
   11579 	else
   11580 		*data = 0;
   11581 
   11582 	return status;
   11583 }
   11584 
   11585 /******************************************************************************
   11586  * Reads a word from the NVM using the ICH8 flash access registers.
   11587  *
   11588  * sc - pointer to wm_hw structure
   11589  * index - The starting byte index of the word to read.
   11590  * data - Pointer to a word to store the value read.
   11591  *****************************************************************************/
   11592 static int32_t
   11593 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11594 {
   11595 	int32_t status;
   11596 	uint32_t word = 0;
   11597 
   11598 	status = wm_read_ich8_data(sc, index, 2, &word);
   11599 	if (status == 0)
   11600 		*data = (uint16_t)word;
   11601 	else
   11602 		*data = 0;
   11603 
   11604 	return status;
   11605 }
   11606 
   11607 /******************************************************************************
   11608  * Reads a dword from the NVM using the ICH8 flash access registers.
   11609  *
   11610  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   11613  *****************************************************************************/
   11614 static int32_t
   11615 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11616 {
   11617 	int32_t status;
   11618 
   11619 	status = wm_read_ich8_data(sc, index, 4, data);
   11620 	return status;
   11621 }
   11622 
   11623 /******************************************************************************
   11624  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11625  * register.
   11626  *
   11627  * sc - Struct containing variables accessed by shared code
   11628  * offset - offset of word in the EEPROM to read
   11629  * data - word read from the EEPROM
   11630  * words - number of words to read
   11631  *****************************************************************************/
   11632 static int
   11633 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11634 {
   11635 	int32_t  error = 0;
   11636 	uint32_t flash_bank = 0;
   11637 	uint32_t act_offset = 0;
   11638 	uint32_t bank_offset = 0;
   11639 	uint16_t word = 0;
   11640 	uint16_t i = 0;
   11641 
   11642 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11643 		device_xname(sc->sc_dev), __func__));
   11644 
   11645 	/*
   11646 	 * We need to know which is the valid flash bank.  In the event
   11647 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11648 	 * managing flash_bank.  So it cannot be trusted and needs
   11649 	 * to be updated with each read.
   11650 	 */
   11651 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11652 	if (error) {
   11653 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11654 			device_xname(sc->sc_dev)));
   11655 		flash_bank = 0;
   11656 	}
   11657 
	/*
	 * Adjust the offset if we're reading from bank 1; the bank size
	 * is counted in words, hence the multiplication by 2 to get the
	 * byte offset.
	 */
   11662 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11663 
   11664 	error = wm_get_swfwhw_semaphore(sc);
   11665 	if (error) {
   11666 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11667 		    __func__);
   11668 		return error;
   11669 	}
   11670 
   11671 	for (i = 0; i < words; i++) {
   11672 		/* The NVM part needs a byte offset, hence * 2 */
   11673 		act_offset = bank_offset + ((offset + i) * 2);
   11674 		error = wm_read_ich8_word(sc, act_offset, &word);
   11675 		if (error) {
   11676 			aprint_error_dev(sc->sc_dev,
   11677 			    "%s: failed to read NVM\n", __func__);
   11678 			break;
   11679 		}
   11680 		data[i] = word;
   11681 	}
   11682 
   11683 	wm_put_swfwhw_semaphore(sc);
   11684 	return error;
   11685 }
   11686 
   11687 /******************************************************************************
   11688  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11689  * register.
   11690  *
   11691  * sc - Struct containing variables accessed by shared code
   11692  * offset - offset of word in the EEPROM to read
   11693  * data - word read from the EEPROM
   11694  * words - number of words to read
   11695  *****************************************************************************/
   11696 static int
   11697 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11698 {
   11699 	int32_t  error = 0;
   11700 	uint32_t flash_bank = 0;
   11701 	uint32_t act_offset = 0;
   11702 	uint32_t bank_offset = 0;
   11703 	uint32_t dword = 0;
   11704 	uint16_t i = 0;
   11705 
   11706 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11707 		device_xname(sc->sc_dev), __func__));
   11708 
   11709 	/*
   11710 	 * We need to know which is the valid flash bank.  In the event
   11711 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11712 	 * managing flash_bank.  So it cannot be trusted and needs
   11713 	 * to be updated with each read.
   11714 	 */
   11715 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11716 	if (error) {
   11717 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11718 			device_xname(sc->sc_dev)));
   11719 		flash_bank = 0;
   11720 	}
   11721 
	/*
	 * Adjust the offset if we're reading from bank 1; the bank size
	 * is counted in words, hence the multiplication by 2 to get the
	 * byte offset.
	 */
   11726 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11727 
   11728 	error = wm_get_swfwhw_semaphore(sc);
   11729 	if (error) {
   11730 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11731 		    __func__);
   11732 		return error;
   11733 	}
   11734 
   11735 	for (i = 0; i < words; i++) {
   11736 		/* The NVM part needs a byte offset, hence * 2 */
   11737 		act_offset = bank_offset + ((offset + i) * 2);
   11738 		/* but we must read dword aligned, so mask ... */
   11739 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11740 		if (error) {
   11741 			aprint_error_dev(sc->sc_dev,
   11742 			    "%s: failed to read NVM\n", __func__);
   11743 			break;
   11744 		}
   11745 		/* ... and pick out low or high word */
   11746 		if ((act_offset & 0x2) == 0)
   11747 			data[i] = (uint16_t)(dword & 0xFFFF);
   11748 		else
   11749 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11750 	}
   11751 
   11752 	wm_put_swfwhw_semaphore(sc);
   11753 	return error;
   11754 }
   11755 
   11756 /* iNVM */
   11757 
   11758 static int
   11759 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11760 {
	int32_t  rv = -1;	/* -1 = requested word not found */
   11762 	uint32_t invm_dword;
   11763 	uint16_t i;
   11764 	uint8_t record_type, word_address;
   11765 
   11766 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11767 		device_xname(sc->sc_dev), __func__));
   11768 
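	/*
	 * The iNVM is an array of 32-bit records.  Autoload structures
	 * are skipped by advancing past their payload; a WORD_AUTOLOAD
	 * record carries a word address/data pair that is matched against
	 * the requested address.
	 */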
   11769 	for (i = 0; i < INVM_SIZE; i++) {
   11770 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11771 		/* Get record type */
   11772 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11773 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11774 			break;
   11775 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11776 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11777 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11778 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11779 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11780 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11781 			if (word_address == address) {
   11782 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11783 				rv = 0;
   11784 				break;
   11785 			}
   11786 		}
   11787 	}
   11788 
   11789 	return rv;
   11790 }
   11791 
   11792 static int
   11793 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11794 {
   11795 	int rv = 0;
   11796 	int i;
   11797 
   11798 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11799 		device_xname(sc->sc_dev), __func__));
   11800 
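	/*
	 * Only a handful of words are programmed into the iNVM.  For the
	 * known configuration words, a failed read is papered over with
	 * the documented I211 default; unmapped words read back as
	 * NVM_RESERVED_WORD.
	 */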
   11801 	for (i = 0; i < words; i++) {
   11802 		switch (offset + i) {
   11803 		case NVM_OFF_MACADDR:
   11804 		case NVM_OFF_MACADDR1:
   11805 		case NVM_OFF_MACADDR2:
   11806 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11807 			if (rv != 0) {
   11808 				data[i] = 0xffff;
   11809 				rv = -1;
   11810 			}
   11811 			break;
   11812 		case NVM_OFF_CFG2:
   11813 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11814 			if (rv != 0) {
   11815 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11816 				rv = 0;
   11817 			}
   11818 			break;
   11819 		case NVM_OFF_CFG4:
   11820 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11821 			if (rv != 0) {
   11822 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11823 				rv = 0;
   11824 			}
   11825 			break;
   11826 		case NVM_OFF_LED_1_CFG:
   11827 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11828 			if (rv != 0) {
   11829 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11830 				rv = 0;
   11831 			}
   11832 			break;
   11833 		case NVM_OFF_LED_0_2_CFG:
   11834 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11835 			if (rv != 0) {
   11836 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11837 				rv = 0;
   11838 			}
   11839 			break;
   11840 		case NVM_OFF_ID_LED_SETTINGS:
   11841 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11842 			if (rv != 0) {
   11843 				*data = ID_LED_RESERVED_FFFF;
   11844 				rv = 0;
   11845 			}
   11846 			break;
   11847 		default:
   11848 			DPRINTF(WM_DEBUG_NVM,
   11849 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11850 			*data = NVM_RESERVED_WORD;
   11851 			break;
   11852 		}
   11853 	}
   11854 
   11855 	return rv;
   11856 }
   11857 
/* Locking, NVM type detection, checksum validation, version check and read */
   11859 
   11860 /*
   11861  * wm_nvm_acquire:
   11862  *
   11863  *	Perform the EEPROM handshake required on some chips.
   11864  */
   11865 static int
   11866 wm_nvm_acquire(struct wm_softc *sc)
   11867 {
   11868 	uint32_t reg;
   11869 	int x;
   11870 	int ret = 0;
   11871 
   11872 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11873 		device_xname(sc->sc_dev), __func__));
   11874 
   11875 	if (sc->sc_type >= WM_T_ICH8) {
   11876 		ret = wm_get_nvm_ich8lan(sc);
   11877 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11878 		ret = wm_get_swfwhw_semaphore(sc);
   11879 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11880 		/* This will also do wm_get_swsm_semaphore() if needed */
   11881 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11882 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11883 		ret = wm_get_swsm_semaphore(sc);
   11884 	}
   11885 
   11886 	if (ret) {
   11887 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11888 			__func__);
   11889 		return 1;
   11890 	}
   11891 
   11892 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11893 		reg = CSR_READ(sc, WMREG_EECD);
   11894 
   11895 		/* Request EEPROM access. */
   11896 		reg |= EECD_EE_REQ;
   11897 		CSR_WRITE(sc, WMREG_EECD, reg);
   11898 
   11899 		/* ..and wait for it to be granted. */
   11900 		for (x = 0; x < 1000; x++) {
   11901 			reg = CSR_READ(sc, WMREG_EECD);
   11902 			if (reg & EECD_EE_GNT)
   11903 				break;
   11904 			delay(5);
   11905 		}
   11906 		if ((reg & EECD_EE_GNT) == 0) {
   11907 			aprint_error_dev(sc->sc_dev,
   11908 			    "could not acquire EEPROM GNT\n");
   11909 			reg &= ~EECD_EE_REQ;
   11910 			CSR_WRITE(sc, WMREG_EECD, reg);
   11911 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11912 				wm_put_swfwhw_semaphore(sc);
   11913 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11914 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11915 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11916 				wm_put_swsm_semaphore(sc);
   11917 			return 1;
   11918 		}
   11919 	}
   11920 
   11921 	return 0;
   11922 }
   11923 
   11924 /*
   11925  * wm_nvm_release:
   11926  *
   11927  *	Release the EEPROM mutex.
   11928  */
   11929 static void
   11930 wm_nvm_release(struct wm_softc *sc)
   11931 {
   11932 	uint32_t reg;
   11933 
   11934 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11935 		device_xname(sc->sc_dev), __func__));
   11936 
   11937 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11938 		reg = CSR_READ(sc, WMREG_EECD);
   11939 		reg &= ~EECD_EE_REQ;
   11940 		CSR_WRITE(sc, WMREG_EECD, reg);
   11941 	}
   11942 
   11943 	if (sc->sc_type >= WM_T_ICH8) {
   11944 		wm_put_nvm_ich8lan(sc);
   11945 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11946 		wm_put_swfwhw_semaphore(sc);
   11947 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11948 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11949 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11950 		wm_put_swsm_semaphore(sc);
   11951 }
   11952 
   11953 static int
   11954 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11955 {
   11956 	uint32_t eecd = 0;
   11957 
   11958 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11959 	    || sc->sc_type == WM_T_82583) {
   11960 		eecd = CSR_READ(sc, WMREG_EECD);
   11961 
   11962 		/* Isolate bits 15 & 16 */
   11963 		eecd = ((eecd >> 15) & 0x03);
   11964 
   11965 		/* If both bits are set, device is Flash type */
   11966 		if (eecd == 0x03)
   11967 			return 0;
   11968 	}
   11969 	return 1;
   11970 }
   11971 
   11972 static int
   11973 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11974 {
   11975 	uint32_t eec;
   11976 
   11977 	eec = CSR_READ(sc, WMREG_EEC);
   11978 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11979 		return 1;
   11980 
   11981 	return 0;
   11982 }
   11983 
   11984 /*
   11985  * wm_nvm_validate_checksum
   11986  *
 * The checksum is defined so that the sum of the first 64 16-bit words,
 * including the checksum word itself, equals NVM_CHECKSUM.
   11988  */
   11989 static int
   11990 wm_nvm_validate_checksum(struct wm_softc *sc)
   11991 {
   11992 	uint16_t checksum;
   11993 	uint16_t eeprom_data;
   11994 #ifdef WM_DEBUG
   11995 	uint16_t csum_wordaddr, valid_checksum;
   11996 #endif
   11997 	int i;
   11998 
   11999 	checksum = 0;
   12000 
   12001 	/* Don't check for I211 */
   12002 	if (sc->sc_type == WM_T_I211)
   12003 		return 0;
   12004 
   12005 #ifdef WM_DEBUG
   12006 	if (sc->sc_type == WM_T_PCH_LPT) {
   12007 		csum_wordaddr = NVM_OFF_COMPAT;
   12008 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12009 	} else {
   12010 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12011 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12012 	}
   12013 
   12014 	/* Dump EEPROM image for debug */
   12015 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12016 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12017 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12018 		/* XXX PCH_SPT? */
   12019 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12020 		if ((eeprom_data & valid_checksum) == 0) {
   12021 			DPRINTF(WM_DEBUG_NVM,
   12022 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12023 				device_xname(sc->sc_dev), eeprom_data,
   12024 				    valid_checksum));
   12025 		}
   12026 	}
   12027 
   12028 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12029 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12030 		for (i = 0; i < NVM_SIZE; i++) {
   12031 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12032 				printf("XXXX ");
   12033 			else
   12034 				printf("%04hx ", eeprom_data);
   12035 			if (i % 8 == 7)
   12036 				printf("\n");
   12037 		}
   12038 	}
   12039 
   12040 #endif /* WM_DEBUG */
   12041 
   12042 	for (i = 0; i < NVM_SIZE; i++) {
   12043 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12044 			return 1;
   12045 		checksum += eeprom_data;
   12046 	}
   12047 
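	/*
	 * A checksum mismatch is only reported under WM_DEBUG and is not
	 * treated as fatal; only a failed read is a hard error here.
	 */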
   12048 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12049 #ifdef WM_DEBUG
   12050 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12051 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12052 #endif
   12053 	}
   12054 
   12055 	return 0;
   12056 }
   12057 
   12058 static void
   12059 wm_nvm_version_invm(struct wm_softc *sc)
   12060 {
   12061 	uint32_t dword;
   12062 
	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm.  Instead, just use word 61 as the
	 * document describes.  Perhaps it's not perfect though...
	 *
	 * Example:
	 *
	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
	 */
   12072 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12073 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12074 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12075 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12076 }
   12077 
   12078 static void
   12079 wm_nvm_version(struct wm_softc *sc)
   12080 {
   12081 	uint16_t major, minor, build, patch;
   12082 	uint16_t uid0, uid1;
   12083 	uint16_t nvm_data;
   12084 	uint16_t off;
   12085 	bool check_version = false;
   12086 	bool check_optionrom = false;
   12087 	bool have_build = false;
   12088 
   12089 	/*
   12090 	 * Version format:
   12091 	 *
   12092 	 * XYYZ
   12093 	 * X0YZ
   12094 	 * X0YY
   12095 	 *
   12096 	 * Example:
   12097 	 *
   12098 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12099 	 *	82571	0x50a6	5.10.6?
   12100 	 *	82572	0x506a	5.6.10?
   12101 	 *	82572EI	0x5069	5.6.9?
   12102 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12103 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12105 	 */
   12106 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12107 	switch (sc->sc_type) {
   12108 	case WM_T_82571:
   12109 	case WM_T_82572:
   12110 	case WM_T_82574:
   12111 	case WM_T_82583:
   12112 		check_version = true;
   12113 		check_optionrom = true;
   12114 		have_build = true;
   12115 		break;
   12116 	case WM_T_82575:
   12117 	case WM_T_82576:
   12118 	case WM_T_82580:
   12119 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12120 			check_version = true;
   12121 		break;
   12122 	case WM_T_I211:
   12123 		wm_nvm_version_invm(sc);
   12124 		goto printver;
   12125 	case WM_T_I210:
   12126 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12127 			wm_nvm_version_invm(sc);
   12128 			goto printver;
   12129 		}
   12130 		/* FALLTHROUGH */
   12131 	case WM_T_I350:
   12132 	case WM_T_I354:
   12133 		check_version = true;
   12134 		check_optionrom = true;
   12135 		break;
   12136 	default:
   12137 		return;
   12138 	}
   12139 	if (check_version) {
   12140 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12141 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12142 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12143 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12144 			build = nvm_data & NVM_BUILD_MASK;
   12145 			have_build = true;
   12146 		} else
   12147 			minor = nvm_data & 0x00ff;
   12148 
		/* Convert the BCD-encoded minor to decimal (e.g. 0x10 -> 10) */
   12150 		minor = (minor / 16) * 10 + (minor % 16);
   12151 		sc->sc_nvm_ver_major = major;
   12152 		sc->sc_nvm_ver_minor = minor;
   12153 
   12154 printver:
   12155 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12156 		    sc->sc_nvm_ver_minor);
   12157 		if (have_build) {
   12158 			sc->sc_nvm_ver_build = build;
   12159 			aprint_verbose(".%d", build);
   12160 		}
   12161 	}
   12162 	if (check_optionrom) {
   12163 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12164 		/* Option ROM Version */
   12165 		if ((off != 0x0000) && (off != 0xffff)) {
   12166 			off += NVM_COMBO_VER_OFF;
   12167 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12168 			wm_nvm_read(sc, off, 1, &uid0);
   12169 			if ((uid0 != 0) && (uid0 != 0xffff)
   12170 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12171 				/* 16bits */
   12172 				major = uid0 >> 8;
   12173 				build = (uid0 << 8) | (uid1 >> 8);
   12174 				patch = uid1 & 0x00ff;
   12175 				aprint_verbose(", option ROM Version %d.%d.%d",
   12176 				    major, build, patch);
   12177 			}
   12178 		}
   12179 	}
   12180 
   12181 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12182 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12183 }
   12184 
   12185 /*
   12186  * wm_nvm_read:
   12187  *
   12188  *	Read data from the serial EEPROM.
   12189  */
   12190 static int
   12191 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12192 {
   12193 	int rv;
   12194 
   12195 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12196 		device_xname(sc->sc_dev), __func__));
   12197 
   12198 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12199 		return 1;
   12200 
   12201 	if (wm_nvm_acquire(sc))
   12202 		return 1;
   12203 
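	/*
	 * Dispatch on the NVM access method: ICH8-era flash, SPT flash,
	 * iNVM, the EERD register, SPI, or Microwire, in that order of
	 * precedence.
	 */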
   12204 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12205 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12206 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12207 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12208 	else if (sc->sc_type == WM_T_PCH_SPT)
   12209 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12210 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12211 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12212 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12213 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12214 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12215 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12216 	else
   12217 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12218 
   12219 	wm_nvm_release(sc);
   12220 	return rv;
   12221 }
   12222 
   12223 /*
   12224  * Hardware semaphores.
 * Very complex...
   12226  */
   12227 
   12228 static int
   12229 wm_get_null(struct wm_softc *sc)
   12230 {
   12231 
   12232 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12233 		device_xname(sc->sc_dev), __func__));
   12234 	return 0;
   12235 }
   12236 
   12237 static void
   12238 wm_put_null(struct wm_softc *sc)
   12239 {
   12240 
   12241 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12242 		device_xname(sc->sc_dev), __func__));
   12243 	return;
   12244 }
   12245 
   12246 /*
   12247  * Get hardware semaphore.
   12248  * Same as e1000_get_hw_semaphore_generic()
   12249  */
   12250 static int
   12251 wm_get_swsm_semaphore(struct wm_softc *sc)
   12252 {
   12253 	int32_t timeout;
   12254 	uint32_t swsm;
   12255 
   12256 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12257 		device_xname(sc->sc_dev), __func__));
   12258 	KASSERT(sc->sc_nvm_wordsize > 0);
   12259 
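	/*
	 * Two-step handshake: first wait for the hardware-arbitrated SMBI
	 * bit to become clear, then claim software ownership by setting
	 * SWESMBI and reading it back to confirm that the write stuck.
	 */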
   12260 	/* Get the SW semaphore. */
   12261 	timeout = sc->sc_nvm_wordsize + 1;
   12262 	while (timeout) {
   12263 		swsm = CSR_READ(sc, WMREG_SWSM);
   12264 
   12265 		if ((swsm & SWSM_SMBI) == 0)
   12266 			break;
   12267 
   12268 		delay(50);
   12269 		timeout--;
   12270 	}
   12271 
   12272 	if (timeout == 0) {
   12273 		aprint_error_dev(sc->sc_dev,
   12274 		    "could not acquire SWSM SMBI\n");
   12275 		return 1;
   12276 	}
   12277 
   12278 	/* Get the FW semaphore. */
   12279 	timeout = sc->sc_nvm_wordsize + 1;
   12280 	while (timeout) {
   12281 		swsm = CSR_READ(sc, WMREG_SWSM);
   12282 		swsm |= SWSM_SWESMBI;
   12283 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12284 		/* If we managed to set the bit we got the semaphore. */
   12285 		swsm = CSR_READ(sc, WMREG_SWSM);
   12286 		if (swsm & SWSM_SWESMBI)
   12287 			break;
   12288 
   12289 		delay(50);
   12290 		timeout--;
   12291 	}
   12292 
   12293 	if (timeout == 0) {
   12294 		aprint_error_dev(sc->sc_dev,
   12295 		    "could not acquire SWSM SWESMBI\n");
   12296 		/* Release semaphores */
   12297 		wm_put_swsm_semaphore(sc);
   12298 		return 1;
   12299 	}
   12300 	return 0;
   12301 }
   12302 
   12303 /*
   12304  * Put hardware semaphore.
   12305  * Same as e1000_put_hw_semaphore_generic()
   12306  */
   12307 static void
   12308 wm_put_swsm_semaphore(struct wm_softc *sc)
   12309 {
   12310 	uint32_t swsm;
   12311 
   12312 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12313 		device_xname(sc->sc_dev), __func__));
   12314 
   12315 	swsm = CSR_READ(sc, WMREG_SWSM);
   12316 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12317 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12318 }
   12319 
   12320 /*
   12321  * Get SW/FW semaphore.
   12322  * Same as e1000_acquire_swfw_sync_82575().
   12323  */
   12324 static int
   12325 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12326 {
   12327 	uint32_t swfw_sync;
   12328 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12329 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12330 	int timeout = 200;
   12331 
   12332 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12333 		device_xname(sc->sc_dev), __func__));
   12334 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12335 
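	/*
	 * A resource is free only when both its software and firmware
	 * bits are clear in SW_FW_SYNC; it is claimed by setting the
	 * software bit while holding the SWSM semaphore.
	 */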
   12336 	for (timeout = 0; timeout < 200; timeout++) {
   12337 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12338 			if (wm_get_swsm_semaphore(sc)) {
   12339 				aprint_error_dev(sc->sc_dev,
   12340 				    "%s: failed to get semaphore\n",
   12341 				    __func__);
   12342 				return 1;
   12343 			}
   12344 		}
   12345 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12346 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12347 			swfw_sync |= swmask;
   12348 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12349 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12350 				wm_put_swsm_semaphore(sc);
   12351 			return 0;
   12352 		}
   12353 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12354 			wm_put_swsm_semaphore(sc);
   12355 		delay(5000);
   12356 	}
   12357 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12358 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12359 	return 1;
   12360 }
   12361 
   12362 static void
   12363 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12364 {
   12365 	uint32_t swfw_sync;
   12366 
   12367 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12368 		device_xname(sc->sc_dev), __func__));
   12369 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12370 
   12371 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12372 		while (wm_get_swsm_semaphore(sc) != 0)
   12373 			continue;
   12374 	}
   12375 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12376 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12377 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12378 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12379 		wm_put_swsm_semaphore(sc);
   12380 }
   12381 
   12382 static int
   12383 wm_get_phy_82575(struct wm_softc *sc)
   12384 {
   12385 
   12386 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12387 		device_xname(sc->sc_dev), __func__));
   12388 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12389 }
   12390 
   12391 static void
   12392 wm_put_phy_82575(struct wm_softc *sc)
   12393 {
   12394 
   12395 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12396 		device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12398 }
   12399 
   12400 static int
   12401 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12402 {
   12403 	uint32_t ext_ctrl;
   12404 	int timeout = 200;
   12405 
   12406 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12407 		device_xname(sc->sc_dev), __func__));
   12408 
   12409 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
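	/*
	 * Ownership is requested by setting the MDIO software ownership
	 * bit in EXTCNFCTR; the bit only sticks once the firmware has
	 * released the resource, so read it back to check.
	 */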
   12410 	for (timeout = 0; timeout < 200; timeout++) {
   12411 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12412 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12413 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12414 
   12415 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12416 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12417 			return 0;
   12418 		delay(5000);
   12419 	}
   12420 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12421 	    device_xname(sc->sc_dev), ext_ctrl);
   12422 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12423 	return 1;
   12424 }
   12425 
   12426 static void
   12427 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12428 {
   12429 	uint32_t ext_ctrl;
   12430 
   12431 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12432 		device_xname(sc->sc_dev), __func__));
   12433 
   12434 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12435 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12436 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12437 
   12438 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12439 }
   12440 
   12441 static int
   12442 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12443 {
   12444 	uint32_t ext_ctrl;
   12445 	int timeout;
   12446 
   12447 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12448 		device_xname(sc->sc_dev), __func__));
   12449 	mutex_enter(sc->sc_ich_phymtx);
   12450 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12451 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12452 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12453 			break;
   12454 		delay(1000);
   12455 	}
   12456 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12457 		printf("%s: SW has already locked the resource\n",
   12458 		    device_xname(sc->sc_dev));
   12459 		goto out;
   12460 	}
   12461 
   12462 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12463 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12464 	for (timeout = 0; timeout < 1000; timeout++) {
   12465 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12466 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12467 			break;
   12468 		delay(1000);
   12469 	}
   12470 	if (timeout >= 1000) {
   12471 		printf("%s: failed to acquire semaphore\n",
   12472 		    device_xname(sc->sc_dev));
   12473 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12474 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12475 		goto out;
   12476 	}
   12477 	return 0;
   12478 
   12479 out:
   12480 	mutex_exit(sc->sc_ich_phymtx);
   12481 	return 1;
   12482 }
   12483 
   12484 static void
   12485 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12486 {
   12487 	uint32_t ext_ctrl;
   12488 
   12489 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12490 		device_xname(sc->sc_dev), __func__));
   12491 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12492 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12493 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12494 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12495 	} else {
   12496 		printf("%s: Semaphore unexpectedly released\n",
   12497 		    device_xname(sc->sc_dev));
   12498 	}
   12499 
   12500 	mutex_exit(sc->sc_ich_phymtx);
   12501 }
   12502 
   12503 static int
   12504 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12505 {
   12506 
   12507 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12508 		device_xname(sc->sc_dev), __func__));
   12509 	mutex_enter(sc->sc_ich_nvmmtx);
   12510 
   12511 	return 0;
   12512 }
   12513 
   12514 static void
   12515 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12516 {
   12517 
   12518 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12519 		device_xname(sc->sc_dev), __func__));
   12520 	mutex_exit(sc->sc_ich_nvmmtx);
   12521 }
   12522 
   12523 static int
   12524 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12525 {
   12526 	int i = 0;
   12527 	uint32_t reg;
   12528 
   12529 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12530 		device_xname(sc->sc_dev), __func__));
   12531 
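	/*
	 * Claim MDIO ownership by setting the software ownership bit and
	 * reading it back; retry for up to WM_MDIO_OWNERSHIP_TIMEOUT
	 * iterations of 2 ms each before giving up.
	 */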
   12532 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12533 	do {
   12534 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12535 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12536 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12537 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12538 			break;
   12539 		delay(2*1000);
   12540 		i++;
   12541 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12542 
   12543 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12544 		wm_put_hw_semaphore_82573(sc);
   12545 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12546 		    device_xname(sc->sc_dev));
   12547 		return -1;
   12548 	}
   12549 
   12550 	return 0;
   12551 }
   12552 
   12553 static void
   12554 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12555 {
   12556 	uint32_t reg;
   12557 
   12558 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12559 		device_xname(sc->sc_dev), __func__));
   12560 
   12561 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12562 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12563 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12564 }
   12565 
   12566 /*
   12567  * Management mode and power management related subroutines.
   12568  * BMC, AMT, suspend/resume and EEE.
   12569  */
   12570 
   12571 #ifdef WM_WOL
   12572 static int
   12573 wm_check_mng_mode(struct wm_softc *sc)
   12574 {
   12575 	int rv;
   12576 
   12577 	switch (sc->sc_type) {
   12578 	case WM_T_ICH8:
   12579 	case WM_T_ICH9:
   12580 	case WM_T_ICH10:
   12581 	case WM_T_PCH:
   12582 	case WM_T_PCH2:
   12583 	case WM_T_PCH_LPT:
   12584 	case WM_T_PCH_SPT:
   12585 		rv = wm_check_mng_mode_ich8lan(sc);
   12586 		break;
   12587 	case WM_T_82574:
   12588 	case WM_T_82583:
   12589 		rv = wm_check_mng_mode_82574(sc);
   12590 		break;
   12591 	case WM_T_82571:
   12592 	case WM_T_82572:
   12593 	case WM_T_82573:
   12594 	case WM_T_80003:
   12595 		rv = wm_check_mng_mode_generic(sc);
   12596 		break;
   12597 	default:
		/* nothing to do */
   12599 		rv = 0;
   12600 		break;
   12601 	}
   12602 
   12603 	return rv;
   12604 }
   12605 
   12606 static int
   12607 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12608 {
   12609 	uint32_t fwsm;
   12610 
   12611 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12612 
   12613 	if (((fwsm & FWSM_FW_VALID) != 0)
   12614 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12615 		return 1;
   12616 
   12617 	return 0;
   12618 }
   12619 
   12620 static int
   12621 wm_check_mng_mode_82574(struct wm_softc *sc)
   12622 {
   12623 	uint16_t data;
   12624 
   12625 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12626 
   12627 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12628 		return 1;
   12629 
   12630 	return 0;
   12631 }
   12632 
   12633 static int
   12634 wm_check_mng_mode_generic(struct wm_softc *sc)
   12635 {
   12636 	uint32_t fwsm;
   12637 
   12638 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12639 
   12640 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12641 		return 1;
   12642 
   12643 	return 0;
   12644 }
   12645 #endif /* WM_WOL */
   12646 
   12647 static int
   12648 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12649 {
   12650 	uint32_t manc, fwsm, factps;
   12651 
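	/*
	 * Pass-through requires ASF firmware to be present and the MAC to
	 * be forwarding TCO traffic to the host; beyond that, the exact
	 * check depends on the chip family, as below.
	 */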
   12652 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12653 		return 0;
   12654 
   12655 	manc = CSR_READ(sc, WMREG_MANC);
   12656 
   12657 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12658 		device_xname(sc->sc_dev), manc));
   12659 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12660 		return 0;
   12661 
   12662 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12663 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12664 		factps = CSR_READ(sc, WMREG_FACTPS);
   12665 		if (((factps & FACTPS_MNGCG) == 0)
   12666 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12667 			return 1;
   12668 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12669 		uint16_t data;
   12670 
   12671 		factps = CSR_READ(sc, WMREG_FACTPS);
   12672 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12673 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12674 			device_xname(sc->sc_dev), factps, data));
   12675 		if (((factps & FACTPS_MNGCG) == 0)
   12676 		    && ((data & NVM_CFG2_MNGM_MASK)
   12677 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12678 			return 1;
   12679 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12680 	    && ((manc & MANC_ASF_EN) == 0))
   12681 		return 1;
   12682 
   12683 	return 0;
   12684 }
   12685 
   12686 static bool
   12687 wm_phy_resetisblocked(struct wm_softc *sc)
   12688 {
   12689 	bool blocked = false;
   12690 	uint32_t reg;
   12691 	int i = 0;
   12692 
   12693 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12694 		device_xname(sc->sc_dev), __func__));
   12695 
   12696 	switch (sc->sc_type) {
   12697 	case WM_T_ICH8:
   12698 	case WM_T_ICH9:
   12699 	case WM_T_ICH10:
   12700 	case WM_T_PCH:
   12701 	case WM_T_PCH2:
   12702 	case WM_T_PCH_LPT:
   12703 	case WM_T_PCH_SPT:
   12704 		do {
   12705 			reg = CSR_READ(sc, WMREG_FWSM);
   12706 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12707 				blocked = true;
   12708 				delay(10*1000);
   12709 				continue;
   12710 			}
   12711 			blocked = false;
   12712 		} while (blocked && (i++ < 30));
   12713 		return blocked;
   12715 	case WM_T_82571:
   12716 	case WM_T_82572:
   12717 	case WM_T_82573:
   12718 	case WM_T_82574:
   12719 	case WM_T_82583:
   12720 	case WM_T_80003:
   12721 		reg = CSR_READ(sc, WMREG_MANC);
   12722 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12723 			return true;
   12724 		else
   12725 			return false;
   12727 	default:
   12728 		/* no problem */
   12729 		break;
   12730 	}
   12731 
   12732 	return false;
   12733 }
   12734 
   12735 static void
   12736 wm_get_hw_control(struct wm_softc *sc)
   12737 {
   12738 	uint32_t reg;
   12739 
   12740 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12741 		device_xname(sc->sc_dev), __func__));
   12742 
   12743 	if (sc->sc_type == WM_T_82573) {
   12744 		reg = CSR_READ(sc, WMREG_SWSM);
   12745 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12746 	} else if (sc->sc_type >= WM_T_82571) {
   12747 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12748 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12749 	}
   12750 }
   12751 
   12752 static void
   12753 wm_release_hw_control(struct wm_softc *sc)
   12754 {
   12755 	uint32_t reg;
   12756 
   12757 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12758 		device_xname(sc->sc_dev), __func__));
   12759 
   12760 	if (sc->sc_type == WM_T_82573) {
   12761 		reg = CSR_READ(sc, WMREG_SWSM);
   12762 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12763 	} else if (sc->sc_type >= WM_T_82571) {
   12764 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12765 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12766 	}
   12767 }
   12768 
   12769 static void
   12770 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12771 {
   12772 	uint32_t reg;
   12773 
   12774 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12775 		device_xname(sc->sc_dev), __func__));
   12776 
   12777 	if (sc->sc_type < WM_T_PCH2)
   12778 		return;
   12779 
   12780 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12781 
   12782 	if (gate)
   12783 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12784 	else
   12785 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12786 
   12787 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12788 }
   12789 
   12790 static void
   12791 wm_smbustopci(struct wm_softc *sc)
   12792 {
   12793 	uint32_t fwsm, reg;
   12794 	int rv = 0;
   12795 
   12796 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12797 		device_xname(sc->sc_dev), __func__));
   12798 
   12799 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12800 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12801 
   12802 	/* Disable ULP */
   12803 	wm_ulp_disable(sc);
   12804 
   12805 	/* Acquire PHY semaphore */
   12806 	sc->phy.acquire(sc);
   12807 
   12808 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12809 	switch (sc->sc_type) {
   12810 	case WM_T_PCH_LPT:
   12811 	case WM_T_PCH_SPT:
   12812 		if (wm_phy_is_accessible_pchlan(sc))
   12813 			break;
   12814 
   12815 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12816 		reg |= CTRL_EXT_FORCE_SMBUS;
   12817 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12818 #if 0
   12819 		/* XXX Isn't this required??? */
   12820 		CSR_WRITE_FLUSH(sc);
   12821 #endif
   12822 		delay(50 * 1000);
   12823 		/* FALLTHROUGH */
   12824 	case WM_T_PCH2:
   12825 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12826 			break;
   12827 		/* FALLTHROUGH */
   12828 	case WM_T_PCH:
   12829 		if (sc->sc_type == WM_T_PCH)
   12830 			if ((fwsm & FWSM_FW_VALID) != 0)
   12831 				break;
   12832 
   12833 		if (wm_phy_resetisblocked(sc) == true) {
   12834 			printf("XXX reset is blocked(3)\n");
   12835 			break;
   12836 		}
   12837 
   12838 		wm_toggle_lanphypc_pch_lpt(sc);
   12839 
   12840 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12841 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12842 				break;
   12843 
   12844 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12845 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12846 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12847 
   12848 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12849 				break;
   12850 			rv = -1;
   12851 		}
   12852 		break;
   12853 	default:
   12854 		break;
   12855 	}
   12856 
   12857 	/* Release semaphore */
   12858 	sc->phy.release(sc);
   12859 
   12860 	if (rv == 0) {
   12861 		if (wm_phy_resetisblocked(sc)) {
   12862 			printf("XXX reset is blocked(4)\n");
   12863 			goto out;
   12864 		}
   12865 		wm_reset_phy(sc);
   12866 		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
   12868 	}
   12869 
   12870 out:
   12871 	/*
   12872 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12873 	 */
   12874 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12875 		delay(10*1000);
   12876 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12877 	}
   12878 }
   12879 
   12880 static void
   12881 wm_init_manageability(struct wm_softc *sc)
   12882 {
   12883 
   12884 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12885 		device_xname(sc->sc_dev), __func__));
   12886 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12887 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12888 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12889 
   12890 		/* Disable hardware interception of ARP */
   12891 		manc &= ~MANC_ARP_EN;
   12892 
   12893 		/* Enable receiving management packets to the host */
   12894 		if (sc->sc_type >= WM_T_82571) {
   12895 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12897 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12898 		}
   12899 
   12900 		CSR_WRITE(sc, WMREG_MANC, manc);
   12901 	}
   12902 }
   12903 
   12904 static void
   12905 wm_release_manageability(struct wm_softc *sc)
   12906 {
   12907 
   12908 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12909 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12910 
   12911 		manc |= MANC_ARP_EN;
   12912 		if (sc->sc_type >= WM_T_82571)
   12913 			manc &= ~MANC_EN_MNG2HOST;
   12914 
   12915 		CSR_WRITE(sc, WMREG_MANC, manc);
   12916 	}
   12917 }
   12918 
   12919 static void
   12920 wm_get_wakeup(struct wm_softc *sc)
   12921 {
   12922 
   12923 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12924 	switch (sc->sc_type) {
   12925 	case WM_T_82573:
   12926 	case WM_T_82583:
   12927 		sc->sc_flags |= WM_F_HAS_AMT;
   12928 		/* FALLTHROUGH */
   12929 	case WM_T_80003:
   12930 	case WM_T_82575:
   12931 	case WM_T_82576:
   12932 	case WM_T_82580:
   12933 	case WM_T_I350:
   12934 	case WM_T_I354:
   12935 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12936 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12937 		/* FALLTHROUGH */
   12938 	case WM_T_82541:
   12939 	case WM_T_82541_2:
   12940 	case WM_T_82547:
   12941 	case WM_T_82547_2:
   12942 	case WM_T_82571:
   12943 	case WM_T_82572:
   12944 	case WM_T_82574:
   12945 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12946 		break;
   12947 	case WM_T_ICH8:
   12948 	case WM_T_ICH9:
   12949 	case WM_T_ICH10:
   12950 	case WM_T_PCH:
   12951 	case WM_T_PCH2:
   12952 	case WM_T_PCH_LPT:
   12953 	case WM_T_PCH_SPT:
   12954 		sc->sc_flags |= WM_F_HAS_AMT;
   12955 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12956 		break;
   12957 	default:
   12958 		break;
   12959 	}
   12960 
   12961 	/* 1: HAS_MANAGE */
   12962 	if (wm_enable_mng_pass_thru(sc) != 0)
   12963 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12964 
   12965 #ifdef WM_DEBUG
   12966 	printf("\n");
   12967 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12968 		printf("HAS_AMT,");
   12969 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12970 		printf("ARC_SUBSYS_VALID,");
   12971 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12972 		printf("ASF_FIRMWARE_PRES,");
   12973 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12974 		printf("HAS_MANAGE,");
   12975 	printf("\n");
   12976 #endif
	/*
	 * Note that the WOL flags are set after the EEPROM-related reset
	 * is done.
	 */
   12981 }
   12982 
   12983 /*
   12984  * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer, excluding some I217/I218 variants (see below).
   12986  */
   12987 static void
   12988 wm_ulp_disable(struct wm_softc *sc)
   12989 {
   12990 	uint32_t reg;
   12991 	int i = 0;
   12992 
   12993 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12994 		device_xname(sc->sc_dev), __func__));
   12995 	/* Exclude old devices */
   12996 	if ((sc->sc_type < WM_T_PCH_LPT)
   12997 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12998 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12999 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13000 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13001 		return;
   13002 
   13003 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13004 		/* Request ME un-configure ULP mode in the PHY */
   13005 		reg = CSR_READ(sc, WMREG_H2ME);
   13006 		reg &= ~H2ME_ULP;
   13007 		reg |= H2ME_ENFORCE_SETTINGS;
   13008 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13009 
   13010 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13011 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13012 			if (i++ == 30) {
   13013 				printf("%s timed out\n", __func__);
   13014 				return;
   13015 			}
   13016 			delay(10 * 1000);
   13017 		}
   13018 		reg = CSR_READ(sc, WMREG_H2ME);
   13019 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13020 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13021 
   13022 		return;
   13023 	}
   13024 
   13025 	/* Acquire semaphore */
   13026 	sc->phy.acquire(sc);
   13027 
   13028 	/* Toggle LANPHYPC */
   13029 	wm_toggle_lanphypc_pch_lpt(sc);
   13030 
   13031 	/* Unforce SMBus mode in PHY */
   13032 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13033 	if (reg == 0x0000 || reg == 0xffff) {
   13034 		uint32_t reg2;
   13035 
   13036 		printf("%s: Force SMBus first.\n", __func__);
   13037 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13038 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13039 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13040 		delay(50 * 1000);
   13041 
   13042 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13043 	}
   13044 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13045 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13046 
   13047 	/* Unforce SMBus mode in MAC */
   13048 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13049 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13050 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13051 
   13052 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13053 	reg |= HV_PM_CTRL_K1_ENA;
   13054 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13055 
   13056 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13057 	reg &= ~(I218_ULP_CONFIG1_IND
   13058 	    | I218_ULP_CONFIG1_STICKY_ULP
   13059 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13060 	    | I218_ULP_CONFIG1_WOL_HOST
   13061 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13062 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13063 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13064 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13065 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
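          	/* Commit the cleared bits, then start auto ULP configuration */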
   13066 	reg |= I218_ULP_CONFIG1_START;
   13067 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13068 
   13069 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13070 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13071 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13072 
   13073 	/* Release semaphore */
   13074 	sc->phy.release(sc);
   13075 	wm_gmii_reset(sc);
   13076 	delay(50 * 1000);
   13077 }
   13078 
   13079 /* WOL in the newer chipset interfaces (pchlan) */
   13080 static void
   13081 wm_enable_phy_wakeup(struct wm_softc *sc)
   13082 {
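          	/*
          	 * XXX Not implemented yet.  The disabled block below sketches
          	 * the intended sequence.
          	 */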
   13083 #if 0
   13084 	uint16_t preg;
   13085 
   13086 	/* Copy MAC RARs to PHY RARs */
   13087 
   13088 	/* Copy MAC MTA to PHY MTA */
   13089 
   13090 	/* Configure PHY Rx Control register */
   13091 
   13092 	/* Enable PHY wakeup in MAC register */
   13093 
   13094 	/* Configure and enable PHY wakeup in PHY registers */
   13095 
   13096 	/* Activate PHY wakeup */
   13097 
   13098 	/* XXX */
   13099 #endif
   13100 }
   13101 
   13102 /* Power down workaround on D3 */
   13103 static void
   13104 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13105 {
   13106 	uint32_t reg;
   13107 	int i;
   13108 
   13109 	for (i = 0; i < 2; i++) {
   13110 		/* Disable link */
   13111 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13112 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13113 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13114 
   13115 		/*
   13116 		 * Call gig speed drop workaround on Gig disable before
   13117 		 * accessing any PHY registers
   13118 		 */
   13119 		if (sc->sc_type == WM_T_ICH8)
   13120 			wm_gig_downshift_workaround_ich8lan(sc);
   13121 
   13122 		/* Write VR power-down enable */
   13123 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13124 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13125 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13126 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13127 
   13128 		/* Read it back and test */
   13129 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13130 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13131 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13132 			break;
   13133 
   13134 		/* Issue PHY reset and repeat at most one more time */
   13135 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13136 	}
   13137 }
   13138 
   13139 static void
   13140 wm_enable_wakeup(struct wm_softc *sc)
   13141 {
   13142 	uint32_t reg, pmreg;
   13143 	pcireg_t pmode;
   13144 
   13145 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13146 		device_xname(sc->sc_dev), __func__));
   13147 
   13148 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13149 		&pmreg, NULL) == 0)
   13150 		return;
   13151 
   13152 	/* Advertise the wakeup capability */
   13153 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13154 	    | CTRL_SWDPIN(3));
   13155 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13156 
   13157 	/* ICH workaround */
   13158 	switch (sc->sc_type) {
   13159 	case WM_T_ICH8:
   13160 	case WM_T_ICH9:
   13161 	case WM_T_ICH10:
   13162 	case WM_T_PCH:
   13163 	case WM_T_PCH2:
   13164 	case WM_T_PCH_LPT:
   13165 	case WM_T_PCH_SPT:
   13166 		/* Disable gig during WOL */
   13167 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13168 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13169 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13170 		if (sc->sc_type == WM_T_PCH)
   13171 			wm_gmii_reset(sc);
   13172 
   13173 		/* Power down workaround */
   13174 		if (sc->sc_phytype == WMPHY_82577) {
   13175 			struct mii_softc *child;
   13176 
   13177 			/* Assume that the PHY is copper */
   13178 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13179 			if (child->mii_mpd_rev <= 2)
   13180 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13181 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
   13182 		}
   13183 		break;
   13184 	default:
   13185 		break;
   13186 	}
   13187 
   13188 	/* Keep the laser running on fiber adapters */
   13189 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13190 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13191 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13192 		reg |= CTRL_EXT_SWDPIN(3);
   13193 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13194 	}
   13195 
   13196 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13197 #if 0	/* for multicast packets */
   13198 	reg |= WUFC_MC;
   13199 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13200 #endif
   13201 
   13202 	if (sc->sc_type >= WM_T_PCH)
   13203 		wm_enable_phy_wakeup(sc);
   13204 	else {
   13205 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13206 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13207 	}
   13208 
   13209 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13210 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13211 		|| (sc->sc_type == WM_T_PCH2))
    13212 	    && (sc->sc_phytype == WMPHY_IGP_3))
    13213 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13214 
   13215 	/* Request PME */
   13216 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13217 #if 0
   13218 	/* Disable WOL */
   13219 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13220 #else
   13221 	/* For WOL */
   13222 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13223 #endif
   13224 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13225 }
   13226 
   13227 /* LPLU */
   13228 
   13229 static void
   13230 wm_lplu_d0_disable(struct wm_softc *sc)
   13231 {
   13232 	uint32_t reg;
   13233 
   13234 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13235 		device_xname(sc->sc_dev), __func__));
   13236 
   13237 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13238 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13239 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13240 }
   13241 
   13242 static void
   13243 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13244 {
   13245 	uint32_t reg;
   13246 
   13247 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13248 		device_xname(sc->sc_dev), __func__));
   13249 
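          	/*
          	 * Clear the LPLU OEM bits; setting ANEGNOW presumably restarts
          	 * autonegotiation so that the change takes effect immediately.
          	 */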
   13250 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13251 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13252 	reg |= HV_OEM_BITS_ANEGNOW;
   13253 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13254 }
   13255 
   13256 /* EEE */
   13257 
   13258 static void
   13259 wm_set_eee_i350(struct wm_softc *sc)
   13260 {
   13261 	uint32_t ipcnfg, eeer;
   13262 
   13263 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13264 	eeer = CSR_READ(sc, WMREG_EEER);
   13265 
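          	/* Enable or disable EEE advertisement (1G/100M) and LPI per WM_F_EEE */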
   13266 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13267 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13268 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13269 		    | EEER_LPI_FC);
   13270 	} else {
   13271 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13272 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13273 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13274 		    | EEER_LPI_FC);
   13275 	}
   13276 
   13277 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13278 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13279 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13280 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13281 }
   13282 
   13283 /*
   13284  * Workarounds (mainly PHY related).
   13285  * Basically, PHY's workarounds are in the PHY drivers.
   13286  */
   13287 
   13288 /* Work-around for 82566 Kumeran PCS lock loss */
   13289 static void
   13290 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13291 {
   13292 #if 0
   13293 	int miistatus, active, i;
   13294 	int reg;
   13295 
   13296 	miistatus = sc->sc_mii.mii_media_status;
   13297 
   13298 	/* If the link is not up, do nothing */
   13299 	if ((miistatus & IFM_ACTIVE) == 0)
   13300 		return;
   13301 
   13302 	active = sc->sc_mii.mii_media_active;
   13303 
   13304 	/* Nothing to do if the link is other than 1Gbps */
   13305 	if (IFM_SUBTYPE(active) != IFM_1000_T)
    13306 	/* Nothing to do if the link speed is not 1 Gbps */
   13307 
   13308 	for (i = 0; i < 10; i++) {
   13309 		/* read twice */
   13310 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13311 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13312 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13313 			goto out;	/* GOOD! */
   13314 
   13315 		/* Reset the PHY */
   13316 		wm_gmii_reset(sc);
   13317 		delay(5*1000);
   13318 	}
   13319 
   13320 	/* Disable GigE link negotiation */
   13321 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13322 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13323 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13324 
   13325 	/*
   13326 	 * Call gig speed drop workaround on Gig disable before accessing
   13327 	 * any PHY registers.
   13328 	 */
   13329 	wm_gig_downshift_workaround_ich8lan(sc);
   13330 
   13331 out:
   13332 	return;
   13333 #endif
   13334 }
   13335 
   13336 /* WOL from S5 stops working */
   13337 static void
   13338 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13339 {
   13340 	uint16_t kmrn_reg;
   13341 
   13342 	/* Only for igp3 */
   13343 	if (sc->sc_phytype == WMPHY_IGP_3) {
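          		/* Set and then clear the Kumeran near-end loopback bit */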
   13344 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13345 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13346 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13347 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13348 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13349 	}
   13350 }
   13351 
   13352 /*
   13353  * Workaround for pch's PHYs
   13354  * XXX should be moved to new PHY driver?
   13355  */
   13356 static void
   13357 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13358 {
   13359 
   13360 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13361 		device_xname(sc->sc_dev), __func__));
   13362 	KASSERT(sc->sc_type == WM_T_PCH);
   13363 
   13364 	if (sc->sc_phytype == WMPHY_82577)
   13365 		wm_set_mdio_slow_mode_hv(sc);
   13366 
    13367 	/* XXX Not implemented: (PCH rev. 2) && 82577 (PHY rev. 2 or 3) */
    13368 
    13369 	/* XXX Not implemented: 82577 (PHY rev. 1 or 2) || 82578 (PHY rev. 1) */
   13370 
   13371 	/* 82578 */
   13372 	if (sc->sc_phytype == WMPHY_82578) {
   13373 		struct mii_softc *child;
   13374 
   13375 		/*
   13376 		 * Return registers to default by doing a soft reset then
   13377 		 * writing 0x3140 to the control register
   13378 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13379 		 */
   13380 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13381 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13382 			PHY_RESET(child);
   13383 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13384 			    0x3140);
   13385 		}
   13386 	}
   13387 
   13388 	/* Select page 0 */
   13389 	sc->phy.acquire(sc);
   13390 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13391 	sc->phy.release(sc);
   13392 
   13393 	/*
   13394 	 * Configure the K1 Si workaround during phy reset assuming there is
   13395 	 * link so that it disables K1 if link is in 1Gbps.
   13396 	 */
   13397 	wm_k1_gig_workaround_hv(sc, 1);
   13398 }
   13399 
   13400 static void
   13401 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13402 {
   13403 
   13404 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13405 		device_xname(sc->sc_dev), __func__));
   13406 	KASSERT(sc->sc_type == WM_T_PCH2);
   13407 
   13408 	wm_set_mdio_slow_mode_hv(sc);
   13409 }
   13410 
   13411 static int
   13412 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13413 {
   13414 	int k1_enable = sc->sc_nvm_k1_enabled;
   13415 
   13416 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13417 		device_xname(sc->sc_dev), __func__));
   13418 
   13419 	if (sc->phy.acquire(sc) != 0)
   13420 		return -1;
   13421 
   13422 	if (link) {
   13423 		k1_enable = 0;
   13424 
   13425 		/* Link stall fix for link up */
   13426 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13427 	} else {
   13428 		/* Link stall fix for link down */
   13429 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13430 	}
   13431 
   13432 	wm_configure_k1_ich8lan(sc, k1_enable);
   13433 	sc->phy.release(sc);
   13434 
   13435 	return 0;
   13436 }
   13437 
   13438 static void
   13439 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13440 {
   13441 	uint32_t reg;
   13442 
   13443 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13444 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13445 	    reg | HV_KMRN_MDIO_SLOW);
   13446 }
   13447 
   13448 static void
   13449 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13450 {
   13451 	uint32_t ctrl, ctrl_ext, tmp;
   13452 	uint16_t kmrn_reg;
   13453 
   13454 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13455 
   13456 	if (k1_enable)
   13457 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13458 	else
   13459 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13460 
   13461 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13462 
   13463 	delay(20);
   13464 
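          	/*
          	 * Briefly force the MAC speed with the speed-bypass bit set,
          	 * then restore the original values, presumably so that the new
          	 * K1 setting takes effect.
          	 */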
   13465 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13466 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13467 
   13468 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13469 	tmp |= CTRL_FRCSPD;
   13470 
   13471 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13472 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13473 	CSR_WRITE_FLUSH(sc);
   13474 	delay(20);
   13475 
   13476 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13477 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13478 	CSR_WRITE_FLUSH(sc);
   13479 	delay(20);
   13480 }
   13481 
    13482 /* Special case - the 82575 needs manual initialization ... */
   13483 static void
   13484 wm_reset_init_script_82575(struct wm_softc *sc)
   13485 {
    13486 	/*
    13487 	 * Remark: this is untested code - we have no board without an EEPROM.
    13488 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13489 	 */
   13490 
   13491 	/* SerDes configuration via SERDESCTRL */
   13492 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13493 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13494 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13495 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13496 
   13497 	/* CCM configuration via CCMCTL register */
   13498 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13499 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13500 
   13501 	/* PCIe lanes configuration */
   13502 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13503 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13504 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13505 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13506 
   13507 	/* PCIe PLL Configuration */
   13508 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13509 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13510 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13511 }
   13512 
   13513 static void
   13514 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13515 {
   13516 	uint32_t reg;
   13517 	uint16_t nvmword;
   13518 	int rv;
   13519 
   13520 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13521 		return;
   13522 
   13523 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13524 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13525 	if (rv != 0) {
   13526 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13527 		    __func__);
   13528 		return;
   13529 	}
   13530 
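          	/* Reflect the NVM's external/combined MDIO settings in MDICNFG */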
   13531 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13532 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13533 		reg |= MDICNFG_DEST;
   13534 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13535 		reg |= MDICNFG_COM_MDIO;
   13536 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13537 }
   13538 
   13539 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13540 
   13541 static bool
   13542 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13543 {
   13544 	int i;
   13545 	uint32_t reg;
   13546 	uint16_t id1, id2;
   13547 
   13548 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13549 		device_xname(sc->sc_dev), __func__));
   13550 	id1 = id2 = 0xffff;
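          	/* Retry the PHY ID read once; all-zeros/all-ones IDs are invalid */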
   13551 	for (i = 0; i < 2; i++) {
   13552 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13553 		if (MII_INVALIDID(id1))
   13554 			continue;
   13555 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13556 		if (MII_INVALIDID(id2))
   13557 			continue;
   13558 		break;
   13559 	}
    13560 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    13561 		goto out;
   13563 
   13564 	if (sc->sc_type < WM_T_PCH_LPT) {
   13565 		sc->phy.release(sc);
   13566 		wm_set_mdio_slow_mode_hv(sc);
   13567 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13568 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13569 		sc->phy.acquire(sc);
   13570 	}
   13571 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    13572 		printf("%s: PHY is not accessible\n", __func__);
   13573 		return false;
   13574 	}
   13575 out:
   13576 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13577 		/* Only unforce SMBus if ME is not active */
   13578 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13579 			/* Unforce SMBus mode in PHY */
   13580 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13581 			    CV_SMB_CTRL);
   13582 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13583 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13584 			    CV_SMB_CTRL, reg);
   13585 
   13586 			/* Unforce SMBus mode in MAC */
   13587 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13588 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13589 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13590 		}
   13591 	}
   13592 	return true;
   13593 }
   13594 
   13595 static void
   13596 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13597 {
   13598 	uint32_t reg;
   13599 	int i;
   13600 
   13601 	/* Set PHY Config Counter to 50msec */
   13602 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13603 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13604 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13605 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13606 
   13607 	/* Toggle LANPHYPC */
   13608 	reg = CSR_READ(sc, WMREG_CTRL);
   13609 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13610 	reg &= ~CTRL_LANPHYPC_VALUE;
   13611 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13612 	CSR_WRITE_FLUSH(sc);
   13613 	delay(1000);
   13614 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13615 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13616 	CSR_WRITE_FLUSH(sc);
   13617 
   13618 	if (sc->sc_type < WM_T_PCH_LPT)
   13619 		delay(50 * 1000);
   13620 	else {
   13621 		i = 20;
   13622 
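          		/* Poll the LANPHYPC-done flag for up to ~100 ms (20 * 5 ms) */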
   13623 		do {
   13624 			delay(5 * 1000);
   13625 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13626 		    && i--);
   13627 
   13628 		delay(30 * 1000);
   13629 	}
   13630 }
   13631 
   13632 static int
   13633 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13634 {
   13635 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13636 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13637 	uint32_t rxa;
   13638 	uint16_t scale = 0, lat_enc = 0;
   13639 	int64_t lat_ns, value;
   13640 
   13641 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13642 		device_xname(sc->sc_dev), __func__));
   13643 
   13644 	if (link) {
   13645 		pcireg_t preg;
   13646 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13647 
   13648 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13649 
   13650 		/*
   13651 		 * Determine the maximum latency tolerated by the device.
   13652 		 *
   13653 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13654 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13655 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13656 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13657 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13658 		 */
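          		/*
          		 * rxa is in KB units.  (rxa * 1024 - 2 * MTU) is a byte
          		 * count and * 8 makes it bits; bits divided by the link
          		 * speed in Mbit/s gives microseconds, hence the extra
          		 * factor of 1000 to obtain nanoseconds.
          		 */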
   13659 		lat_ns = ((int64_t)rxa * 1024 -
   13660 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13661 		if (lat_ns < 0)
   13662 			lat_ns = 0;
   13663 		else {
   13664 			uint32_t status;
   13665 			uint16_t speed;
   13666 
   13667 			status = CSR_READ(sc, WMREG_STATUS);
   13668 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13669 			case STATUS_SPEED_10:
   13670 				speed = 10;
   13671 				break;
   13672 			case STATUS_SPEED_100:
   13673 				speed = 100;
   13674 				break;
   13675 			case STATUS_SPEED_1000:
   13676 				speed = 1000;
   13677 				break;
   13678 			default:
   13679 				printf("%s: Unknown speed (status = %08x)\n",
   13680 				    device_xname(sc->sc_dev), status);
   13681 				return -1;
   13682 			}
   13683 			lat_ns /= speed;
   13684 		}
   13685 		value = lat_ns;
   13686 
   13687 		while (value > LTRV_VALUE) {
    13688 			scale++;
   13689 			value = howmany(value, __BIT(5));
   13690 		}
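          		/*
          		 * Example: lat_ns = 1000000 encodes as scale = 2 and
          		 * value = 977, i.e. 977 * 2^10 ns (roughly 1 ms).
          		 */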
   13691 		if (scale > LTRV_SCALE_MAX) {
   13692 			printf("%s: Invalid LTR latency scale %d\n",
   13693 			    device_xname(sc->sc_dev), scale);
   13694 			return -1;
   13695 		}
   13696 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13697 
   13698 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13699 		    WM_PCI_LTR_CAP_LPT);
   13700 		max_snoop = preg & 0xffff;
   13701 		max_nosnoop = preg >> 16;
   13702 
   13703 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13704 
    13705 		if (lat_enc > max_ltr_enc)
    13706 			lat_enc = max_ltr_enc;
   13708 	}
   13709 	/* Snoop and No-Snoop latencies the same */
   13710 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13711 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13712 
   13713 	return 0;
   13714 }
   13715 
   13716 /*
   13717  * I210 Errata 25 and I211 Errata 10
   13718  * Slow System Clock.
   13719  */
   13720 static void
   13721 wm_pll_workaround_i210(struct wm_softc *sc)
   13722 {
   13723 	uint32_t mdicnfg, wuc;
   13724 	uint32_t reg;
   13725 	pcireg_t pcireg;
   13726 	uint32_t pmreg;
   13727 	uint16_t nvmword, tmp_nvmword;
   13728 	int phyval;
   13729 	bool wa_done = false;
   13730 	int i;
   13731 
   13732 	/* Save WUC and MDICNFG registers */
   13733 	wuc = CSR_READ(sc, WMREG_WUC);
   13734 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13735 
   13736 	reg = mdicnfg & ~MDICNFG_DEST;
   13737 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13738 
   13739 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13740 		nvmword = INVM_DEFAULT_AL;
   13741 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13742 
   13743 	/* Get Power Management cap offset */
   13744 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13745 		&pmreg, NULL) == 0)
   13746 		return;
   13747 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13748 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13749 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13750 
   13751 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13752 			break; /* OK */
   13753 		}
   13754 
   13755 		wa_done = true;
   13756 		/* Directly reset the internal PHY */
   13757 		reg = CSR_READ(sc, WMREG_CTRL);
   13758 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13759 
   13760 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13761 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13762 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13763 
   13764 		CSR_WRITE(sc, WMREG_WUC, 0);
   13765 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13766 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13767 
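          		/* Bounce the device through D3hot and back to D0 */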
   13768 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13769 		    pmreg + PCI_PMCSR);
   13770 		pcireg |= PCI_PMCSR_STATE_D3;
   13771 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13772 		    pmreg + PCI_PMCSR, pcireg);
   13773 		delay(1000);
   13774 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13775 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13776 		    pmreg + PCI_PMCSR, pcireg);
   13777 
   13778 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13779 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13780 
   13781 		/* Restore WUC register */
   13782 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13783 	}
   13784 
   13785 	/* Restore MDICNFG setting */
   13786 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13787 	if (wa_done)
   13788 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13789 }
   13790