/*	$NetBSD: if_wm.c,v 1.494 2017/03/03 07:38:52 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.494 2017/03/03 07:38:52 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
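
/*
 * Illustrative usage note (not in the original source): the second
 * argument to DPRINTF() carries its own parentheses so it can pass a
 * full printf() argument list, e.g.
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */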

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
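
/*
 * Illustrative note: with MSI-X this allows one vector per Tx/Rx queue
 * pair (up to WM_MAX_NQUEUEINTR) plus one extra vector for link-status
 * interrupts (see wm_linkintr_msix() below).
 */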

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
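
/*
 * Illustrative note: because the ring and job counts above are powers
 * of two, WM_NEXTTX()/WM_NEXTTXS() wrap with a cheap AND of the mask
 * instead of a modulo; e.g. with WM_NTXDESC(txq) == 256,
 * WM_NEXTTX(txq, 255) == (255 + 1) & 0xff == 0.
 */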

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif
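
/*
 * Illustrative note: these defaults bound how many Rx descriptors are
 * processed in one pass -- WM_RX_INTR_PROCESS_LIMIT_DEFAULT in hard
 * interrupt context and WM_RX_PROCESS_LIMIT_DEFAULT in the softint
 * handler (presumably seeding sc_rx_intr_process_limit and
 * sc_rx_process_limit below); the #ifndef guards let a kernel config
 * override them at build time.
 */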

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
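
/*
 * Illustrative note: the unions above reserve room for the largest
 * descriptor layout; which member is actually used is chosen per chip,
 * and the per-descriptor stride lives in txq_descsize/rxq_descsize, so
 * WM_CDTXOFF()/WM_CDRXOFF() compute byte offsets into the ring.
 */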

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
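
/*
 * Illustrative note (assumption): one SWFW semaphore mask per PHY,
 * presumably indexed by the chip's function id (sc_funcid, 0 to 3).
 */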

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
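
/*
 * Illustrative expansion: WM_Q_EVCNT_DEFINE(txq, txdw) declares
 * "char txq_txdw_evcnt_name[...]" and "struct evcnt txq_ev_txdw".
 * The array size comes from the literal string "qname##XX##evname"
 * (## is not pasted inside a string literal), which is simply long
 * enough to hold names such as "txq00txdw" that WM_Q_EVCNT_ATTACH()
 * formats with snprintf() before calling evcnt_attach_dynamic().
 */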

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This pcq intermediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
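
/*
 * Illustrative note: the NULL checks make these macros harmless no-ops
 * (and WM_CORE_LOCKED() trivially true) when sc_core_lock has not been
 * created, presumably in configurations without WM_MPSAFE.
 */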

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
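
/*
 * Illustrative note: rxq_tailp always points at the m_next field of
 * the last mbuf in the chain (or at rxq_head after a reset), so
 * WM_RXCHAIN_LINK() appends an mbuf in constant time without walking
 * the chain.
 */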

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
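
/*
 * Illustrative note: CSR_WRITE_FLUSH() issues a harmless read of the
 * STATUS register; on PCI a read forces any posted writes to reach the
 * device before the driver proceeds.
 */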

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
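
/*
 * Illustrative note: descriptor base addresses are programmed as two
 * 32-bit register halves, so the _LO/_HI macros split a bus_addr_t;
 * e.g. for a descriptor address of 0x123456780, _LO yields 0x23456780
 * and _HI yields 0x1.  With a 32-bit bus_addr_t, _HI is simply 0.
 */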

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
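
/*
 * Illustrative note: CFATTACH_DECL3_NEW() registers the autoconf glue
 * for this driver; the three NULLs are the unused activate, rescan and
 * child-detached hooks, and DVF_DETACH_SHUTDOWN marks the driver as
 * detachable at shutdown.
 */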

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1336 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1337 	  WM_T_82580,		WMP_F_SERDES },
   1338 
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1340 	  "I350 Gigabit Network Connection",
   1341 	  WM_T_I350,		WMP_F_COPPER },
   1342 
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1344 	  "I350 Gigabit Fiber Network Connection",
   1345 	  WM_T_I350,		WMP_F_FIBER },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1348 	  "I350 Gigabit Backplane Connection",
   1349 	  WM_T_I350,		WMP_F_SERDES },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1352 	  "I350 Quad Port Gigabit Ethernet",
   1353 	  WM_T_I350,		WMP_F_SERDES },
   1354 
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1356 	  "I350 Gigabit Connection",
   1357 	  WM_T_I350,		WMP_F_COPPER },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1360 	  "I354 Gigabit Ethernet (KX)",
   1361 	  WM_T_I354,		WMP_F_SERDES },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1364 	  "I354 Gigabit Ethernet (SGMII)",
   1365 	  WM_T_I354,		WMP_F_COPPER },
   1366 
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1368 	  "I354 Gigabit Ethernet (2.5G)",
   1369 	  WM_T_I354,		WMP_F_COPPER },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1372 	  "I210-T1 Ethernet Server Adapter",
   1373 	  WM_T_I210,		WMP_F_COPPER },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1376 	  "I210 Ethernet (Copper OEM)",
   1377 	  WM_T_I210,		WMP_F_COPPER },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1380 	  "I210 Ethernet (Copper IT)",
   1381 	  WM_T_I210,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1384 	  "I210 Ethernet (FLASH less)",
   1385 	  WM_T_I210,		WMP_F_COPPER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1388 	  "I210 Gigabit Ethernet (Fiber)",
   1389 	  WM_T_I210,		WMP_F_FIBER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1392 	  "I210 Gigabit Ethernet (SERDES)",
   1393 	  WM_T_I210,		WMP_F_SERDES },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1396 	  "I210 Gigabit Ethernet (FLASH less)",
   1397 	  WM_T_I210,		WMP_F_SERDES },
   1398 
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1400 	  "I210 Gigabit Ethernet (SGMII)",
   1401 	  WM_T_I210,		WMP_F_COPPER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1404 	  "I211 Ethernet (COPPER)",
   1405 	  WM_T_I211,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1407 	  "I217 V Ethernet Connection",
   1408 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1410 	  "I217 LM Ethernet Connection",
   1411 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1413 	  "I218 V Ethernet Connection",
   1414 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1416 	  "I218 V Ethernet Connection",
   1417 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1419 	  "I218 V Ethernet Connection",
   1420 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1422 	  "I218 LM Ethernet Connection",
   1423 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1425 	  "I218 LM Ethernet Connection",
   1426 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1428 	  "I218 LM Ethernet Connection",
   1429 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1430 #if 0
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1432 	  "I219 V Ethernet Connection",
   1433 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1435 	  "I219 V Ethernet Connection",
   1436 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1438 	  "I219 V Ethernet Connection",
   1439 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1441 	  "I219 V Ethernet Connection",
   1442 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1444 	  "I219 LM Ethernet Connection",
   1445 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1447 	  "I219 LM Ethernet Connection",
   1448 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1450 	  "I219 LM Ethernet Connection",
   1451 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1453 	  "I219 LM Ethernet Connection",
   1454 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1456 	  "I219 LM Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 #endif
   1459 	{ 0,			0,
   1460 	  NULL,
   1461 	  0,			0 },
   1462 };
   1463 
   1464 /*
    1465  * Register read/write functions,
    1466  * other than CSR_{READ|WRITE}().
   1467  */
   1468 
   1469 #if 0 /* Not currently used */
   1470 static inline uint32_t
   1471 wm_io_read(struct wm_softc *sc, int reg)
   1472 {
   1473 
   1474 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1475 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1476 }
   1477 #endif
   1478 
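         /*
          * Indirect register access through the I/O BAR: the register
          * offset is written to the window at offset 0, and the data is
          * then read or written at offset 4.
          */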
   1479 static inline void
   1480 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1481 {
   1482 
   1483 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1484 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1485 }
   1486 
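         /*
          * Write an 8-bit value to a sub-register behind an indirect
          * control register such as SCTL on the 82575, then poll until
          * the hardware reports SCTL_CTL_READY or the poll times out.
          */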
   1487 static inline void
   1488 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1489     uint32_t data)
   1490 {
   1491 	uint32_t regval;
   1492 	int i;
   1493 
   1494 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1495 
   1496 	CSR_WRITE(sc, reg, regval);
   1497 
   1498 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1499 		delay(5);
   1500 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1501 			break;
   1502 	}
   1503 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1504 		aprint_error("%s: WARNING:"
   1505 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1506 		    device_xname(sc->sc_dev), reg);
   1507 	}
   1508 }
   1509 
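         /*
          * Store a bus address into a wiseman_addr_t as two little-endian
          * 32-bit halves; the high half is only non-zero when bus_addr_t
          * is 64 bits wide.
          */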
   1510 static inline void
   1511 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1512 {
   1513 	wa->wa_low = htole32(v & 0xffffffffU);
   1514 	if (sizeof(bus_addr_t) == 8)
   1515 		wa->wa_high = htole32((uint64_t) v >> 32);
   1516 	else
   1517 		wa->wa_high = 0;
   1518 }
   1519 
   1520 /*
   1521  * Descriptor sync/init functions.
   1522  */
   1523 static inline void
   1524 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1525 {
   1526 	struct wm_softc *sc = txq->txq_sc;
   1527 
   1528 	/* If it will wrap around, sync to the end of the ring. */
   1529 	if ((start + num) > WM_NTXDESC(txq)) {
   1530 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1531 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1532 		    (WM_NTXDESC(txq) - start), ops);
   1533 		num -= (WM_NTXDESC(txq) - start);
   1534 		start = 0;
   1535 	}
   1536 
   1537 	/* Now sync whatever is left. */
   1538 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1539 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1540 }
   1541 
   1542 static inline void
   1543 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1544 {
   1545 	struct wm_softc *sc = rxq->rxq_sc;
   1546 
   1547 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1548 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1549 }
   1550 
   1551 static inline void
   1552 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1553 {
   1554 	struct wm_softc *sc = rxq->rxq_sc;
   1555 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1556 	struct mbuf *m = rxs->rxs_mbuf;
   1557 
   1558 	/*
   1559 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1560 	 * so that the payload after the Ethernet header is aligned
   1561 	 * to a 4-byte boundary.
    1562 	 *
   1563 	 * XXX BRAINDAMAGE ALERT!
   1564 	 * The stupid chip uses the same size for every buffer, which
   1565 	 * is set in the Receive Control register.  We are using the 2K
   1566 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1567 	 * reason, we can't "scoot" packets longer than the standard
   1568 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1569 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1570 	 * the upper layer copy the headers.
   1571 	 */
   1572 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1573 
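         	/*
         	 * Three RX descriptor layouts are in use: extended
         	 * descriptors on the 82574, "new queue" descriptors on the
         	 * 82575 and newer, and the legacy wiseman layout otherwise.
         	 */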
   1574 	if (sc->sc_type == WM_T_82574) {
   1575 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1576 		rxd->erx_data.erxd_addr =
   1577 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1578 		rxd->erx_data.erxd_dd = 0;
   1579 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1580 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1581 
   1582 		rxd->nqrx_data.nrxd_paddr =
   1583 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1584 		/* Currently, split header is not supported. */
   1585 		rxd->nqrx_data.nrxd_haddr = 0;
   1586 	} else {
   1587 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1588 
   1589 		wm_set_dma_addr(&rxd->wrx_addr,
   1590 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1591 		rxd->wrx_len = 0;
   1592 		rxd->wrx_cksum = 0;
   1593 		rxd->wrx_status = 0;
   1594 		rxd->wrx_errors = 0;
   1595 		rxd->wrx_special = 0;
   1596 	}
   1597 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1598 
   1599 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1600 }
   1601 
   1602 /*
    1603  * Device driver interface functions and commonly used functions:
   1604  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1605  */
   1606 
    1607 /* Look up a device in the supported device table */
   1608 static const struct wm_product *
   1609 wm_lookup(const struct pci_attach_args *pa)
   1610 {
   1611 	const struct wm_product *wmp;
   1612 
   1613 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1614 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1615 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1616 			return wmp;
   1617 	}
   1618 	return NULL;
   1619 }
   1620 
   1621 /* The match function (ca_match) */
   1622 static int
   1623 wm_match(device_t parent, cfdata_t cf, void *aux)
   1624 {
   1625 	struct pci_attach_args *pa = aux;
   1626 
   1627 	if (wm_lookup(pa) != NULL)
   1628 		return 1;
   1629 
   1630 	return 0;
   1631 }
   1632 
   1633 /* The attach function (ca_attach) */
   1634 static void
   1635 wm_attach(device_t parent, device_t self, void *aux)
   1636 {
   1637 	struct wm_softc *sc = device_private(self);
   1638 	struct pci_attach_args *pa = aux;
   1639 	prop_dictionary_t dict;
   1640 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1641 	pci_chipset_tag_t pc = pa->pa_pc;
   1642 	int counts[PCI_INTR_TYPE_SIZE];
   1643 	pci_intr_type_t max_type;
   1644 	const char *eetype, *xname;
   1645 	bus_space_tag_t memt;
   1646 	bus_space_handle_t memh;
   1647 	bus_size_t memsize;
   1648 	int memh_valid;
   1649 	int i, error;
   1650 	const struct wm_product *wmp;
   1651 	prop_data_t ea;
   1652 	prop_number_t pn;
   1653 	uint8_t enaddr[ETHER_ADDR_LEN];
   1654 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1655 	pcireg_t preg, memtype;
   1656 	uint16_t eeprom_data, apme_mask;
   1657 	bool force_clear_smbi;
   1658 	uint32_t link_mode;
   1659 	uint32_t reg;
   1660 
   1661 	sc->sc_dev = self;
   1662 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1663 	sc->sc_core_stopping = false;
   1664 
   1665 	wmp = wm_lookup(pa);
   1666 #ifdef DIAGNOSTIC
   1667 	if (wmp == NULL) {
   1668 		printf("\n");
   1669 		panic("wm_attach: impossible");
   1670 	}
   1671 #endif
   1672 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1673 
   1674 	sc->sc_pc = pa->pa_pc;
   1675 	sc->sc_pcitag = pa->pa_tag;
   1676 
   1677 	if (pci_dma64_available(pa))
   1678 		sc->sc_dmat = pa->pa_dmat64;
   1679 	else
   1680 		sc->sc_dmat = pa->pa_dmat;
   1681 
   1682 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1683 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1684 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1685 
   1686 	sc->sc_type = wmp->wmp_type;
   1687 
   1688 	/* Set default function pointers */
   1689 	sc->phy.acquire = wm_get_null;
   1690 	sc->phy.release = wm_put_null;
   1691 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1692 
   1693 	if (sc->sc_type < WM_T_82543) {
   1694 		if (sc->sc_rev < 2) {
   1695 			aprint_error_dev(sc->sc_dev,
   1696 			    "i82542 must be at least rev. 2\n");
   1697 			return;
   1698 		}
   1699 		if (sc->sc_rev < 3)
   1700 			sc->sc_type = WM_T_82542_2_0;
   1701 	}
   1702 
   1703 	/*
   1704 	 * Disable MSI for Errata:
   1705 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1706 	 *
   1707 	 *  82544: Errata 25
   1708 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1709 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1710 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1711 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1712 	 *
   1713 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1714 	 *
   1715 	 *  82571 & 82572: Errata 63
   1716 	 */
   1717 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1718 	    || (sc->sc_type == WM_T_82572))
   1719 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1720 
   1721 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1722 	    || (sc->sc_type == WM_T_82580)
   1723 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1724 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1725 		sc->sc_flags |= WM_F_NEWQUEUE;
   1726 
   1727 	/* Set device properties (mactype) */
   1728 	dict = device_properties(sc->sc_dev);
   1729 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1730 
   1731 	/*
    1732 	 * Map the device.  All devices support memory-mapped access,
   1733 	 * and it is really required for normal operation.
   1734 	 */
   1735 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1736 	switch (memtype) {
   1737 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1738 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1739 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1740 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1741 		break;
   1742 	default:
   1743 		memh_valid = 0;
   1744 		break;
   1745 	}
   1746 
   1747 	if (memh_valid) {
   1748 		sc->sc_st = memt;
   1749 		sc->sc_sh = memh;
   1750 		sc->sc_ss = memsize;
   1751 	} else {
   1752 		aprint_error_dev(sc->sc_dev,
   1753 		    "unable to map device registers\n");
   1754 		return;
   1755 	}
   1756 
   1757 	/*
   1758 	 * In addition, i82544 and later support I/O mapped indirect
   1759 	 * register access.  It is not desirable (nor supported in
   1760 	 * this driver) to use it for normal operation, though it is
   1761 	 * required to work around bugs in some chip versions.
   1762 	 */
   1763 	if (sc->sc_type >= WM_T_82544) {
   1764 		/* First we have to find the I/O BAR. */
   1765 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1766 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1767 			if (memtype == PCI_MAPREG_TYPE_IO)
   1768 				break;
   1769 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1770 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1771 				i += 4;	/* skip high bits, too */
   1772 		}
   1773 		if (i < PCI_MAPREG_END) {
   1774 			/*
    1775 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1776 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1777 			 * That's not a problem, because those newer chips
    1778 			 * don't have this bug.
    1779 			 *
    1780 			 * The i8254x apparently doesn't respond when the
    1781 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1782 			 * been configured.
   1783 			 */
   1784 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1785 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1786 				aprint_error_dev(sc->sc_dev,
   1787 				    "WARNING: I/O BAR at zero.\n");
   1788 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1789 					0, &sc->sc_iot, &sc->sc_ioh,
   1790 					NULL, &sc->sc_ios) == 0) {
   1791 				sc->sc_flags |= WM_F_IOH_VALID;
   1792 			} else {
   1793 				aprint_error_dev(sc->sc_dev,
   1794 				    "WARNING: unable to map I/O space\n");
   1795 			}
   1796 		}
   1797 
   1798 	}
   1799 
   1800 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1801 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1802 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1803 	if (sc->sc_type < WM_T_82542_2_1)
   1804 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1805 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1806 
   1807 	/* power up chip */
   1808 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1809 	    NULL)) && error != EOPNOTSUPP) {
   1810 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1811 		return;
   1812 	}
   1813 
   1814 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1815 
   1816 	/* Allocation settings */
   1817 	max_type = PCI_INTR_TYPE_MSIX;
   1818 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1819 	counts[PCI_INTR_TYPE_MSI] = 1;
   1820 	counts[PCI_INTR_TYPE_INTX] = 1;
   1821 
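         	/*
         	 * Try the interrupt types in order: MSI-X first, then MSI,
         	 * then INTx.  On a setup failure the allocation is released
         	 * and we retry with the next, less capable, type.
         	 */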
   1822 alloc_retry:
   1823 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1824 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1825 		return;
   1826 	}
   1827 
   1828 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1829 		error = wm_setup_msix(sc);
   1830 		if (error) {
   1831 			pci_intr_release(pc, sc->sc_intrs,
   1832 			    counts[PCI_INTR_TYPE_MSIX]);
   1833 
   1834 			/* Setup for MSI: Disable MSI-X */
   1835 			max_type = PCI_INTR_TYPE_MSI;
   1836 			counts[PCI_INTR_TYPE_MSI] = 1;
   1837 			counts[PCI_INTR_TYPE_INTX] = 1;
   1838 			goto alloc_retry;
   1839 		}
    1840 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1841 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1842 		error = wm_setup_legacy(sc);
   1843 		if (error) {
   1844 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1845 			    counts[PCI_INTR_TYPE_MSI]);
   1846 
   1847 			/* The next try is for INTx: Disable MSI */
   1848 			max_type = PCI_INTR_TYPE_INTX;
   1849 			counts[PCI_INTR_TYPE_INTX] = 1;
   1850 			goto alloc_retry;
   1851 		}
   1852 	} else {
   1853 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1854 		error = wm_setup_legacy(sc);
   1855 		if (error) {
   1856 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1857 			    counts[PCI_INTR_TYPE_INTX]);
   1858 			return;
   1859 		}
   1860 	}
   1861 
   1862 	/*
   1863 	 * Check the function ID (unit number of the chip).
   1864 	 */
   1865 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1866 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1867 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1868 	    || (sc->sc_type == WM_T_82580)
   1869 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1870 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1871 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1872 	else
   1873 		sc->sc_funcid = 0;
   1874 
   1875 	/*
   1876 	 * Determine a few things about the bus we're connected to.
   1877 	 */
   1878 	if (sc->sc_type < WM_T_82543) {
   1879 		/* We don't really know the bus characteristics here. */
   1880 		sc->sc_bus_speed = 33;
   1881 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1882 		/*
    1883 		 * CSA (Communication Streaming Architecture) is about as fast
    1884 		 * as a 32-bit 66MHz PCI bus.
   1885 		 */
   1886 		sc->sc_flags |= WM_F_CSA;
   1887 		sc->sc_bus_speed = 66;
   1888 		aprint_verbose_dev(sc->sc_dev,
   1889 		    "Communication Streaming Architecture\n");
   1890 		if (sc->sc_type == WM_T_82547) {
   1891 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1892 			callout_setfunc(&sc->sc_txfifo_ch,
   1893 					wm_82547_txfifo_stall, sc);
   1894 			aprint_verbose_dev(sc->sc_dev,
   1895 			    "using 82547 Tx FIFO stall work-around\n");
   1896 		}
   1897 	} else if (sc->sc_type >= WM_T_82571) {
   1898 		sc->sc_flags |= WM_F_PCIE;
   1899 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1900 		    && (sc->sc_type != WM_T_ICH10)
   1901 		    && (sc->sc_type != WM_T_PCH)
   1902 		    && (sc->sc_type != WM_T_PCH2)
   1903 		    && (sc->sc_type != WM_T_PCH_LPT)
   1904 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1905 			/* ICH* and PCH* have no PCIe capability registers */
   1906 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1907 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1908 				NULL) == 0)
   1909 				aprint_error_dev(sc->sc_dev,
   1910 				    "unable to find PCIe capability\n");
   1911 		}
   1912 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1913 	} else {
   1914 		reg = CSR_READ(sc, WMREG_STATUS);
   1915 		if (reg & STATUS_BUS64)
   1916 			sc->sc_flags |= WM_F_BUS64;
   1917 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1918 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1919 
   1920 			sc->sc_flags |= WM_F_PCIX;
   1921 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1922 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1923 				aprint_error_dev(sc->sc_dev,
   1924 				    "unable to find PCIX capability\n");
   1925 			else if (sc->sc_type != WM_T_82545_3 &&
   1926 				 sc->sc_type != WM_T_82546_3) {
   1927 				/*
   1928 				 * Work around a problem caused by the BIOS
   1929 				 * setting the max memory read byte count
   1930 				 * incorrectly.
   1931 				 */
   1932 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1933 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1934 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1935 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1936 
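         				/*
         				 * Both fields encode a byte count of
         				 * 512 << n, so clamp the commanded
         				 * value to the advertised maximum.
         				 */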
   1937 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1938 				    PCIX_CMD_BYTECNT_SHIFT;
   1939 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1940 				    PCIX_STATUS_MAXB_SHIFT;
   1941 				if (bytecnt > maxb) {
   1942 					aprint_verbose_dev(sc->sc_dev,
   1943 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1944 					    512 << bytecnt, 512 << maxb);
   1945 					pcix_cmd = (pcix_cmd &
   1946 					    ~PCIX_CMD_BYTECNT_MASK) |
   1947 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1948 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1949 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1950 					    pcix_cmd);
   1951 				}
   1952 			}
   1953 		}
   1954 		/*
   1955 		 * The quad port adapter is special; it has a PCIX-PCIX
   1956 		 * bridge on the board, and can run the secondary bus at
   1957 		 * a higher speed.
   1958 		 */
   1959 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1960 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1961 								      : 66;
   1962 		} else if (sc->sc_flags & WM_F_PCIX) {
   1963 			switch (reg & STATUS_PCIXSPD_MASK) {
   1964 			case STATUS_PCIXSPD_50_66:
   1965 				sc->sc_bus_speed = 66;
   1966 				break;
   1967 			case STATUS_PCIXSPD_66_100:
   1968 				sc->sc_bus_speed = 100;
   1969 				break;
   1970 			case STATUS_PCIXSPD_100_133:
   1971 				sc->sc_bus_speed = 133;
   1972 				break;
   1973 			default:
   1974 				aprint_error_dev(sc->sc_dev,
   1975 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1976 				    reg & STATUS_PCIXSPD_MASK);
   1977 				sc->sc_bus_speed = 66;
   1978 				break;
   1979 			}
   1980 		} else
   1981 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1982 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1983 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1984 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1985 	}
   1986 
   1987 	/* clear interesting stat counters */
   1988 	CSR_READ(sc, WMREG_COLC);
   1989 	CSR_READ(sc, WMREG_RXERRC);
   1990 
   1991 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1992 	    || (sc->sc_type >= WM_T_ICH8))
   1993 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1994 	if (sc->sc_type >= WM_T_ICH8)
   1995 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1996 
   1997 	/* Set PHY, NVM mutex related stuff */
   1998 	switch (sc->sc_type) {
   1999 	case WM_T_82542_2_0:
   2000 	case WM_T_82542_2_1:
   2001 	case WM_T_82543:
   2002 	case WM_T_82544:
   2003 		/* Microwire */
   2004 		sc->sc_nvm_wordsize = 64;
   2005 		sc->sc_nvm_addrbits = 6;
   2006 		break;
   2007 	case WM_T_82540:
   2008 	case WM_T_82545:
   2009 	case WM_T_82545_3:
   2010 	case WM_T_82546:
   2011 	case WM_T_82546_3:
   2012 		/* Microwire */
   2013 		reg = CSR_READ(sc, WMREG_EECD);
   2014 		if (reg & EECD_EE_SIZE) {
   2015 			sc->sc_nvm_wordsize = 256;
   2016 			sc->sc_nvm_addrbits = 8;
   2017 		} else {
   2018 			sc->sc_nvm_wordsize = 64;
   2019 			sc->sc_nvm_addrbits = 6;
   2020 		}
   2021 		sc->sc_flags |= WM_F_LOCK_EECD;
   2022 		break;
   2023 	case WM_T_82541:
   2024 	case WM_T_82541_2:
   2025 	case WM_T_82547:
   2026 	case WM_T_82547_2:
   2027 		sc->sc_flags |= WM_F_LOCK_EECD;
   2028 		reg = CSR_READ(sc, WMREG_EECD);
   2029 		if (reg & EECD_EE_TYPE) {
   2030 			/* SPI */
   2031 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2032 			wm_nvm_set_addrbits_size_eecd(sc);
   2033 		} else {
   2034 			/* Microwire */
   2035 			if ((reg & EECD_EE_ABITS) != 0) {
   2036 				sc->sc_nvm_wordsize = 256;
   2037 				sc->sc_nvm_addrbits = 8;
   2038 			} else {
   2039 				sc->sc_nvm_wordsize = 64;
   2040 				sc->sc_nvm_addrbits = 6;
   2041 			}
   2042 		}
   2043 		break;
   2044 	case WM_T_82571:
   2045 	case WM_T_82572:
   2046 		/* SPI */
   2047 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2048 		wm_nvm_set_addrbits_size_eecd(sc);
   2049 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2050 		sc->phy.acquire = wm_get_swsm_semaphore;
   2051 		sc->phy.release = wm_put_swsm_semaphore;
   2052 		break;
   2053 	case WM_T_82573:
   2054 	case WM_T_82574:
   2055 	case WM_T_82583:
   2056 		if (sc->sc_type == WM_T_82573) {
   2057 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2058 			sc->phy.acquire = wm_get_swsm_semaphore;
   2059 			sc->phy.release = wm_put_swsm_semaphore;
   2060 		} else {
   2061 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2062 			/* Both PHY and NVM use the same semaphore. */
    2063 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2064 			sc->phy.release = wm_put_swfwhw_semaphore;
   2067 		}
   2068 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2069 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2070 			sc->sc_nvm_wordsize = 2048;
   2071 		} else {
   2072 			/* SPI */
   2073 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2074 			wm_nvm_set_addrbits_size_eecd(sc);
   2075 		}
   2076 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2077 		break;
   2078 	case WM_T_82575:
   2079 	case WM_T_82576:
   2080 	case WM_T_82580:
   2081 	case WM_T_I350:
   2082 	case WM_T_I354:
   2083 	case WM_T_80003:
   2084 		/* SPI */
   2085 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2086 		wm_nvm_set_addrbits_size_eecd(sc);
   2087 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2088 		    | WM_F_LOCK_SWSM;
   2089 		sc->phy.acquire = wm_get_phy_82575;
   2090 		sc->phy.release = wm_put_phy_82575;
   2091 		break;
   2092 	case WM_T_ICH8:
   2093 	case WM_T_ICH9:
   2094 	case WM_T_ICH10:
   2095 	case WM_T_PCH:
   2096 	case WM_T_PCH2:
   2097 	case WM_T_PCH_LPT:
   2098 		/* FLASH */
   2099 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2100 		sc->sc_nvm_wordsize = 2048;
   2101 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2102 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2103 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2104 			aprint_error_dev(sc->sc_dev,
   2105 			    "can't map FLASH registers\n");
   2106 			goto out;
   2107 		}
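         		/*
         		 * GFPREG describes the flash region in sector units:
         		 * its low half is the base and its high half the limit.
         		 * Derive the region's byte offset and the size of one
         		 * of the two banks, counted in 16-bit words.
         		 */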
   2108 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2109 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2110 		    ICH_FLASH_SECTOR_SIZE;
   2111 		sc->sc_ich8_flash_bank_size =
   2112 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2113 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2114 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2115 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2116 		sc->sc_flashreg_offset = 0;
   2117 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2118 		sc->phy.release = wm_put_swflag_ich8lan;
   2119 		break;
   2120 	case WM_T_PCH_SPT:
   2121 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2122 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2123 		sc->sc_flasht = sc->sc_st;
   2124 		sc->sc_flashh = sc->sc_sh;
   2125 		sc->sc_ich8_flash_base = 0;
   2126 		sc->sc_nvm_wordsize =
   2127 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2128 			* NVM_SIZE_MULTIPLIER;
    2129 		/* That value is in bytes; we want words */
   2130 		sc->sc_nvm_wordsize /= 2;
   2131 		/* assume 2 banks */
   2132 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2133 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2134 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2135 		sc->phy.release = wm_put_swflag_ich8lan;
   2136 		break;
   2137 	case WM_T_I210:
   2138 	case WM_T_I211:
   2139 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2140 			wm_nvm_set_addrbits_size_eecd(sc);
   2141 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2142 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2143 		} else {
   2144 			sc->sc_nvm_wordsize = INVM_SIZE;
   2145 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2146 		}
   2147 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2148 		sc->phy.acquire = wm_get_phy_82575;
   2149 		sc->phy.release = wm_put_phy_82575;
   2150 		break;
   2151 	default:
   2152 		break;
   2153 	}
   2154 
   2155 	/* Reset the chip to a known state. */
   2156 	wm_reset(sc);
   2157 
   2158 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2159 	switch (sc->sc_type) {
   2160 	case WM_T_82571:
   2161 	case WM_T_82572:
   2162 		reg = CSR_READ(sc, WMREG_SWSM2);
   2163 		if ((reg & SWSM2_LOCK) == 0) {
   2164 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2165 			force_clear_smbi = true;
   2166 		} else
   2167 			force_clear_smbi = false;
   2168 		break;
   2169 	case WM_T_82573:
   2170 	case WM_T_82574:
   2171 	case WM_T_82583:
   2172 		force_clear_smbi = true;
   2173 		break;
   2174 	default:
   2175 		force_clear_smbi = false;
   2176 		break;
   2177 	}
   2178 	if (force_clear_smbi) {
   2179 		reg = CSR_READ(sc, WMREG_SWSM);
   2180 		if ((reg & SWSM_SMBI) != 0)
   2181 			aprint_error_dev(sc->sc_dev,
   2182 			    "Please update the Bootagent\n");
   2183 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2184 	}
   2185 
   2186 	/*
    2187 	 * Defer printing the EEPROM type until after verifying the checksum.
   2188 	 * This allows the EEPROM type to be printed correctly in the case
   2189 	 * that no EEPROM is attached.
   2190 	 */
   2191 	/*
   2192 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2193 	 * this for later, so we can fail future reads from the EEPROM.
   2194 	 */
   2195 	if (wm_nvm_validate_checksum(sc)) {
   2196 		/*
    2197 		 * Check a second time, because some PCI-e parts fail the
    2198 		 * first attempt due to the link being in a sleep state.
   2199 		 */
   2200 		if (wm_nvm_validate_checksum(sc))
   2201 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2202 	}
   2203 
   2204 	/* Set device properties (macflags) */
   2205 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2206 
   2207 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2208 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2209 	else {
   2210 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2211 		    sc->sc_nvm_wordsize);
   2212 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2213 			aprint_verbose("iNVM");
   2214 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2215 			aprint_verbose("FLASH(HW)");
   2216 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2217 			aprint_verbose("FLASH");
   2218 		else {
   2219 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2220 				eetype = "SPI";
   2221 			else
   2222 				eetype = "MicroWire";
   2223 			aprint_verbose("(%d address bits) %s EEPROM",
   2224 			    sc->sc_nvm_addrbits, eetype);
   2225 		}
   2226 	}
   2227 	wm_nvm_version(sc);
   2228 	aprint_verbose("\n");
   2229 
   2230 	/* Check for I21[01] PLL workaround */
   2231 	if (sc->sc_type == WM_T_I210)
   2232 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2233 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2234 		/* NVM image release 3.25 has a workaround */
   2235 		if ((sc->sc_nvm_ver_major < 3)
   2236 		    || ((sc->sc_nvm_ver_major == 3)
   2237 			&& (sc->sc_nvm_ver_minor < 25))) {
   2238 			aprint_verbose_dev(sc->sc_dev,
   2239 			    "ROM image version %d.%d is older than 3.25\n",
   2240 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2241 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2242 		}
   2243 	}
   2244 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2245 		wm_pll_workaround_i210(sc);
   2246 
   2247 	wm_get_wakeup(sc);
   2248 
   2249 	/* Non-AMT based hardware can now take control from firmware */
   2250 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2251 		wm_get_hw_control(sc);
   2252 
   2253 	/*
    2254 	 * Read the Ethernet address from the EEPROM if it isn't found
    2255 	 * in the device properties first.
   2256 	 */
   2257 	ea = prop_dictionary_get(dict, "mac-address");
   2258 	if (ea != NULL) {
   2259 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2260 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2261 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2262 	} else {
   2263 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2264 			aprint_error_dev(sc->sc_dev,
   2265 			    "unable to read Ethernet address\n");
   2266 			goto out;
   2267 		}
   2268 	}
   2269 
   2270 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2271 	    ether_sprintf(enaddr));
   2272 
   2273 	/*
   2274 	 * Read the config info from the EEPROM, and set up various
   2275 	 * bits in the control registers based on their contents.
   2276 	 */
   2277 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2278 	if (pn != NULL) {
   2279 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2280 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2281 	} else {
   2282 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2283 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2284 			goto out;
   2285 		}
   2286 	}
   2287 
   2288 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2289 	if (pn != NULL) {
   2290 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2291 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2292 	} else {
   2293 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2294 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2295 			goto out;
   2296 		}
   2297 	}
   2298 
   2299 	/* check for WM_F_WOL */
   2300 	switch (sc->sc_type) {
   2301 	case WM_T_82542_2_0:
   2302 	case WM_T_82542_2_1:
   2303 	case WM_T_82543:
   2304 		/* dummy? */
   2305 		eeprom_data = 0;
   2306 		apme_mask = NVM_CFG3_APME;
   2307 		break;
   2308 	case WM_T_82544:
   2309 		apme_mask = NVM_CFG2_82544_APM_EN;
   2310 		eeprom_data = cfg2;
   2311 		break;
   2312 	case WM_T_82546:
   2313 	case WM_T_82546_3:
   2314 	case WM_T_82571:
   2315 	case WM_T_82572:
   2316 	case WM_T_82573:
   2317 	case WM_T_82574:
   2318 	case WM_T_82583:
   2319 	case WM_T_80003:
   2320 	default:
   2321 		apme_mask = NVM_CFG3_APME;
   2322 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2323 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2324 		break;
   2325 	case WM_T_82575:
   2326 	case WM_T_82576:
   2327 	case WM_T_82580:
   2328 	case WM_T_I350:
   2329 	case WM_T_I354: /* XXX ok? */
   2330 	case WM_T_ICH8:
   2331 	case WM_T_ICH9:
   2332 	case WM_T_ICH10:
   2333 	case WM_T_PCH:
   2334 	case WM_T_PCH2:
   2335 	case WM_T_PCH_LPT:
   2336 	case WM_T_PCH_SPT:
   2337 		/* XXX The funcid should be checked on some devices */
   2338 		apme_mask = WUC_APME;
   2339 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2340 		break;
   2341 	}
   2342 
    2343 	/* Check for the WM_F_WOL flag after the EEPROM settings are read */
   2344 	if ((eeprom_data & apme_mask) != 0)
   2345 		sc->sc_flags |= WM_F_WOL;
   2346 #ifdef WM_DEBUG
   2347 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2348 		printf("WOL\n");
   2349 #endif
   2350 
   2351 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2352 		/* Check NVM for autonegotiation */
   2353 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2354 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2355 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2356 		}
   2357 	}
   2358 
   2359 	/*
    2360 	 * XXX need special handling for some multiple-port cards
    2361 	 * to disable a particular port.
   2362 	 */
   2363 
   2364 	if (sc->sc_type >= WM_T_82544) {
   2365 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2366 		if (pn != NULL) {
   2367 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2368 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2369 		} else {
   2370 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2371 				aprint_error_dev(sc->sc_dev,
   2372 				    "unable to read SWDPIN\n");
   2373 				goto out;
   2374 			}
   2375 		}
   2376 	}
   2377 
   2378 	if (cfg1 & NVM_CFG1_ILOS)
   2379 		sc->sc_ctrl |= CTRL_ILOS;
   2380 
   2381 	/*
   2382 	 * XXX
    2383 	 * This code isn't correct, because pins 2 and 3 are located
    2384 	 * in different positions on newer chips.  Check all the datasheets.
    2385 	 *
    2386 	 * Until this problem is resolved, only do this on chips <= 82580.
   2387 	 */
   2388 	if (sc->sc_type <= WM_T_82580) {
   2389 		if (sc->sc_type >= WM_T_82544) {
   2390 			sc->sc_ctrl |=
   2391 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2392 			    CTRL_SWDPIO_SHIFT;
   2393 			sc->sc_ctrl |=
   2394 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2395 			    CTRL_SWDPINS_SHIFT;
   2396 		} else {
   2397 			sc->sc_ctrl |=
   2398 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2399 			    CTRL_SWDPIO_SHIFT;
   2400 		}
   2401 	}
   2402 
   2403 	/* XXX For other than 82580? */
   2404 	if (sc->sc_type == WM_T_82580) {
   2405 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2406 		if (nvmword & __BIT(13))
   2407 			sc->sc_ctrl |= CTRL_ILOS;
   2408 	}
   2409 
   2410 #if 0
   2411 	if (sc->sc_type >= WM_T_82544) {
   2412 		if (cfg1 & NVM_CFG1_IPS0)
   2413 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2414 		if (cfg1 & NVM_CFG1_IPS1)
   2415 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2416 		sc->sc_ctrl_ext |=
   2417 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2418 		    CTRL_EXT_SWDPIO_SHIFT;
   2419 		sc->sc_ctrl_ext |=
   2420 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2421 		    CTRL_EXT_SWDPINS_SHIFT;
   2422 	} else {
   2423 		sc->sc_ctrl_ext |=
   2424 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2425 		    CTRL_EXT_SWDPIO_SHIFT;
   2426 	}
   2427 #endif
   2428 
   2429 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2430 #if 0
   2431 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2432 #endif
   2433 
   2434 	if (sc->sc_type == WM_T_PCH) {
   2435 		uint16_t val;
   2436 
   2437 		/* Save the NVM K1 bit setting */
   2438 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2439 
   2440 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2441 			sc->sc_nvm_k1_enabled = 1;
   2442 		else
   2443 			sc->sc_nvm_k1_enabled = 0;
   2444 	}
   2445 
   2446 	/*
    2447 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2448 	 * media structures accordingly.
   2449 	 */
   2450 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2451 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2452 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2453 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2454 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2455 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2456 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2457 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2458 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2459 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2460 	    || (sc->sc_type == WM_T_I211)) {
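         		/*
         		 * The 82575 and newer parts report their link mode in
         		 * CTRL_EXT; decode it below to choose between copper
         		 * (GMII/SGMII) and SERDES media.
         		 */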
   2461 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2462 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2463 		switch (link_mode) {
   2464 		case CTRL_EXT_LINK_MODE_1000KX:
   2465 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2466 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2467 			break;
   2468 		case CTRL_EXT_LINK_MODE_SGMII:
   2469 			if (wm_sgmii_uses_mdio(sc)) {
   2470 				aprint_verbose_dev(sc->sc_dev,
   2471 				    "SGMII(MDIO)\n");
   2472 				sc->sc_flags |= WM_F_SGMII;
   2473 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2474 				break;
   2475 			}
   2476 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2477 			/*FALLTHROUGH*/
   2478 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2479 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2480 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2481 				if (link_mode
   2482 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2483 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2484 					sc->sc_flags |= WM_F_SGMII;
   2485 				} else {
   2486 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2487 					aprint_verbose_dev(sc->sc_dev,
   2488 					    "SERDES\n");
   2489 				}
   2490 				break;
   2491 			}
   2492 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2493 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2494 
   2495 			/* Change current link mode setting */
   2496 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2497 			switch (sc->sc_mediatype) {
   2498 			case WM_MEDIATYPE_COPPER:
   2499 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2500 				break;
   2501 			case WM_MEDIATYPE_SERDES:
   2502 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2503 				break;
   2504 			default:
   2505 				break;
   2506 			}
   2507 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2508 			break;
   2509 		case CTRL_EXT_LINK_MODE_GMII:
   2510 		default:
   2511 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2512 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2513 			break;
   2514 		}
   2515 
    2517 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2518 			reg |= CTRL_EXT_I2C_ENA;
    2519 		else
    2520 			reg &= ~CTRL_EXT_I2C_ENA;
   2521 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2522 
   2523 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2524 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2525 		else
   2526 			wm_tbi_mediainit(sc);
   2527 	} else if (sc->sc_type < WM_T_82543 ||
   2528 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2529 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2530 			aprint_error_dev(sc->sc_dev,
   2531 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2532 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2533 		}
   2534 		wm_tbi_mediainit(sc);
   2535 	} else {
   2536 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2537 			aprint_error_dev(sc->sc_dev,
   2538 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2539 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2540 		}
   2541 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2542 	}
   2543 
   2544 	ifp = &sc->sc_ethercom.ec_if;
   2545 	xname = device_xname(sc->sc_dev);
   2546 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2547 	ifp->if_softc = sc;
   2548 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2549 #ifdef WM_MPSAFE
   2550 	ifp->if_extflags = IFEF_START_MPSAFE;
   2551 #endif
   2552 	ifp->if_ioctl = wm_ioctl;
   2553 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2554 		ifp->if_start = wm_nq_start;
   2555 		if (sc->sc_nqueues > 1)
   2556 			ifp->if_transmit = wm_nq_transmit;
   2557 	} else {
   2558 		ifp->if_start = wm_start;
   2559 		if (sc->sc_nqueues > 1)
   2560 			ifp->if_transmit = wm_transmit;
   2561 	}
   2562 	ifp->if_watchdog = wm_watchdog;
   2563 	ifp->if_init = wm_init;
   2564 	ifp->if_stop = wm_stop;
   2565 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2566 	IFQ_SET_READY(&ifp->if_snd);
   2567 
   2568 	/* Check for jumbo frame */
   2569 	switch (sc->sc_type) {
   2570 	case WM_T_82573:
   2571 		/* XXX limited to 9234 if ASPM is disabled */
   2572 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2573 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2574 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2575 		break;
   2576 	case WM_T_82571:
   2577 	case WM_T_82572:
   2578 	case WM_T_82574:
   2579 	case WM_T_82575:
   2580 	case WM_T_82576:
   2581 	case WM_T_82580:
   2582 	case WM_T_I350:
   2583 	case WM_T_I354: /* XXXX ok? */
   2584 	case WM_T_I210:
   2585 	case WM_T_I211:
   2586 	case WM_T_80003:
   2587 	case WM_T_ICH9:
   2588 	case WM_T_ICH10:
   2589 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2590 	case WM_T_PCH_LPT:
   2591 	case WM_T_PCH_SPT:
   2592 		/* XXX limited to 9234 */
   2593 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2594 		break;
   2595 	case WM_T_PCH:
   2596 		/* XXX limited to 4096 */
   2597 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2598 		break;
   2599 	case WM_T_82542_2_0:
   2600 	case WM_T_82542_2_1:
   2601 	case WM_T_82583:
   2602 	case WM_T_ICH8:
   2603 		/* No support for jumbo frame */
   2604 		break;
   2605 	default:
   2606 		/* ETHER_MAX_LEN_JUMBO */
   2607 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2608 		break;
   2609 	}
   2610 
    2611 	/* If we're an i82543 or greater, we can support VLANs. */
   2612 	if (sc->sc_type >= WM_T_82543)
   2613 		sc->sc_ethercom.ec_capabilities |=
   2614 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2615 
   2616 	/*
    2617 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2618 	 * on i82543 and later.
   2619 	 */
   2620 	if (sc->sc_type >= WM_T_82543) {
   2621 		ifp->if_capabilities |=
   2622 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2623 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2624 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2625 		    IFCAP_CSUM_TCPv6_Tx |
   2626 		    IFCAP_CSUM_UDPv6_Tx;
   2627 	}
   2628 
   2629 	/*
    2630 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2631 	 *
   2632 	 *	82541GI (8086:1076) ... no
   2633 	 *	82572EI (8086:10b9) ... yes
   2634 	 */
   2635 	if (sc->sc_type >= WM_T_82571) {
   2636 		ifp->if_capabilities |=
   2637 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2638 	}
   2639 
   2640 	/*
    2641 	 * If we're an i82544 or greater (except the i82547), we can do
   2642 	 * TCP segmentation offload.
   2643 	 */
   2644 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2645 		ifp->if_capabilities |= IFCAP_TSOv4;
   2646 	}
   2647 
   2648 	if (sc->sc_type >= WM_T_82571) {
   2649 		ifp->if_capabilities |= IFCAP_TSOv6;
   2650 	}
   2651 
   2652 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2653 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2654 
   2655 #ifdef WM_MPSAFE
   2656 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2657 #else
   2658 	sc->sc_core_lock = NULL;
   2659 #endif
   2660 
   2661 	/* Attach the interface. */
   2662 	if_initialize(ifp);
   2663 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2664 	ether_ifattach(ifp, enaddr);
   2665 	if_register(ifp);
   2666 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2667 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2668 			  RND_FLAG_DEFAULT);
   2669 
   2670 #ifdef WM_EVENT_COUNTERS
   2671 	/* Attach event counters. */
   2672 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2673 	    NULL, xname, "linkintr");
   2674 
   2675 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2676 	    NULL, xname, "tx_xoff");
   2677 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2678 	    NULL, xname, "tx_xon");
   2679 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2680 	    NULL, xname, "rx_xoff");
   2681 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2682 	    NULL, xname, "rx_xon");
   2683 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2684 	    NULL, xname, "rx_macctl");
   2685 #endif /* WM_EVENT_COUNTERS */
   2686 
   2687 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2688 		pmf_class_network_register(self, ifp);
   2689 	else
   2690 		aprint_error_dev(self, "couldn't establish power handler\n");
   2691 
   2692 	sc->sc_flags |= WM_F_ATTACHED;
   2693  out:
   2694 	return;
   2695 }
   2696 
   2697 /* The detach function (ca_detach) */
   2698 static int
   2699 wm_detach(device_t self, int flags __unused)
   2700 {
   2701 	struct wm_softc *sc = device_private(self);
   2702 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2703 	int i;
   2704 
   2705 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2706 		return 0;
   2707 
   2708 	/* Stop the interface. Callouts are stopped in it. */
   2709 	wm_stop(ifp, 1);
   2710 
   2711 	pmf_device_deregister(self);
   2712 
   2713 #ifdef WM_EVENT_COUNTERS
   2714 	evcnt_detach(&sc->sc_ev_linkintr);
   2715 
   2716 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2717 	evcnt_detach(&sc->sc_ev_tx_xon);
   2718 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2719 	evcnt_detach(&sc->sc_ev_rx_xon);
   2720 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2721 #endif /* WM_EVENT_COUNTERS */
   2722 
   2723 	/* Tell the firmware about the release */
   2724 	WM_CORE_LOCK(sc);
   2725 	wm_release_manageability(sc);
   2726 	wm_release_hw_control(sc);
   2727 	wm_enable_wakeup(sc);
   2728 	WM_CORE_UNLOCK(sc);
   2729 
   2730 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2731 
   2732 	/* Delete all remaining media. */
   2733 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2734 
   2735 	ether_ifdetach(ifp);
   2736 	if_detach(ifp);
   2737 	if_percpuq_destroy(sc->sc_ipq);
   2738 
   2739 	/* Unload RX dmamaps and free mbufs */
   2740 	for (i = 0; i < sc->sc_nqueues; i++) {
   2741 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2742 		mutex_enter(rxq->rxq_lock);
   2743 		wm_rxdrain(rxq);
   2744 		mutex_exit(rxq->rxq_lock);
   2745 	}
   2746 	/* Must unlock here */
   2747 
   2748 	/* Disestablish the interrupt handler */
   2749 	for (i = 0; i < sc->sc_nintrs; i++) {
   2750 		if (sc->sc_ihs[i] != NULL) {
   2751 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2752 			sc->sc_ihs[i] = NULL;
   2753 		}
   2754 	}
   2755 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2756 
   2757 	wm_free_txrx_queues(sc);
   2758 
   2759 	/* Unmap the registers */
   2760 	if (sc->sc_ss) {
   2761 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2762 		sc->sc_ss = 0;
   2763 	}
   2764 	if (sc->sc_ios) {
   2765 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2766 		sc->sc_ios = 0;
   2767 	}
   2768 	if (sc->sc_flashs) {
   2769 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2770 		sc->sc_flashs = 0;
   2771 	}
   2772 
   2773 	if (sc->sc_core_lock)
   2774 		mutex_obj_free(sc->sc_core_lock);
   2775 	if (sc->sc_ich_phymtx)
   2776 		mutex_obj_free(sc->sc_ich_phymtx);
   2777 	if (sc->sc_ich_nvmmtx)
   2778 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2779 
   2780 	return 0;
   2781 }
   2782 
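         /*
          * PMF suspend hook: release manageability and hardware control
          * and arm wakeup, handing the device back to the firmware.
          */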
   2783 static bool
   2784 wm_suspend(device_t self, const pmf_qual_t *qual)
   2785 {
   2786 	struct wm_softc *sc = device_private(self);
   2787 
   2788 	wm_release_manageability(sc);
   2789 	wm_release_hw_control(sc);
   2790 	wm_enable_wakeup(sc);
   2791 
   2792 	return true;
   2793 }
   2794 
   2795 static bool
   2796 wm_resume(device_t self, const pmf_qual_t *qual)
   2797 {
   2798 	struct wm_softc *sc = device_private(self);
   2799 
   2800 	wm_init_manageability(sc);
   2801 
   2802 	return true;
   2803 }
   2804 
   2805 /*
   2806  * wm_watchdog:		[ifnet interface function]
   2807  *
   2808  *	Watchdog timer handler.
   2809  */
   2810 static void
   2811 wm_watchdog(struct ifnet *ifp)
   2812 {
   2813 	int qid;
   2814 	struct wm_softc *sc = ifp->if_softc;
   2815 
   2816 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2817 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2818 
   2819 		wm_watchdog_txq(ifp, txq);
   2820 	}
   2821 
   2822 	/* Reset the interface. */
   2823 	(void) wm_init(ifp);
   2824 
   2825 	/*
   2826 	 * Some upper layer processing, e.g. ALTQ, may still call
   2827 	 * ifp->if_start() directly.
   2828 	 */
   2829 	/* Try to get more packets going. */
   2830 	ifp->if_start(ifp);
   2831 }
   2832 
   2833 static void
   2834 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2835 {
   2836 	struct wm_softc *sc = ifp->if_softc;
   2837 
   2838 	/*
   2839 	 * Since we're using delayed interrupts, sweep up
   2840 	 * before we report an error.
   2841 	 */
   2842 	mutex_enter(txq->txq_lock);
   2843 	wm_txeof(sc, txq);
   2844 	mutex_exit(txq->txq_lock);
   2845 
   2846 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2847 #ifdef WM_DEBUG
   2848 		int i, j;
   2849 		struct wm_txsoft *txs;
   2850 #endif
   2851 		log(LOG_ERR,
   2852 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2853 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2854 		    txq->txq_next);
   2855 		ifp->if_oerrors++;
   2856 #ifdef WM_DEBUG
   2857 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   2858 		    i = WM_NEXTTXS(txq, i)) {
   2859 			txs = &txq->txq_soft[i];
   2860 			printf("txs %d tx %d -> %d\n",
   2861 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   2862 			for (j = txs->txs_firstdesc; ;
   2863 			    j = WM_NEXTTX(txq, j)) {
   2864 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2865 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2866 				printf("\t %#08x%08x\n",
   2867 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2868 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2869 				if (j == txs->txs_lastdesc)
   2870 					break;
   2871 			}
   2872 		}
   2873 #endif
   2874 	}
   2875 }
   2876 
   2877 /*
   2878  * wm_tick:
   2879  *
   2880  *	One second timer, used to check link status, sweep up
   2881  *	completed transmit jobs, etc.
   2882  */
   2883 static void
   2884 wm_tick(void *arg)
   2885 {
   2886 	struct wm_softc *sc = arg;
   2887 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2888 #ifndef WM_MPSAFE
   2889 	int s = splnet();
   2890 #endif
   2891 
   2892 	WM_CORE_LOCK(sc);
   2893 
   2894 	if (sc->sc_core_stopping)
   2895 		goto out;
   2896 
   2897 	if (sc->sc_type >= WM_T_82542_2_1) {
   2898 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2899 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2900 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2901 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2902 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2903 	}
   2904 
   2905 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2906 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2907 	    + CSR_READ(sc, WMREG_CRCERRS)
   2908 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2909 	    + CSR_READ(sc, WMREG_SYMERRC)
   2910 	    + CSR_READ(sc, WMREG_RXERRC)
   2911 	    + CSR_READ(sc, WMREG_SEC)
   2912 	    + CSR_READ(sc, WMREG_CEXTERR)
   2913 	    + CSR_READ(sc, WMREG_RLEC);
   2914 	/*
   2915 	 * WMREG_RNBC is incremented when no receive buffers are available
   2916 	 * in host memory. It does not count dropped packets, because the
   2917 	 * ethernet controller can still receive packets in that case as
   2918 	 * long as there is space in the PHY's FIFO.
   2919 	 *
   2920 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
   2921 	 * of your own instead of if_iqdrops (see the sketch below).
   2922 	 */
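        	/*
        	 * A minimal sketch of such a private counter (the sc_ev_rnbc
        	 * field and the "rx_no_buffers" name are hypothetical, not
        	 * part of this driver):
        	 *
        	 *	// in struct wm_softc:
        	 *	//	struct evcnt sc_ev_rnbc;
        	 *	// at attach time:
        	 *	//	evcnt_attach_dynamic(&sc->sc_ev_rnbc,
        	 *	//	    EVCNT_TYPE_MISC, NULL,
        	 *	//	    device_xname(sc->sc_dev), "rx_no_buffers");
        	 *	// here, once per tick:
        	 *	//	WM_EVCNT_ADD(&sc->sc_ev_rnbc,
        	 *	//	    CSR_READ(sc, WMREG_RNBC));
        	 */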
   2923 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2924 
   2925 	if (sc->sc_flags & WM_F_HAS_MII)
   2926 		mii_tick(&sc->sc_mii);
   2927 	else if ((sc->sc_type >= WM_T_82575)
   2928 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2929 		wm_serdes_tick(sc);
   2930 	else
   2931 		wm_tbi_tick(sc);
   2932 
   2933 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2934 out:
   2935 	WM_CORE_UNLOCK(sc);
   2936 #ifndef WM_MPSAFE
   2937 	splx(s);
   2938 #endif
   2939 }
   2940 
   2941 static int
   2942 wm_ifflags_cb(struct ethercom *ec)
   2943 {
   2944 	struct ifnet *ifp = &ec->ec_if;
   2945 	struct wm_softc *sc = ifp->if_softc;
   2946 	int rc = 0;
   2947 
   2948 	WM_CORE_LOCK(sc);
   2949 
   2950 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2951 	sc->sc_if_flags = ifp->if_flags;
   2952 
   2953 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2954 		rc = ENETRESET;
   2955 		goto out;
   2956 	}
   2957 
   2958 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2959 		wm_set_filter(sc);
   2960 
   2961 	wm_set_vlan(sc);
   2962 
   2963 out:
   2964 	WM_CORE_UNLOCK(sc);
   2965 
   2966 	return rc;
   2967 }
   2968 
   2969 /*
   2970  * wm_ioctl:		[ifnet interface function]
   2971  *
   2972  *	Handle control requests from the operator.
   2973  */
   2974 static int
   2975 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2976 {
   2977 	struct wm_softc *sc = ifp->if_softc;
   2978 	struct ifreq *ifr = (struct ifreq *) data;
   2979 	struct ifaddr *ifa = (struct ifaddr *)data;
   2980 	struct sockaddr_dl *sdl;
   2981 	int s, error;
   2982 
   2983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2984 		device_xname(sc->sc_dev), __func__));
   2985 
   2986 #ifndef WM_MPSAFE
   2987 	s = splnet();
   2988 #endif
   2989 	switch (cmd) {
   2990 	case SIOCSIFMEDIA:
   2991 	case SIOCGIFMEDIA:
   2992 		WM_CORE_LOCK(sc);
   2993 		/* Flow control requires full-duplex mode. */
   2994 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2995 		    (ifr->ifr_media & IFM_FDX) == 0)
   2996 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2997 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2998 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2999 				/* We can do both TXPAUSE and RXPAUSE. */
   3000 				ifr->ifr_media |=
   3001 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3002 			}
   3003 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3004 		}
   3005 		WM_CORE_UNLOCK(sc);
   3006 #ifdef WM_MPSAFE
   3007 		s = splnet();
   3008 #endif
   3009 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3010 #ifdef WM_MPSAFE
   3011 		splx(s);
   3012 #endif
   3013 		break;
   3014 	case SIOCINITIFADDR:
   3015 		WM_CORE_LOCK(sc);
   3016 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3017 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3018 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3019 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3020 			/* unicast address is first multicast entry */
   3021 			wm_set_filter(sc);
   3022 			error = 0;
   3023 			WM_CORE_UNLOCK(sc);
   3024 			break;
   3025 		}
   3026 		WM_CORE_UNLOCK(sc);
   3027 		/*FALLTHROUGH*/
   3028 	default:
   3029 #ifdef WM_MPSAFE
   3030 		s = splnet();
   3031 #endif
   3032 		/* It may call wm_start, so unlock here */
   3033 		error = ether_ioctl(ifp, cmd, data);
   3034 #ifdef WM_MPSAFE
   3035 		splx(s);
   3036 #endif
   3037 		if (error != ENETRESET)
   3038 			break;
   3039 
   3040 		error = 0;
   3041 
   3042 		if (cmd == SIOCSIFCAP) {
   3043 			error = (*ifp->if_init)(ifp);
   3044 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3045 			;
   3046 		else if (ifp->if_flags & IFF_RUNNING) {
   3047 			/*
   3048 			 * Multicast list has changed; set the hardware filter
   3049 			 * accordingly.
   3050 			 */
   3051 			WM_CORE_LOCK(sc);
   3052 			wm_set_filter(sc);
   3053 			WM_CORE_UNLOCK(sc);
   3054 		}
   3055 		break;
   3056 	}
   3057 
   3058 #ifndef WM_MPSAFE
   3059 	splx(s);
   3060 #endif
   3061 	return error;
   3062 }
   3063 
   3064 /* MAC address related */
   3065 
   3066 /*
   3067  * Get the offset of the MAC address and return it.
   3068  * If an error occurs, offset 0 is used.
   3069  */
   3070 static uint16_t
   3071 wm_check_alt_mac_addr(struct wm_softc *sc)
   3072 {
   3073 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3074 	uint16_t offset = NVM_OFF_MACADDR;
   3075 
   3076 	/* Try to read alternative MAC address pointer */
   3077 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3078 		return 0;
   3079 
   3080 	/* Check whether the pointer is valid. */
   3081 	if ((offset == 0x0000) || (offset == 0xffff))
   3082 		return 0;
   3083 
   3084 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3085 	/*
   3086 	 * Check whether the alternative MAC address is actually valid.
   3087 	 * Some cards have a non-0xffff pointer but don't really use an
   3088 	 * alternative MAC address.
   3089 	 *
   3090 	 * To tell, check whether the broadcast/multicast bit is set.
   3091 	 */
   3092 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3093 		if (((myea[0] & 0xff) & 0x01) == 0)
   3094 			return offset; /* Found */
   3095 
   3096 	/* Not found */
   3097 	return 0;
   3098 }
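        /*
         * Worked example (illustration only): with myea[0] == 0x1100 the
         * first address byte is 0x00; its multicast/broadcast bit (bit 0)
         * is clear, so the entry is accepted and the offset is returned.
         * With myea[0] == 0x11ff the first byte is 0xff, bit 0 is set,
         * and the entry is rejected.
         */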
   3099 
   3100 static int
   3101 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3102 {
   3103 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3104 	uint16_t offset = NVM_OFF_MACADDR;
   3105 	int do_invert = 0;
   3106 
   3107 	switch (sc->sc_type) {
   3108 	case WM_T_82580:
   3109 	case WM_T_I350:
   3110 	case WM_T_I354:
   3111 		/* EEPROM Top Level Partitioning */
   3112 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3113 		break;
   3114 	case WM_T_82571:
   3115 	case WM_T_82575:
   3116 	case WM_T_82576:
   3117 	case WM_T_80003:
   3118 	case WM_T_I210:
   3119 	case WM_T_I211:
   3120 		offset = wm_check_alt_mac_addr(sc);
   3121 		if (offset == 0)
   3122 			if ((sc->sc_funcid & 0x01) == 1)
   3123 				do_invert = 1;
   3124 		break;
   3125 	default:
   3126 		if ((sc->sc_funcid & 0x01) == 1)
   3127 			do_invert = 1;
   3128 		break;
   3129 	}
   3130 
   3131 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3132 		goto bad;
   3133 
   3134 	enaddr[0] = myea[0] & 0xff;
   3135 	enaddr[1] = myea[0] >> 8;
   3136 	enaddr[2] = myea[1] & 0xff;
   3137 	enaddr[3] = myea[1] >> 8;
   3138 	enaddr[4] = myea[2] & 0xff;
   3139 	enaddr[5] = myea[2] >> 8;
   3140 
   3141 	/*
   3142 	 * Toggle the LSB of the MAC address on the second port
   3143 	 * of some dual port cards.
   3144 	 */
   3145 	if (do_invert != 0)
   3146 		enaddr[5] ^= 1;
   3147 
   3148 	return 0;
   3149 
   3150  bad:
   3151 	return -1;
   3152 }
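        /*
         * Worked example (illustration only): the NVM stores the address
         * as little-endian 16-bit words, so myea[] = { 0x1100, 0x3322,
         * 0x5544 } unpacks to 00:11:22:33:44:55 above.  On the second
         * port of an affected dual port card the LSB toggle then yields
         * 00:11:22:33:44:54.
         */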
   3153 
   3154 /*
   3155  * wm_set_ral:
   3156  *
   3157  *	Set an entry in the receive address list.
   3158  */
   3159 static void
   3160 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3161 {
   3162 	uint32_t ral_lo, ral_hi;
   3163 
   3164 	if (enaddr != NULL) {
   3165 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3166 		    (enaddr[3] << 24);
   3167 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3168 		ral_hi |= RAL_AV;
   3169 	} else {
   3170 		ral_lo = 0;
   3171 		ral_hi = 0;
   3172 	}
   3173 
   3174 	if (sc->sc_type >= WM_T_82544) {
   3175 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3176 		    ral_lo);
   3177 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3178 		    ral_hi);
   3179 	} else {
   3180 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3181 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3182 	}
   3183 }
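        /*
         * Worked example (illustration only): for 00:11:22:33:44:55 the
         * packing above yields ral_lo = 0x33221100 and
         * ral_hi = RAL_AV | 0x00005544.
         */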
   3184 
   3185 /*
   3186  * wm_mchash:
   3187  *
   3188  *	Compute the hash of the multicast address for the 4096-bit
   3189  *	multicast filter.
   3190  */
   3191 static uint32_t
   3192 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3193 {
   3194 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3195 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3196 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3197 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3198 	uint32_t hash;
   3199 
   3200 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3201 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3202 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3203 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3204 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3205 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3206 		return (hash & 0x3ff);
   3207 	}
   3208 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3209 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3210 
   3211 	return (hash & 0xfff);
   3212 }
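        /*
         * Worked example (illustration only): with sc_mchash_type == 0 on
         * a non-ICH/PCH chip and an address ending in ...:44:55,
         * hash = (0x44 >> 4) | (0x55 << 4) = 0x554.  wm_set_filter() then
         * sets bit (0x554 & 0x1f) = 20 in MTA register (0x554 >> 5) = 42.
         */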
   3213 
   3214 /*
   3215  * wm_set_filter:
   3216  *
   3217  *	Set up the receive filter.
   3218  */
   3219 static void
   3220 wm_set_filter(struct wm_softc *sc)
   3221 {
   3222 	struct ethercom *ec = &sc->sc_ethercom;
   3223 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3224 	struct ether_multi *enm;
   3225 	struct ether_multistep step;
   3226 	bus_addr_t mta_reg;
   3227 	uint32_t hash, reg, bit;
   3228 	int i, size, ralmax;
   3229 
   3230 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3231 		device_xname(sc->sc_dev), __func__));
   3232 
   3233 	if (sc->sc_type >= WM_T_82544)
   3234 		mta_reg = WMREG_CORDOVA_MTA;
   3235 	else
   3236 		mta_reg = WMREG_MTA;
   3237 
   3238 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3239 
   3240 	if (ifp->if_flags & IFF_BROADCAST)
   3241 		sc->sc_rctl |= RCTL_BAM;
   3242 	if (ifp->if_flags & IFF_PROMISC) {
   3243 		sc->sc_rctl |= RCTL_UPE;
   3244 		goto allmulti;
   3245 	}
   3246 
   3247 	/*
   3248 	 * Set the station address in the first RAL slot, and
   3249 	 * clear the remaining slots.
   3250 	 */
   3251 	if (sc->sc_type == WM_T_ICH8)
   3252 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3253 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3254 	    || (sc->sc_type == WM_T_PCH))
   3255 		size = WM_RAL_TABSIZE_ICH8;
   3256 	else if (sc->sc_type == WM_T_PCH2)
   3257 		size = WM_RAL_TABSIZE_PCH2;
   3258 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3259 		size = WM_RAL_TABSIZE_PCH_LPT;
   3260 	else if (sc->sc_type == WM_T_82575)
   3261 		size = WM_RAL_TABSIZE_82575;
   3262 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3263 		size = WM_RAL_TABSIZE_82576;
   3264 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3265 		size = WM_RAL_TABSIZE_I350;
   3266 	else
   3267 		size = WM_RAL_TABSIZE;
   3268 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3269 
   3270 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3271 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3272 		switch (i) {
   3273 		case 0:
   3274 			/* We can use all entries */
   3275 			ralmax = size;
   3276 			break;
   3277 		case 1:
   3278 			/* Only RAR[0] */
   3279 			ralmax = 1;
   3280 			break;
   3281 		default:
   3282 			/* available SHRA + RAR[0] */
   3283 			ralmax = i + 1;
   3284 		}
   3285 	} else
   3286 		ralmax = size;
   3287 	for (i = 1; i < size; i++) {
   3288 		if (i < ralmax)
   3289 			wm_set_ral(sc, NULL, i);
   3290 	}
   3291 
   3292 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3293 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3294 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3295 	    || (sc->sc_type == WM_T_PCH_SPT))
   3296 		size = WM_ICH8_MC_TABSIZE;
   3297 	else
   3298 		size = WM_MC_TABSIZE;
   3299 	/* Clear out the multicast table. */
   3300 	for (i = 0; i < size; i++)
   3301 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3302 
   3303 	ETHER_LOCK(ec);
   3304 	ETHER_FIRST_MULTI(step, ec, enm);
   3305 	while (enm != NULL) {
   3306 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3307 			ETHER_UNLOCK(ec);
   3308 			/*
   3309 			 * We must listen to a range of multicast addresses.
   3310 			 * For now, just accept all multicasts, rather than
   3311 			 * trying to set only those filter bits needed to match
   3312 			 * the range.  (At this time, the only use of address
   3313 			 * ranges is for IP multicast routing, for which the
   3314 			 * range is big enough to require all bits set.)
   3315 			 */
   3316 			goto allmulti;
   3317 		}
   3318 
   3319 		hash = wm_mchash(sc, enm->enm_addrlo);
   3320 
   3321 		reg = (hash >> 5);
   3322 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3323 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3324 		    || (sc->sc_type == WM_T_PCH2)
   3325 		    || (sc->sc_type == WM_T_PCH_LPT)
   3326 		    || (sc->sc_type == WM_T_PCH_SPT))
   3327 			reg &= 0x1f;
   3328 		else
   3329 			reg &= 0x7f;
   3330 		bit = hash & 0x1f;
   3331 
   3332 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3333 		hash |= 1U << bit;
   3334 
   3335 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3336 			/*
   3337 			 * 82544 Errata 9: Certain registers (FCAH, MTA and
   3338 			 * VFTA) cannot be written with particular alignments
   3339 			 * in PCI-X bus operation.
   3340 			 */
   3341 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3342 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3343 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3344 		} else
   3345 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3346 
   3347 		ETHER_NEXT_MULTI(step, enm);
   3348 	}
   3349 	ETHER_UNLOCK(ec);
   3350 
   3351 	ifp->if_flags &= ~IFF_ALLMULTI;
   3352 	goto setit;
   3353 
   3354  allmulti:
   3355 	ifp->if_flags |= IFF_ALLMULTI;
   3356 	sc->sc_rctl |= RCTL_MPE;
   3357 
   3358  setit:
   3359 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3360 }
   3361 
   3362 /* Reset and init related */
   3363 
   3364 static void
   3365 wm_set_vlan(struct wm_softc *sc)
   3366 {
   3367 
   3368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3369 		device_xname(sc->sc_dev), __func__));
   3370 
   3371 	/* Deal with VLAN enables. */
   3372 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3373 		sc->sc_ctrl |= CTRL_VME;
   3374 	else
   3375 		sc->sc_ctrl &= ~CTRL_VME;
   3376 
   3377 	/* Write the control registers. */
   3378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3379 }
   3380 
   3381 static void
   3382 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3383 {
   3384 	uint32_t gcr;
   3385 	pcireg_t ctrl2;
   3386 
   3387 	gcr = CSR_READ(sc, WMREG_GCR);
   3388 
   3389 	/* Only take action if the timeout value is at its default of 0 */
   3390 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3391 		goto out;
   3392 
   3393 	if ((gcr & GCR_CAP_VER2) == 0) {
   3394 		gcr |= GCR_CMPL_TMOUT_10MS;
   3395 		goto out;
   3396 	}
   3397 
   3398 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3399 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3400 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3401 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3402 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3403 
   3404 out:
   3405 	/* Disable completion timeout resend */
   3406 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3407 
   3408 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3409 }
   3410 
   3411 void
   3412 wm_get_auto_rd_done(struct wm_softc *sc)
   3413 {
   3414 	int i;
   3415 
   3416 	/* wait for eeprom to reload */
   3417 	switch (sc->sc_type) {
   3418 	case WM_T_82571:
   3419 	case WM_T_82572:
   3420 	case WM_T_82573:
   3421 	case WM_T_82574:
   3422 	case WM_T_82583:
   3423 	case WM_T_82575:
   3424 	case WM_T_82576:
   3425 	case WM_T_82580:
   3426 	case WM_T_I350:
   3427 	case WM_T_I354:
   3428 	case WM_T_I210:
   3429 	case WM_T_I211:
   3430 	case WM_T_80003:
   3431 	case WM_T_ICH8:
   3432 	case WM_T_ICH9:
   3433 		for (i = 0; i < 10; i++) {
   3434 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3435 				break;
   3436 			delay(1000);
   3437 		}
   3438 		if (i == 10) {
   3439 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3440 			    "complete\n", device_xname(sc->sc_dev));
   3441 		}
   3442 		break;
   3443 	default:
   3444 		break;
   3445 	}
   3446 }
   3447 
   3448 void
   3449 wm_lan_init_done(struct wm_softc *sc)
   3450 {
   3451 	uint32_t reg = 0;
   3452 	int i;
   3453 
   3454 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3455 		device_xname(sc->sc_dev), __func__));
   3456 
   3457 	/* Wait for eeprom to reload */
   3458 	switch (sc->sc_type) {
   3459 	case WM_T_ICH10:
   3460 	case WM_T_PCH:
   3461 	case WM_T_PCH2:
   3462 	case WM_T_PCH_LPT:
   3463 	case WM_T_PCH_SPT:
   3464 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3465 			reg = CSR_READ(sc, WMREG_STATUS);
   3466 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3467 				break;
   3468 			delay(100);
   3469 		}
   3470 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3471 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3472 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3473 		}
   3474 		break;
   3475 	default:
   3476 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3477 		    __func__);
   3478 		break;
   3479 	}
   3480 
   3481 	reg &= ~STATUS_LAN_INIT_DONE;
   3482 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3483 }
   3484 
   3485 void
   3486 wm_get_cfg_done(struct wm_softc *sc)
   3487 {
   3488 	int mask;
   3489 	uint32_t reg;
   3490 	int i;
   3491 
   3492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3493 		device_xname(sc->sc_dev), __func__));
   3494 
   3495 	/* Wait for eeprom to reload */
   3496 	switch (sc->sc_type) {
   3497 	case WM_T_82542_2_0:
   3498 	case WM_T_82542_2_1:
   3499 		/* null */
   3500 		break;
   3501 	case WM_T_82543:
   3502 	case WM_T_82544:
   3503 	case WM_T_82540:
   3504 	case WM_T_82545:
   3505 	case WM_T_82545_3:
   3506 	case WM_T_82546:
   3507 	case WM_T_82546_3:
   3508 	case WM_T_82541:
   3509 	case WM_T_82541_2:
   3510 	case WM_T_82547:
   3511 	case WM_T_82547_2:
   3512 	case WM_T_82573:
   3513 	case WM_T_82574:
   3514 	case WM_T_82583:
   3515 		/* generic */
   3516 		delay(10*1000);
   3517 		break;
   3518 	case WM_T_80003:
   3519 	case WM_T_82571:
   3520 	case WM_T_82572:
   3521 	case WM_T_82575:
   3522 	case WM_T_82576:
   3523 	case WM_T_82580:
   3524 	case WM_T_I350:
   3525 	case WM_T_I354:
   3526 	case WM_T_I210:
   3527 	case WM_T_I211:
   3528 		if (sc->sc_type == WM_T_82571) {
   3529 			/* Only 82571 shares port 0 */
   3530 			mask = EEMNGCTL_CFGDONE_0;
   3531 		} else
   3532 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3533 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3534 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3535 				break;
   3536 			delay(1000);
   3537 		}
   3538 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3539 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3540 				device_xname(sc->sc_dev), __func__));
   3541 		}
   3542 		break;
   3543 	case WM_T_ICH8:
   3544 	case WM_T_ICH9:
   3545 	case WM_T_ICH10:
   3546 	case WM_T_PCH:
   3547 	case WM_T_PCH2:
   3548 	case WM_T_PCH_LPT:
   3549 	case WM_T_PCH_SPT:
   3550 		delay(10*1000);
   3551 		if (sc->sc_type >= WM_T_ICH10)
   3552 			wm_lan_init_done(sc);
   3553 		else
   3554 			wm_get_auto_rd_done(sc);
   3555 
   3556 		reg = CSR_READ(sc, WMREG_STATUS);
   3557 		if ((reg & STATUS_PHYRA) != 0)
   3558 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3559 		break;
   3560 	default:
   3561 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3562 		    __func__);
   3563 		break;
   3564 	}
   3565 }
   3566 
   3567 /* Init hardware bits */
   3568 void
   3569 wm_initialize_hardware_bits(struct wm_softc *sc)
   3570 {
   3571 	uint32_t tarc0, tarc1, reg;
   3572 
   3573 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3574 		device_xname(sc->sc_dev), __func__));
   3575 
   3576 	/* For the 82571 variants, 80003 and ICHs */
   3577 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3578 	    || (sc->sc_type >= WM_T_80003)) {
   3579 
   3580 		/* Transmit Descriptor Control 0 */
   3581 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3582 		reg |= TXDCTL_COUNT_DESC;
   3583 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3584 
   3585 		/* Transmit Descriptor Control 1 */
   3586 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3587 		reg |= TXDCTL_COUNT_DESC;
   3588 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3589 
   3590 		/* TARC0 */
   3591 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3592 		switch (sc->sc_type) {
   3593 		case WM_T_82571:
   3594 		case WM_T_82572:
   3595 		case WM_T_82573:
   3596 		case WM_T_82574:
   3597 		case WM_T_82583:
   3598 		case WM_T_80003:
   3599 			/* Clear bits 30..27 */
   3600 			tarc0 &= ~__BITS(30, 27);
   3601 			break;
   3602 		default:
   3603 			break;
   3604 		}
   3605 
   3606 		switch (sc->sc_type) {
   3607 		case WM_T_82571:
   3608 		case WM_T_82572:
   3609 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3610 
   3611 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3612 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3613 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3614 			/* 8257[12] Errata No.7 */
   3615 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3616 
   3617 			/* TARC1 bit 28 */
   3618 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3619 				tarc1 &= ~__BIT(28);
   3620 			else
   3621 				tarc1 |= __BIT(28);
   3622 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3623 
   3624 			/*
   3625 			 * 8257[12] Errata No.13
   3626 			 * Disable Dynamic Clock Gating.
   3627 			 */
   3628 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3629 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3630 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3631 			break;
   3632 		case WM_T_82573:
   3633 		case WM_T_82574:
   3634 		case WM_T_82583:
   3635 			if ((sc->sc_type == WM_T_82574)
   3636 			    || (sc->sc_type == WM_T_82583))
   3637 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3638 
   3639 			/* Extended Device Control */
   3640 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3641 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3642 			reg |= __BIT(22);	/* Set bit 22 */
   3643 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3644 
   3645 			/* Device Control */
   3646 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3647 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3648 
   3649 			/* PCIe Control Register */
   3650 			/*
   3651 			 * 82573 Errata (unknown).
   3652 			 *
   3653 			 * 82574 Errata 25 and 82583 Errata 12
   3654 			 * "Dropped Rx Packets":
   3655 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3656 			 */
   3657 			reg = CSR_READ(sc, WMREG_GCR);
   3658 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3659 			CSR_WRITE(sc, WMREG_GCR, reg);
   3660 
   3661 			if ((sc->sc_type == WM_T_82574)
   3662 			    || (sc->sc_type == WM_T_82583)) {
   3663 				/*
   3664 				 * Document says this bit must be set for
   3665 				 * proper operation.
   3666 				 */
   3667 				reg = CSR_READ(sc, WMREG_GCR);
   3668 				reg |= __BIT(22);
   3669 				CSR_WRITE(sc, WMREG_GCR, reg);
   3670 
   3671 				/*
   3672 				 * Apply a workaround for a documented
   3673 				 * hardware erratum: some error prone or
   3674 				 * unreliable PCIe completions may occur,
   3675 				 * particularly with ASPM enabled.  Without
   3676 				 * the fix, the issue can cause Tx
   3677 				 * timeouts.
   3678 				 */
   3679 				reg = CSR_READ(sc, WMREG_GCR2);
   3680 				reg |= __BIT(0);
   3681 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3682 			}
   3683 			break;
   3684 		case WM_T_80003:
   3685 			/* TARC0 */
   3686 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3687 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3688 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3689 
   3690 			/* TARC1 bit 28 */
   3691 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3692 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3693 				tarc1 &= ~__BIT(28);
   3694 			else
   3695 				tarc1 |= __BIT(28);
   3696 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3697 			break;
   3698 		case WM_T_ICH8:
   3699 		case WM_T_ICH9:
   3700 		case WM_T_ICH10:
   3701 		case WM_T_PCH:
   3702 		case WM_T_PCH2:
   3703 		case WM_T_PCH_LPT:
   3704 		case WM_T_PCH_SPT:
   3705 			/* TARC0 */
   3706 			if ((sc->sc_type == WM_T_ICH8)
   3707 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3708 				/* Set TARC0 bits 29 and 28 */
   3709 				tarc0 |= __BITS(29, 28);
   3710 			}
   3711 			/* Set TARC0 bits 23,24,26,27 */
   3712 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3713 
   3714 			/* CTRL_EXT */
   3715 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3716 			reg |= __BIT(22);	/* Set bit 22 */
   3717 			/*
   3718 			 * Enable PHY low-power state when MAC is at D3
   3719 			 * w/o WoL
   3720 			 */
   3721 			if (sc->sc_type >= WM_T_PCH)
   3722 				reg |= CTRL_EXT_PHYPDEN;
   3723 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3724 
   3725 			/* TARC1 */
   3726 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3727 			/* bit 28 */
   3728 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3729 				tarc1 &= ~__BIT(28);
   3730 			else
   3731 				tarc1 |= __BIT(28);
   3732 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3733 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3734 
   3735 			/* Device Status */
   3736 			if (sc->sc_type == WM_T_ICH8) {
   3737 				reg = CSR_READ(sc, WMREG_STATUS);
   3738 				reg &= ~__BIT(31);
   3739 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3740 
   3741 			}
   3742 
   3743 			/* IOSFPC */
   3744 			if (sc->sc_type == WM_T_PCH_SPT) {
   3745 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3746 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3747 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3748 			}
   3749 			/*
   3750 			 * To work around a descriptor data corruption issue
   3751 			 * with NFS v2 UDP traffic, just disable the NFS
   3752 			 * filtering capability.
   3753 			 */
   3754 			reg = CSR_READ(sc, WMREG_RFCTL);
   3755 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3756 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3757 			break;
   3758 		default:
   3759 			break;
   3760 		}
   3761 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3762 
   3763 		switch (sc->sc_type) {
   3764 		/*
   3765 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others:
   3766 		 * avoid the RSS Hash Value bug.
   3767 		 */
   3768 		case WM_T_82571:
   3769 		case WM_T_82572:
   3770 		case WM_T_82573:
   3771 		case WM_T_80003:
   3772 		case WM_T_ICH8:
   3773 			reg = CSR_READ(sc, WMREG_RFCTL);
   3774 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3775 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3776 			break;
   3777 		case WM_T_82574:
   3778 			/* Use the extended Rx descriptor. */
   3779 			reg = CSR_READ(sc, WMREG_RFCTL);
   3780 			reg |= WMREG_RFCTL_EXSTEN;
   3781 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3782 			break;
   3783 		default:
   3784 			break;
   3785 		}
   3786 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3787 		/*
   3788 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3789 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3790 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3791 		 * Correctly by the Device"
   3792 		 *
   3793 		 * I354(C2000) Errata AVR53:
   3794 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3795 		 * Hang"
   3796 		 */
   3797 		reg = CSR_READ(sc, WMREG_RFCTL);
   3798 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3799 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3800 	}
   3801 }
   3802 
   3803 static uint32_t
   3804 wm_rxpbs_adjust_82580(uint32_t val)
   3805 {
   3806 	uint32_t rv = 0;
   3807 
   3808 	if (val < __arraycount(wm_82580_rxpbs_table))
   3809 		rv = wm_82580_rxpbs_table[val];
   3810 
   3811 	return rv;
   3812 }
   3813 
   3814 /*
   3815  * wm_reset_phy:
   3816  *
   3817  *	generic PHY reset function.
   3818  *	Same as e1000_phy_hw_reset_generic()
   3819  */
   3820 static void
   3821 wm_reset_phy(struct wm_softc *sc)
   3822 {
   3823 	uint32_t reg;
   3824 
   3825 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3826 		device_xname(sc->sc_dev), __func__));
   3827 	if (wm_phy_resetisblocked(sc))
   3828 		return;
   3829 
   3830 	sc->phy.acquire(sc);
   3831 
   3832 	reg = CSR_READ(sc, WMREG_CTRL);
   3833 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3834 	CSR_WRITE_FLUSH(sc);
   3835 
   3836 	delay(sc->phy.reset_delay_us);
   3837 
   3838 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3839 	CSR_WRITE_FLUSH(sc);
   3840 
   3841 	delay(150);
   3842 
   3843 	sc->phy.release(sc);
   3844 
   3845 	wm_get_cfg_done(sc);
   3846 }
   3847 
   3848 static void
   3849 wm_flush_desc_rings(struct wm_softc *sc)
   3850 {
   3851 	pcireg_t preg;
   3852 	uint32_t reg;
   3853 	int nexttx;
   3854 
   3855 	/* First, disable MULR fix in FEXTNVM11 */
   3856 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3857 	reg |= FEXTNVM11_DIS_MULRFIX;
   3858 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3859 
   3860 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3861 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3862 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3863 		struct wm_txqueue *txq;
   3864 		wiseman_txdesc_t *txd;
   3865 
   3866 		/* TX */
   3867 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3868 		    device_xname(sc->sc_dev), preg, reg);
   3869 		reg = CSR_READ(sc, WMREG_TCTL);
   3870 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3871 
   3872 		txq = &sc->sc_queue[0].wmq_txq;
   3873 		nexttx = txq->txq_next;
   3874 		txd = &txq->txq_descs[nexttx];
   3875 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3876 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
   3877 		txd->wtx_fields.wtxu_status = 0;
   3878 		txd->wtx_fields.wtxu_options = 0;
   3879 		txd->wtx_fields.wtxu_vlan = 0;
   3880 
   3881 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3882 			BUS_SPACE_BARRIER_WRITE);
   3883 
   3884 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3885 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3886 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3887 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3888 		delay(250);
   3889 	}
   3890 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3891 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3892 		uint32_t rctl;
   3893 
   3894 		/* RX */
   3895 		printf("%s: Need RX flush (reg = %08x)\n",
   3896 		    device_xname(sc->sc_dev), preg);
   3897 		rctl = CSR_READ(sc, WMREG_RCTL);
   3898 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3899 		CSR_WRITE_FLUSH(sc);
   3900 		delay(150);
   3901 
   3902 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3903 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3904 		reg &= 0xffffc000;
   3905 		/*
   3906 		 * update thresholds: prefetch threshold to 31, host threshold
   3907 		 * to 1 and make sure the granularity is "descriptors" and not
   3908 		 * "cache lines"
   3909 		 */
   3910 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3911 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3912 
   3913 		/*
   3914 		 * momentarily enable the RX ring for the changes to take
   3915 		 * effect
   3916 		 */
   3917 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3918 		CSR_WRITE_FLUSH(sc);
   3919 		delay(150);
   3920 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3921 	}
   3922 }
   3923 
   3924 /*
   3925  * wm_reset:
   3926  *
   3927  *	Reset the i82542 chip.
   3928  */
   3929 static void
   3930 wm_reset(struct wm_softc *sc)
   3931 {
   3932 	int phy_reset = 0;
   3933 	int i, error = 0;
   3934 	uint32_t reg;
   3935 
   3936 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3937 		device_xname(sc->sc_dev), __func__));
   3938 	KASSERT(sc->sc_type != 0);
   3939 
   3940 	/*
   3941 	 * Allocate on-chip memory according to the MTU size.
   3942 	 * The Packet Buffer Allocation register must be written
   3943 	 * before the chip is reset.
   3944 	 */
   3945 	switch (sc->sc_type) {
   3946 	case WM_T_82547:
   3947 	case WM_T_82547_2:
   3948 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3949 		    PBA_22K : PBA_30K;
   3950 		for (i = 0; i < sc->sc_nqueues; i++) {
   3951 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3952 			txq->txq_fifo_head = 0;
   3953 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3954 			txq->txq_fifo_size =
   3955 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3956 			txq->txq_fifo_stall = 0;
   3957 		}
   3958 		break;
   3959 	case WM_T_82571:
   3960 	case WM_T_82572:
   3961 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3962 	case WM_T_80003:
   3963 		sc->sc_pba = PBA_32K;
   3964 		break;
   3965 	case WM_T_82573:
   3966 		sc->sc_pba = PBA_12K;
   3967 		break;
   3968 	case WM_T_82574:
   3969 	case WM_T_82583:
   3970 		sc->sc_pba = PBA_20K;
   3971 		break;
   3972 	case WM_T_82576:
   3973 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3974 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3975 		break;
   3976 	case WM_T_82580:
   3977 	case WM_T_I350:
   3978 	case WM_T_I354:
   3979 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3980 		break;
   3981 	case WM_T_I210:
   3982 	case WM_T_I211:
   3983 		sc->sc_pba = PBA_34K;
   3984 		break;
   3985 	case WM_T_ICH8:
   3986 		/* Workaround for a bit corruption issue in FIFO memory */
   3987 		sc->sc_pba = PBA_8K;
   3988 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3989 		break;
   3990 	case WM_T_ICH9:
   3991 	case WM_T_ICH10:
   3992 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3993 		    PBA_14K : PBA_10K;
   3994 		break;
   3995 	case WM_T_PCH:
   3996 	case WM_T_PCH2:
   3997 	case WM_T_PCH_LPT:
   3998 	case WM_T_PCH_SPT:
   3999 		sc->sc_pba = PBA_26K;
   4000 		break;
   4001 	default:
   4002 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4003 		    PBA_40K : PBA_48K;
   4004 		break;
   4005 	}
   4006 	/*
   4007 	 * Only old or non-multiqueue devices have the PBA register.
   4008 	 * XXX Need special handling for 82575.
   4009 	 */
   4010 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4011 	    || (sc->sc_type == WM_T_82575))
   4012 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4013 
   4014 	/* Prevent the PCI-E bus from sticking */
   4015 	if (sc->sc_flags & WM_F_PCIE) {
   4016 		int timeout = 800;
   4017 
   4018 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4019 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4020 
   4021 		while (timeout--) {
   4022 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4023 			    == 0)
   4024 				break;
   4025 			delay(100);
   4026 		}
   4027 	}
   4028 
   4029 	/* Set the completion timeout for interface */
   4030 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4031 	    || (sc->sc_type == WM_T_82580)
   4032 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4033 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4034 		wm_set_pcie_completion_timeout(sc);
   4035 
   4036 	/* Clear interrupt */
   4037 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4038 	if (sc->sc_nintrs > 1) {
   4039 		if (sc->sc_type != WM_T_82574) {
   4040 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4041 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4042 		} else {
   4043 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4044 		}
   4045 	}
   4046 
   4047 	/* Stop the transmit and receive processes. */
   4048 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4049 	sc->sc_rctl &= ~RCTL_EN;
   4050 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4051 	CSR_WRITE_FLUSH(sc);
   4052 
   4053 	/* XXX set_tbi_sbp_82543() */
   4054 
   4055 	delay(10*1000);
   4056 
   4057 	/* Must acquire the MDIO ownership before MAC reset */
   4058 	switch (sc->sc_type) {
   4059 	case WM_T_82573:
   4060 	case WM_T_82574:
   4061 	case WM_T_82583:
   4062 		error = wm_get_hw_semaphore_82573(sc);
   4063 		break;
   4064 	default:
   4065 		break;
   4066 	}
   4067 
   4068 	/*
   4069 	 * 82541 Errata 29? & 82547 Errata 28?
   4070 	 * See also the description about PHY_RST bit in CTRL register
   4071 	 * in 8254x_GBe_SDM.pdf.
   4072 	 */
   4073 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4074 		CSR_WRITE(sc, WMREG_CTRL,
   4075 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4076 		CSR_WRITE_FLUSH(sc);
   4077 		delay(5000);
   4078 	}
   4079 
   4080 	switch (sc->sc_type) {
   4081 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4082 	case WM_T_82541:
   4083 	case WM_T_82541_2:
   4084 	case WM_T_82547:
   4085 	case WM_T_82547_2:
   4086 		/*
   4087 		 * On some chipsets, a reset through a memory-mapped write
   4088 		 * cycle can cause the chip to reset before completing the
   4089 		 * write cycle.  This causes major headaches that can be
   4090 		 * avoided by issuing the reset via indirect register writes
   4091 		 * through I/O space.
   4092 		 *
   4093 		 * So, if we successfully mapped the I/O BAR at attach time,
   4094 		 * use that.  Otherwise, try our luck with a memory-mapped
   4095 		 * reset.
   4096 		 */
   4097 		if (sc->sc_flags & WM_F_IOH_VALID)
   4098 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4099 		else
   4100 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4101 		break;
   4102 	case WM_T_82545_3:
   4103 	case WM_T_82546_3:
   4104 		/* Use the shadow control register on these chips. */
   4105 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4106 		break;
   4107 	case WM_T_80003:
   4108 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4109 		sc->phy.acquire(sc);
   4110 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4111 		sc->phy.release(sc);
   4112 		break;
   4113 	case WM_T_ICH8:
   4114 	case WM_T_ICH9:
   4115 	case WM_T_ICH10:
   4116 	case WM_T_PCH:
   4117 	case WM_T_PCH2:
   4118 	case WM_T_PCH_LPT:
   4119 	case WM_T_PCH_SPT:
   4120 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4121 		if (wm_phy_resetisblocked(sc) == false) {
   4122 			/*
   4123 			 * Gate automatic PHY configuration by hardware on
   4124 			 * non-managed 82579
   4125 			 */
   4126 			if ((sc->sc_type == WM_T_PCH2)
   4127 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4128 				== 0))
   4129 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4130 
   4131 			reg |= CTRL_PHY_RESET;
   4132 			phy_reset = 1;
   4133 		} else
   4134 			printf("XXX reset is blocked!!!\n");
   4135 		sc->phy.acquire(sc);
   4136 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4137 		/* Don't insert a completion barrier during reset */
   4138 		delay(20*1000);
   4139 		mutex_exit(sc->sc_ich_phymtx);
   4140 		break;
   4141 	case WM_T_82580:
   4142 	case WM_T_I350:
   4143 	case WM_T_I354:
   4144 	case WM_T_I210:
   4145 	case WM_T_I211:
   4146 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4147 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4148 			CSR_WRITE_FLUSH(sc);
   4149 		delay(5000);
   4150 		break;
   4151 	case WM_T_82542_2_0:
   4152 	case WM_T_82542_2_1:
   4153 	case WM_T_82543:
   4154 	case WM_T_82540:
   4155 	case WM_T_82545:
   4156 	case WM_T_82546:
   4157 	case WM_T_82571:
   4158 	case WM_T_82572:
   4159 	case WM_T_82573:
   4160 	case WM_T_82574:
   4161 	case WM_T_82575:
   4162 	case WM_T_82576:
   4163 	case WM_T_82583:
   4164 	default:
   4165 		/* Everything else can safely use the documented method. */
   4166 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4167 		break;
   4168 	}
   4169 
   4170 	/* Must release the MDIO ownership after MAC reset */
   4171 	switch (sc->sc_type) {
   4172 	case WM_T_82573:
   4173 	case WM_T_82574:
   4174 	case WM_T_82583:
   4175 		if (error == 0)
   4176 			wm_put_hw_semaphore_82573(sc);
   4177 		break;
   4178 	default:
   4179 		break;
   4180 	}
   4181 
   4182 	if (phy_reset != 0)
   4183 		wm_get_cfg_done(sc);
   4184 
   4185 	/* reload EEPROM */
   4186 	switch (sc->sc_type) {
   4187 	case WM_T_82542_2_0:
   4188 	case WM_T_82542_2_1:
   4189 	case WM_T_82543:
   4190 	case WM_T_82544:
   4191 		delay(10);
   4192 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4193 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4194 		CSR_WRITE_FLUSH(sc);
   4195 		delay(2000);
   4196 		break;
   4197 	case WM_T_82540:
   4198 	case WM_T_82545:
   4199 	case WM_T_82545_3:
   4200 	case WM_T_82546:
   4201 	case WM_T_82546_3:
   4202 		delay(5*1000);
   4203 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4204 		break;
   4205 	case WM_T_82541:
   4206 	case WM_T_82541_2:
   4207 	case WM_T_82547:
   4208 	case WM_T_82547_2:
   4209 		delay(20000);
   4210 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4211 		break;
   4212 	case WM_T_82571:
   4213 	case WM_T_82572:
   4214 	case WM_T_82573:
   4215 	case WM_T_82574:
   4216 	case WM_T_82583:
   4217 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4218 			delay(10);
   4219 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4220 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4221 			CSR_WRITE_FLUSH(sc);
   4222 		}
   4223 		/* check EECD_EE_AUTORD */
   4224 		wm_get_auto_rd_done(sc);
   4225 		/*
   4226 		 * Phy configuration from NVM starts just after EECD_AUTO_RD
   4227 		 * is set.
   4228 		 */
   4229 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4230 		    || (sc->sc_type == WM_T_82583))
   4231 			delay(25*1000);
   4232 		break;
   4233 	case WM_T_82575:
   4234 	case WM_T_82576:
   4235 	case WM_T_82580:
   4236 	case WM_T_I350:
   4237 	case WM_T_I354:
   4238 	case WM_T_I210:
   4239 	case WM_T_I211:
   4240 	case WM_T_80003:
   4241 		/* check EECD_EE_AUTORD */
   4242 		wm_get_auto_rd_done(sc);
   4243 		break;
   4244 	case WM_T_ICH8:
   4245 	case WM_T_ICH9:
   4246 	case WM_T_ICH10:
   4247 	case WM_T_PCH:
   4248 	case WM_T_PCH2:
   4249 	case WM_T_PCH_LPT:
   4250 	case WM_T_PCH_SPT:
   4251 		break;
   4252 	default:
   4253 		panic("%s: unknown type\n", __func__);
   4254 	}
   4255 
   4256 	/* Check whether EEPROM is present or not */
   4257 	switch (sc->sc_type) {
   4258 	case WM_T_82575:
   4259 	case WM_T_82576:
   4260 	case WM_T_82580:
   4261 	case WM_T_I350:
   4262 	case WM_T_I354:
   4263 	case WM_T_ICH8:
   4264 	case WM_T_ICH9:
   4265 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4266 			/* Not found */
   4267 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4268 			if (sc->sc_type == WM_T_82575)
   4269 				wm_reset_init_script_82575(sc);
   4270 		}
   4271 		break;
   4272 	default:
   4273 		break;
   4274 	}
   4275 
   4276 	if ((sc->sc_type == WM_T_82580)
   4277 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4278 		/* clear global device reset status bit */
   4279 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4280 	}
   4281 
   4282 	/* Clear any pending interrupt events. */
   4283 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4284 	reg = CSR_READ(sc, WMREG_ICR);
   4285 	if (sc->sc_nintrs > 1) {
   4286 		if (sc->sc_type != WM_T_82574) {
   4287 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4288 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4289 		} else
   4290 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4291 	}
   4292 
   4293 	/* reload sc_ctrl */
   4294 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4295 
   4296 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4297 		wm_set_eee_i350(sc);
   4298 
   4299 	/* Clear the host wakeup bit after lcd reset */
   4300 	if (sc->sc_type >= WM_T_PCH) {
   4301 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4302 		    BM_PORT_GEN_CFG);
   4303 		reg &= ~BM_WUC_HOST_WU_BIT;
   4304 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4305 		    BM_PORT_GEN_CFG, reg);
   4306 	}
   4307 
   4308 	/*
   4309 	 * For PCH, this write will make sure that any noise will be detected
   4310 	 * as a CRC error and be dropped rather than show up as a bad packet
   4311 	 * to the DMA engine
   4312 	 */
   4313 	if (sc->sc_type == WM_T_PCH)
   4314 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4315 
   4316 	if (sc->sc_type >= WM_T_82544)
   4317 		CSR_WRITE(sc, WMREG_WUC, 0);
   4318 
   4319 	wm_reset_mdicnfg_82580(sc);
   4320 
   4321 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4322 		wm_pll_workaround_i210(sc);
   4323 }
   4324 
   4325 /*
   4326  * wm_add_rxbuf:
   4327  *
   4328  *	Add a receive buffer to the indicated descriptor.
   4329  */
   4330 static int
   4331 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4332 {
   4333 	struct wm_softc *sc = rxq->rxq_sc;
   4334 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4335 	struct mbuf *m;
   4336 	int error;
   4337 
   4338 	KASSERT(mutex_owned(rxq->rxq_lock));
   4339 
   4340 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4341 	if (m == NULL)
   4342 		return ENOBUFS;
   4343 
   4344 	MCLGET(m, M_DONTWAIT);
   4345 	if ((m->m_flags & M_EXT) == 0) {
   4346 		m_freem(m);
   4347 		return ENOBUFS;
   4348 	}
   4349 
   4350 	if (rxs->rxs_mbuf != NULL)
   4351 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4352 
   4353 	rxs->rxs_mbuf = m;
   4354 
   4355 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4356 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4357 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4358 	if (error) {
   4359 		/* XXX XXX XXX */
   4360 		aprint_error_dev(sc->sc_dev,
   4361 		    "unable to load rx DMA map %d, error = %d\n",
   4362 		    idx, error);
   4363 		panic("wm_add_rxbuf");
   4364 	}
   4365 
   4366 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4367 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4368 
   4369 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4370 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4371 			wm_init_rxdesc(rxq, idx);
   4372 	} else
   4373 		wm_init_rxdesc(rxq, idx);
   4374 
   4375 	return 0;
   4376 }
   4377 
   4378 /*
   4379  * wm_rxdrain:
   4380  *
   4381  *	Drain the receive queue.
   4382  */
   4383 static void
   4384 wm_rxdrain(struct wm_rxqueue *rxq)
   4385 {
   4386 	struct wm_softc *sc = rxq->rxq_sc;
   4387 	struct wm_rxsoft *rxs;
   4388 	int i;
   4389 
   4390 	KASSERT(mutex_owned(rxq->rxq_lock));
   4391 
   4392 	for (i = 0; i < WM_NRXDESC; i++) {
   4393 		rxs = &rxq->rxq_soft[i];
   4394 		if (rxs->rxs_mbuf != NULL) {
   4395 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4396 			m_freem(rxs->rxs_mbuf);
   4397 			rxs->rxs_mbuf = NULL;
   4398 		}
   4399 	}
   4400 }
   4401 
   4402 
   4403 /*
   4404  * XXX copy from FreeBSD's sys/net/rss_config.c
   4405  */
   4406 /*
   4407  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4408  * effectiveness may be limited by algorithm choice and available entropy
   4409  * during the boot.
   4410  *
   4411  * XXXRW: And that we don't randomize it yet!
   4412  *
   4413  * This is the default Microsoft RSS specification key which is also
   4414  * the Chelsio T5 firmware default key.
   4415  */
   4416 #define RSS_KEYSIZE 40
   4417 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4418 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4419 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4420 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4421 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4422 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4423 };
   4424 
   4425 /*
   4426  * Caller must pass an array of size sizeof(rss_key).
   4427  *
   4428  * XXX
   4429  * As if_ixgbe may use this function, it should not be an
   4430  * if_wm specific function.
   4431  */
   4432 static void
   4433 wm_rss_getkey(uint8_t *key)
   4434 {
   4435 
   4436 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4437 }
   4438 
   4439 /*
   4440  * Setup registers for RSS.
   4441  *
   4442  * XXX no VMDq support yet
   4443  */
   4444 static void
   4445 wm_init_rss(struct wm_softc *sc)
   4446 {
   4447 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4448 	int i;
   4449 
   4450 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4451 
   4452 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4453 		int qid, reta_ent;
   4454 
   4455 		qid  = i % sc->sc_nqueues;
   4456 		switch (sc->sc_type) {
   4457 		case WM_T_82574:
   4458 			reta_ent = __SHIFTIN(qid,
   4459 			    RETA_ENT_QINDEX_MASK_82574);
   4460 			break;
   4461 		case WM_T_82575:
   4462 			reta_ent = __SHIFTIN(qid,
   4463 			    RETA_ENT_QINDEX1_MASK_82575);
   4464 			break;
   4465 		default:
   4466 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4467 			break;
   4468 		}
   4469 
   4470 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4471 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4472 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4473 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4474 	}
   4475 
   4476 	wm_rss_getkey((uint8_t *)rss_key);
   4477 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4478 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4479 
   4480 	if (sc->sc_type == WM_T_82574)
   4481 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4482 	else
   4483 		mrqc = MRQC_ENABLE_RSS_MQ;
   4484 
   4485 	/*
   4486 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4487 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4488 	 */
   4489 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4490 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4491 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4492 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4493 
   4494 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4495 }
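        /*
         * Worked example (illustration only): with sc_nqueues == 4, the
         * loop above fills all RETA_NUM_ENTRIES redirection-table entries
         * with queue ids cycling 0,1,2,3, so received flows are spread
         * across the four RX queues according to their RSS hash.
         */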
   4496 
   4497 /*
   4498  * Adjust the TX and RX queue numbers which the system actually uses.
   4499  *
   4500  * The numbers are affected by the parameters below:
   4501  *     - The number of hardware queues
   4502  *     - The number of MSI-X vectors (= "nvectors" argument)
   4503  *     - ncpu
   4504  */
   4505 static void
   4506 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4507 {
   4508 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4509 
   4510 	if (nvectors < 2) {
   4511 		sc->sc_nqueues = 1;
   4512 		return;
   4513 	}
   4514 
   4515 	switch (sc->sc_type) {
   4516 	case WM_T_82572:
   4517 		hw_ntxqueues = 2;
   4518 		hw_nrxqueues = 2;
   4519 		break;
   4520 	case WM_T_82574:
   4521 		hw_ntxqueues = 2;
   4522 		hw_nrxqueues = 2;
   4523 		break;
   4524 	case WM_T_82575:
   4525 		hw_ntxqueues = 4;
   4526 		hw_nrxqueues = 4;
   4527 		break;
   4528 	case WM_T_82576:
   4529 		hw_ntxqueues = 16;
   4530 		hw_nrxqueues = 16;
   4531 		break;
   4532 	case WM_T_82580:
   4533 	case WM_T_I350:
   4534 	case WM_T_I354:
   4535 		hw_ntxqueues = 8;
   4536 		hw_nrxqueues = 8;
   4537 		break;
   4538 	case WM_T_I210:
   4539 		hw_ntxqueues = 4;
   4540 		hw_nrxqueues = 4;
   4541 		break;
   4542 	case WM_T_I211:
   4543 		hw_ntxqueues = 2;
   4544 		hw_nrxqueues = 2;
   4545 		break;
   4546 		/*
   4547 		 * As the ethernet controllers below do not support MSI-X,
   4548 		 * this driver does not use multiqueue on them.
   4549 		 *     - WM_T_80003
   4550 		 *     - WM_T_ICH8
   4551 		 *     - WM_T_ICH9
   4552 		 *     - WM_T_ICH10
   4553 		 *     - WM_T_PCH
   4554 		 *     - WM_T_PCH2
   4555 		 *     - WM_T_PCH_LPT
   4556 		 */
   4557 	default:
   4558 		hw_ntxqueues = 1;
   4559 		hw_nrxqueues = 1;
   4560 		break;
   4561 	}
   4562 
   4563 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4564 
   4565 	/*
   4566 	 * As more queues than MSI-X vectors cannot improve scaling, we
   4567 	 * limit the number of queues actually used.
   4568 	 */
   4569 	if (nvectors < hw_nqueues + 1) {
   4570 		sc->sc_nqueues = nvectors - 1;
   4571 	} else {
   4572 		sc->sc_nqueues = hw_nqueues;
   4573 	}
   4574 
   4575 	/*
   4576 	 * As more queues than CPUs cannot improve scaling, we limit
   4577 	 * the number of queues actually used.
   4578 	 */
   4579 	if (ncpu < sc->sc_nqueues)
   4580 		sc->sc_nqueues = ncpu;
   4581 }
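        /*
         * Worked example (illustration only): on an 82576 (16 hardware
         * TX/RX queue pairs) with nvectors == 5 and ncpu == 8,
         * hw_nqueues is 16, the vector limit gives
         * sc_nqueues = 5 - 1 = 4, and the ncpu limit leaves it at 4;
         * the remaining vector is kept for non-queue interrupts.
         */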
   4582 
   4583 static int
   4584 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4585 {
   4586 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4587 	wmq->wmq_id = qidx;
   4588 	wmq->wmq_intr_idx = intr_idx;
   4589 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4590 #ifdef WM_MPSAFE
   4591 	    | SOFTINT_MPSAFE
   4592 #endif
   4593 	    , wm_handle_queue, wmq);
   4594 	if (wmq->wmq_si != NULL)
   4595 		return 0;
   4596 
   4597 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4598 	    wmq->wmq_id);
   4599 
   4600 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4601 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4602 	return ENOMEM;
   4603 }
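
/*
 * Each queue pairs its hardware interrupt with a softint: the
 * interrupt handler can do the minimum work at IPL_NET and defer the
 * bulk of the Tx/Rx processing to wm_handle_queue(), which runs at
 * software interrupt level via wmq_si.
 */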
   4604 
   4605 /*
   4606  * Both single interrupt MSI and INTx can use this function.
   4607  */
   4608 static int
   4609 wm_setup_legacy(struct wm_softc *sc)
   4610 {
   4611 	pci_chipset_tag_t pc = sc->sc_pc;
   4612 	const char *intrstr = NULL;
   4613 	char intrbuf[PCI_INTRSTR_LEN];
   4614 	int error;
   4615 
   4616 	error = wm_alloc_txrx_queues(sc);
   4617 	if (error) {
   4618 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4619 		    error);
   4620 		return ENOMEM;
   4621 	}
   4622 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4623 	    sizeof(intrbuf));
   4624 #ifdef WM_MPSAFE
   4625 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4626 #endif
   4627 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4628 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4629 	if (sc->sc_ihs[0] == NULL) {
   4630 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4631 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4632 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4633 		return ENOMEM;
   4634 	}
   4635 
   4636 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4637 	sc->sc_nintrs = 1;
   4638 
   4639 	return wm_softint_establish(sc, 0, 0);
   4640 }
   4641 
   4642 static int
   4643 wm_setup_msix(struct wm_softc *sc)
   4644 {
   4645 	void *vih;
   4646 	kcpuset_t *affinity;
   4647 	int qidx, error, intr_idx, txrx_established;
   4648 	pci_chipset_tag_t pc = sc->sc_pc;
   4649 	const char *intrstr = NULL;
   4650 	char intrbuf[PCI_INTRSTR_LEN];
   4651 	char intr_xname[INTRDEVNAMEBUF];
   4652 
   4653 	if (sc->sc_nqueues < ncpu) {
   4654 		/*
   4655 		 * To avoid colliding with other devices' interrupts, the
   4656 		 * affinity of Tx/Rx interrupts starts at CPU#1.
   4657 		 */
   4658 		sc->sc_affinity_offset = 1;
   4659 	} else {
   4660 		/*
   4661 		 * In this case, this device uses all CPUs. So, for readability,
   4662 		 * we match the affinitized cpu_index to the MSI-X vector number.
   4663 		 */
   4664 		sc->sc_affinity_offset = 0;
   4665 	}
   4666 
   4667 	error = wm_alloc_txrx_queues(sc);
   4668 	if (error) {
   4669 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4670 		    error);
   4671 		return ENOMEM;
   4672 	}
   4673 
   4674 	kcpuset_create(&affinity, false);
   4675 	intr_idx = 0;
   4676 
   4677 	/*
   4678 	 * TX and RX
   4679 	 */
   4680 	txrx_established = 0;
   4681 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4682 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4683 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4684 
   4685 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4686 		    sizeof(intrbuf));
   4687 #ifdef WM_MPSAFE
   4688 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4689 		    PCI_INTR_MPSAFE, true);
   4690 #endif
   4691 		memset(intr_xname, 0, sizeof(intr_xname));
   4692 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4693 		    device_xname(sc->sc_dev), qidx);
   4694 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4695 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4696 		if (vih == NULL) {
   4697 			aprint_error_dev(sc->sc_dev,
   4698 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   4699 			    intrstr ? " at " : "",
   4700 			    intrstr ? intrstr : "");
   4701 
   4702 			goto fail;
   4703 		}
   4704 		kcpuset_zero(affinity);
   4705 		/* Round-robin affinity */
   4706 		kcpuset_set(affinity, affinity_to);
   4707 		error = interrupt_distribute(vih, affinity, NULL);
   4708 		if (error == 0) {
   4709 			aprint_normal_dev(sc->sc_dev,
   4710 			    "for TX and RX interrupting at %s affinity to %u\n",
   4711 			    intrstr, affinity_to);
   4712 		} else {
   4713 			aprint_normal_dev(sc->sc_dev,
   4714 			    "for TX and RX interrupting at %s\n", intrstr);
   4715 		}
   4716 		sc->sc_ihs[intr_idx] = vih;
   4717 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4718 			goto fail;
   4719 		txrx_established++;
   4720 		intr_idx++;
   4721 	}
   4722 
   4723 	/*
   4724 	 * LINK
   4725 	 */
   4726 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4727 	    sizeof(intrbuf));
   4728 #ifdef WM_MPSAFE
   4729 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4730 #endif
   4731 	memset(intr_xname, 0, sizeof(intr_xname));
   4732 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4733 	    device_xname(sc->sc_dev));
   4734 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4735 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4736 	if (vih == NULL) {
   4737 		aprint_error_dev(sc->sc_dev,
   4738 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4739 		    intrstr ? " at " : "",
   4740 		    intrstr ? intrstr : "");
   4741 
   4742 		goto fail;
   4743 	}
   4744 	/* Keep the default affinity for the LINK interrupt */
   4745 	aprint_normal_dev(sc->sc_dev,
   4746 	    "for LINK interrupting at %s\n", intrstr);
   4747 	sc->sc_ihs[intr_idx] = vih;
   4748 	sc->sc_link_intr_idx = intr_idx;
   4749 
   4750 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4751 	kcpuset_destroy(affinity);
   4752 	return 0;
   4753 
   4754  fail:
   4755 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4756 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4757 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4758 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4759 	}
   4760 
   4761 	kcpuset_destroy(affinity);
   4762 	return ENOMEM;
   4763 }
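
/*
 * Affinity example (illustrative): with ncpu = 4, three queues and
 * sc_affinity_offset = 1, the round-robin above pins queue vectors
 * 0..2 to CPU#1..CPU#3, leaving CPU#0 to other devices; the LINK
 * vector keeps the system default affinity.
 */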
   4764 
   4765 static void
   4766 wm_turnon(struct wm_softc *sc)
   4767 {
   4768 	int i;
   4769 
   4770 	KASSERT(WM_CORE_LOCKED(sc));
   4771 
   4772 	/*
   4773 	 * Must unset the stopping flags in ascending order.
   4774 	 */
   4775 	for (i = 0; i < sc->sc_nqueues; i++) {
   4776 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4777 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4778 
   4779 		mutex_enter(txq->txq_lock);
   4780 		txq->txq_stopping = false;
   4781 		mutex_exit(txq->txq_lock);
   4782 
   4783 		mutex_enter(rxq->rxq_lock);
   4784 		rxq->rxq_stopping = false;
   4785 		mutex_exit(rxq->rxq_lock);
   4786 	}
   4787 
   4788 	sc->sc_core_stopping = false;
   4789 }
   4790 
   4791 static void
   4792 wm_turnoff(struct wm_softc *sc)
   4793 {
   4794 	int i;
   4795 
   4796 	KASSERT(WM_CORE_LOCKED(sc));
   4797 
   4798 	sc->sc_core_stopping = true;
   4799 
   4800 	/*
   4801 	 * Must set the stopping flags in ascending order.
   4802 	 */
   4803 	for (i = 0; i < sc->sc_nqueues; i++) {
   4804 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4805 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4806 
   4807 		mutex_enter(rxq->rxq_lock);
   4808 		rxq->rxq_stopping = true;
   4809 		mutex_exit(rxq->rxq_lock);
   4810 
   4811 		mutex_enter(txq->txq_lock);
   4812 		txq->txq_stopping = true;
   4813 		mutex_exit(txq->txq_lock);
   4814 	}
   4815 }
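
/*
 * Note the symmetry with wm_turnon(): wm_turnoff() sets
 * sc_core_stopping before the per-queue flags, and wm_turnon() clears
 * it only after them, so sc_core_stopping is always the outermost
 * "stopping" indication.
 */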
   4816 
   4817 /*
   4818  * write interrupt interval value to ITR or EITR
   4819  */
   4820 static void
   4821 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4822 {
   4823 
   4824 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4825 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4826 
   4827 		/*
   4828 		 * The 82575 doesn't have the CNT_INGR field, so
   4829 		 * overwrite the counter field in software.
   4830 		 */
   4831 		if (sc->sc_type == WM_T_82575)
   4832 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4833 		else
   4834 			eitr |= EITR_CNT_INGR;
   4835 
   4836 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4837 	} else if (sc->sc_type == WM_T_82574 && sc->sc_nintrs > 1) {
   4838 		/*
   4839 		 * The 82574 has both ITR and EITR. Set EITR when we use
   4840 		 * the multiqueue function with MSI-X.
   4841 		 */
   4842 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4843 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4844 	} else {
   4845 		KASSERT(wmq->wmq_id == 0);
   4846 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4847 	}
   4848 }
   4849 
   4850 /*
   4851  * wm_init:		[ifnet interface function]
   4852  *
   4853  *	Initialize the interface.
   4854  */
   4855 static int
   4856 wm_init(struct ifnet *ifp)
   4857 {
   4858 	struct wm_softc *sc = ifp->if_softc;
   4859 	int ret;
   4860 
   4861 	WM_CORE_LOCK(sc);
   4862 	ret = wm_init_locked(ifp);
   4863 	WM_CORE_UNLOCK(sc);
   4864 
   4865 	return ret;
   4866 }
   4867 
   4868 static int
   4869 wm_init_locked(struct ifnet *ifp)
   4870 {
   4871 	struct wm_softc *sc = ifp->if_softc;
   4872 	int i, j, trynum, error = 0;
   4873 	uint32_t reg;
   4874 
   4875 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4876 		device_xname(sc->sc_dev), __func__));
   4877 	KASSERT(WM_CORE_LOCKED(sc));
   4878 
   4879 	/*
   4880 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   4881 	 * There is a small but measurable benefit to avoiding the adjustment
   4882 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4883 	 * on such platforms.  One possibility is that the DMA itself is
   4884 	 * slightly more efficient if the front of the entire packet (instead
   4885 	 * of the front of the headers) is aligned.
   4886 	 *
   4887 	 * Note we must always set align_tweak to 0 if we are using
   4888 	 * jumbo frames.
   4889 	 */
   4890 #ifdef __NO_STRICT_ALIGNMENT
   4891 	sc->sc_align_tweak = 0;
   4892 #else
   4893 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4894 		sc->sc_align_tweak = 0;
   4895 	else
   4896 		sc->sc_align_tweak = 2;
   4897 #endif /* __NO_STRICT_ALIGNMENT */
   4898 
   4899 	/* Cancel any pending I/O. */
   4900 	wm_stop_locked(ifp, 0);
   4901 
   4902 	/* Update statistics before reset */
   4903 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4904 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4905 
   4906 	/* PCH_SPT hardware workaround */
   4907 	if (sc->sc_type == WM_T_PCH_SPT)
   4908 		wm_flush_desc_rings(sc);
   4909 
   4910 	/* Reset the chip to a known state. */
   4911 	wm_reset(sc);
   4912 
   4913 	/* AMT based hardware can now take control from firmware */
   4914 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4915 		wm_get_hw_control(sc);
   4916 
   4917 	/* Init hardware bits */
   4918 	wm_initialize_hardware_bits(sc);
   4919 
   4920 	/* Reset the PHY. */
   4921 	if (sc->sc_flags & WM_F_HAS_MII)
   4922 		wm_gmii_reset(sc);
   4923 
   4924 	/* Calculate (E)ITR value */
   4925 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   4926 		/*
   4927 		 * For NEWQUEUE's EITR (except for the 82575).
   4928 		 * The 82575's EITR should be set to the same throttling
   4929 		 * value as other old controllers' ITR because the
   4930 		 * interrupt/sec calculation is the same:
   4931 		 * 1,000,000,000 / (N * 256).
   4932 		 *
   4933 		 * The 82574's EITR should be set to the same throttling
   4934 		 * value as ITR.  For N interrupts/sec, set this value to
   4935 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   4936 		 */
   4937 		sc->sc_itr_init = 450;
   4938 	} else if (sc->sc_type >= WM_T_82543) {
   4939 		/*
   4940 		 * Set up the interrupt throttling register (units of 256ns)
   4941 		 * Note that a footnote in Intel's documentation says this
   4942 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4943 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4944 		 * that that is also true for the 1024ns units of the other
   4945 		 * interrupt-related timer registers -- so, really, we ought
   4946 		 * to divide this value by 4 when the link speed is low.
   4947 		 *
   4948 		 * XXX implement this division at link speed change!
   4949 		 */
   4950 
   4951 		/*
   4952 		 * For N interrupts/sec, set this value to:
   4953 		 * 1,000,000,000 / (N * 256).  Note that we set the
   4954 		 * absolute and packet timer values to this value
   4955 		 * divided by 4 to get "simple timer" behavior.
   4956 		 */
   4957 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   4958 	}
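
	/*
	 * Worked numbers for the defaults above (illustrative only,
	 * not compiled):
	 */
#if 0
	unsigned newqueue_ints = 1000000 / 450;		  /* ~2222 ints/sec */
	unsigned legacy_ints = 1000000000 / (1500 * 256); /* ~2604 ints/sec */
#endif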
   4959 
   4960 	error = wm_init_txrx_queues(sc);
   4961 	if (error)
   4962 		goto out;
   4963 
   4964 	/*
   4965 	 * Clear out the VLAN table -- we don't use it (yet).
   4966 	 */
   4967 	CSR_WRITE(sc, WMREG_VET, 0);
   4968 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4969 		trynum = 10; /* Due to hw errata */
   4970 	else
   4971 		trynum = 1;
   4972 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4973 		for (j = 0; j < trynum; j++)
   4974 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4975 
   4976 	/*
   4977 	 * Set up flow-control parameters.
   4978 	 *
   4979 	 * XXX Values could probably stand some tuning.
   4980 	 */
   4981 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4982 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4983 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4984 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4985 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4986 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4987 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4988 	}
   4989 
   4990 	sc->sc_fcrtl = FCRTL_DFLT;
   4991 	if (sc->sc_type < WM_T_82543) {
   4992 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4993 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4994 	} else {
   4995 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4996 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4997 	}
   4998 
   4999 	if (sc->sc_type == WM_T_80003)
   5000 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5001 	else
   5002 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5003 
   5004 	/* Writes the control register. */
   5005 	wm_set_vlan(sc);
   5006 
   5007 	if (sc->sc_flags & WM_F_HAS_MII) {
   5008 		int val;
   5009 
   5010 		switch (sc->sc_type) {
   5011 		case WM_T_80003:
   5012 		case WM_T_ICH8:
   5013 		case WM_T_ICH9:
   5014 		case WM_T_ICH10:
   5015 		case WM_T_PCH:
   5016 		case WM_T_PCH2:
   5017 		case WM_T_PCH_LPT:
   5018 		case WM_T_PCH_SPT:
   5019 			/*
   5020 			 * Set the mac to wait the maximum time between each
   5021 			 * iteration and increase the max iterations when
   5022 			 * polling the phy; this fixes erroneous timeouts at
   5023 			 * 10Mbps.
   5024 			 */
   5025 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5026 			    0xFFFF);
   5027 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5028 			val |= 0x3F;
   5029 			wm_kmrn_writereg(sc,
   5030 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5031 			break;
   5032 		default:
   5033 			break;
   5034 		}
   5035 
   5036 		if (sc->sc_type == WM_T_80003) {
   5037 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5038 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5039 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5040 
   5041 			/* Bypass RX and TX FIFO's */
   5042 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5043 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5044 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5045 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5046 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5047 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5048 		}
   5049 	}
   5050 #if 0
   5051 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5052 #endif
   5053 
   5054 	/* Set up checksum offload parameters. */
   5055 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5056 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5057 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5058 		reg |= RXCSUM_IPOFL;
   5059 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5060 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5061 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5062 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5063 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5064 
   5065 	/* Set up MSI-X */
   5066 	if (sc->sc_nintrs > 1) {
   5067 		uint32_t ivar;
   5068 		struct wm_queue *wmq;
   5069 		int qid, qintr_idx;
   5070 
   5071 		if (sc->sc_type == WM_T_82575) {
   5072 			/* Interrupt control */
   5073 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5074 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5075 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5076 
   5077 			/* TX and RX */
   5078 			for (i = 0; i < sc->sc_nqueues; i++) {
   5079 				wmq = &sc->sc_queue[i];
   5080 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5081 				    EITR_TX_QUEUE(wmq->wmq_id)
   5082 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5083 			}
   5084 			/* Link status */
   5085 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5086 			    EITR_OTHER);
   5087 		} else if (sc->sc_type == WM_T_82574) {
   5088 			/* Interrupt control */
   5089 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5090 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5091 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5092 
   5093 			/*
   5094 			 * Work around an issue with spurious interrupts
   5095 			 * in MSI-X mode.
   5096 			 * At wm_initialize_hardware_bits(), sc_nintrs is not
   5097 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5098 			 */
   5099 			reg = CSR_READ(sc, WMREG_RFCTL);
   5100 			reg |= WMREG_RFCTL_ACKDIS;
   5101 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5102 
   5103 			ivar = 0;
   5104 			/* TX and RX */
   5105 			for (i = 0; i < sc->sc_nqueues; i++) {
   5106 				wmq = &sc->sc_queue[i];
   5107 				qid = wmq->wmq_id;
   5108 				qintr_idx = wmq->wmq_intr_idx;
   5109 
   5110 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5111 				    IVAR_TX_MASK_Q_82574(qid));
   5112 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5113 				    IVAR_RX_MASK_Q_82574(qid));
   5114 			}
   5115 			/* Link status */
   5116 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5117 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5118 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5119 		} else {
   5120 			/* Interrupt control */
   5121 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5122 			    | GPIE_EIAME | GPIE_PBA);
   5123 
   5124 			switch (sc->sc_type) {
   5125 			case WM_T_82580:
   5126 			case WM_T_I350:
   5127 			case WM_T_I354:
   5128 			case WM_T_I210:
   5129 			case WM_T_I211:
   5130 				/* TX and RX */
   5131 				for (i = 0; i < sc->sc_nqueues; i++) {
   5132 					wmq = &sc->sc_queue[i];
   5133 					qid = wmq->wmq_id;
   5134 					qintr_idx = wmq->wmq_intr_idx;
   5135 
   5136 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5137 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5138 					ivar |= __SHIFTIN((qintr_idx
   5139 						| IVAR_VALID),
   5140 					    IVAR_TX_MASK_Q(qid));
   5141 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5142 					ivar |= __SHIFTIN((qintr_idx
   5143 						| IVAR_VALID),
   5144 					    IVAR_RX_MASK_Q(qid));
   5145 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5146 				}
   5147 				break;
   5148 			case WM_T_82576:
   5149 				/* TX and RX */
   5150 				for (i = 0; i < sc->sc_nqueues; i++) {
   5151 					wmq = &sc->sc_queue[i];
   5152 					qid = wmq->wmq_id;
   5153 					qintr_idx = wmq->wmq_intr_idx;
   5154 
   5155 					ivar = CSR_READ(sc,
   5156 					    WMREG_IVAR_Q_82576(qid));
   5157 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5158 					ivar |= __SHIFTIN((qintr_idx
   5159 						| IVAR_VALID),
   5160 					    IVAR_TX_MASK_Q_82576(qid));
   5161 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5162 					ivar |= __SHIFTIN((qintr_idx
   5163 						| IVAR_VALID),
   5164 					    IVAR_RX_MASK_Q_82576(qid));
   5165 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5166 					    ivar);
   5167 				}
   5168 				break;
   5169 			default:
   5170 				break;
   5171 			}
   5172 
   5173 			/* Link status */
   5174 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5175 			    IVAR_MISC_OTHER);
   5176 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5177 		}
   5178 
   5179 		if (sc->sc_nqueues > 1) {
   5180 			wm_init_rss(sc);
   5181 
   5182 			/*
   5183 			 * NOTE: Receive Full-Packet Checksum Offload
   5184 			 * is mutually exclusive with Multiqueue. However,
   5185 			 * this is not the same as the TCP/IP checksum
   5186 			 * offloads, which still work.
   5187 			 */
   5188 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5189 			reg |= RXCSUM_PCSD;
   5190 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5191 		}
   5192 	}
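
	/*
	 * Vector-mapping sketch (illustrative): with two queues on
	 * vectors 0 and 1 and the link interrupt on vector 2, the
	 * MSIXBM/IVAR programming above makes TX/RX events of queue N
	 * raise vector N while link/other events raise vector 2.
	 */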
   5193 
   5194 	/* Set up the interrupt registers. */
   5195 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5196 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5197 	    ICR_RXO | ICR_RXT0;
   5198 	if (sc->sc_nintrs > 1) {
   5199 		uint32_t mask;
   5200 		struct wm_queue *wmq;
   5201 
   5202 		switch (sc->sc_type) {
   5203 		case WM_T_82574:
   5204 			mask = 0;
   5205 			for (i = 0; i < sc->sc_nqueues; i++) {
   5206 				wmq = &sc->sc_queue[i];
   5207 				mask |= ICR_TXQ(wmq->wmq_id);
   5208 				mask |= ICR_RXQ(wmq->wmq_id);
   5209 			}
   5210 			mask |= ICR_OTHER;
   5211 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5212 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5213 			break;
   5214 		default:
   5215 			if (sc->sc_type == WM_T_82575) {
   5216 				mask = 0;
   5217 				for (i = 0; i < sc->sc_nqueues; i++) {
   5218 					wmq = &sc->sc_queue[i];
   5219 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5220 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5221 				}
   5222 				mask |= EITR_OTHER;
   5223 			} else {
   5224 				mask = 0;
   5225 				for (i = 0; i < sc->sc_nqueues; i++) {
   5226 					wmq = &sc->sc_queue[i];
   5227 					mask |= 1 << wmq->wmq_intr_idx;
   5228 				}
   5229 				mask |= 1 << sc->sc_link_intr_idx;
   5230 			}
   5231 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5232 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5233 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5234 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5235 			break;
   5236 		}
   5237 	} else
   5238 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5239 
   5240 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5241 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5242 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5243 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5244 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5245 		reg |= KABGTXD_BGSQLBIAS;
   5246 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5247 	}
   5248 
   5249 	/* Set up the inter-packet gap. */
   5250 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5251 
   5252 	if (sc->sc_type >= WM_T_82543) {
   5253 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5254 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5255 			wm_itrs_writereg(sc, wmq);
   5256 		}
   5257 		/*
   5258 		 * Link interrupts occur much less frequently than TX
   5259 		 * and RX interrupts, so we don't tune the
   5260 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
   5261 		 * if_igb does.
   5262 		 */
   5263 	}
   5264 
   5265 	/* Set the VLAN ethernetype. */
   5266 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5267 
   5268 	/*
   5269 	 * Set up the transmit control register; we start out with
   5270 	 * a collision distance suitable for FDX, but update it when
   5271 	 * we resolve the media type.
   5272 	 */
   5273 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5274 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5275 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5276 	if (sc->sc_type >= WM_T_82571)
   5277 		sc->sc_tctl |= TCTL_MULR;
   5278 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5279 
   5280 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5281 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5282 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5283 	}
   5284 
   5285 	if (sc->sc_type == WM_T_80003) {
   5286 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5287 		reg &= ~TCTL_EXT_GCEX_MASK;
   5288 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5289 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5290 	}
   5291 
   5292 	/* Set the media. */
   5293 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5294 		goto out;
   5295 
   5296 	/* Configure for OS presence */
   5297 	wm_init_manageability(sc);
   5298 
   5299 	/*
   5300 	 * Set up the receive control register; we actually program
   5301 	 * the register when we set the receive filter.  Use multicast
   5302 	 * address offset type 0.
   5303 	 *
   5304 	 * Only the i82544 has the ability to strip the incoming
   5305 	 * CRC, so we don't enable that feature.
   5306 	 */
   5307 	sc->sc_mchash_type = 0;
   5308 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5309 	    | RCTL_MO(sc->sc_mchash_type);
   5310 
   5311 	/*
   5312 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5313 	 */
   5314 	if (sc->sc_type == WM_T_82574)
   5315 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5316 
   5317 	/*
   5318 	 * The I350 has a bug where it always strips the CRC whether
   5319 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   5320 	 */
   5321 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5322 	    || (sc->sc_type == WM_T_I210))
   5323 		sc->sc_rctl |= RCTL_SECRC;
   5324 
   5325 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5326 	    && (ifp->if_mtu > ETHERMTU)) {
   5327 		sc->sc_rctl |= RCTL_LPE;
   5328 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5329 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5330 	}
   5331 
   5332 	if (MCLBYTES == 2048) {
   5333 		sc->sc_rctl |= RCTL_2k;
   5334 	} else {
   5335 		if (sc->sc_type >= WM_T_82543) {
   5336 			switch (MCLBYTES) {
   5337 			case 4096:
   5338 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5339 				break;
   5340 			case 8192:
   5341 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5342 				break;
   5343 			case 16384:
   5344 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5345 				break;
   5346 			default:
   5347 				panic("wm_init: MCLBYTES %d unsupported",
   5348 				    MCLBYTES);
   5349 				break;
   5350 			}
   5351 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5352 	}
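
	/*
	 * For example, with the common MCLBYTES = 2048, RCTL_2k selects
	 * 2 KB receive buffers; the larger cluster sizes need the BSEX
	 * (buffer size extension) encodings, which only the i82543 and
	 * later provide.
	 */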
   5353 
   5354 	/* Set the receive filter. */
   5355 	wm_set_filter(sc);
   5356 
   5357 	/* Enable ECC */
   5358 	switch (sc->sc_type) {
   5359 	case WM_T_82571:
   5360 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5361 		reg |= PBA_ECC_CORR_EN;
   5362 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5363 		break;
   5364 	case WM_T_PCH_LPT:
   5365 	case WM_T_PCH_SPT:
   5366 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5367 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5368 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5369 
   5370 		sc->sc_ctrl |= CTRL_MEHE;
   5371 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5372 		break;
   5373 	default:
   5374 		break;
   5375 	}
   5376 
   5377 	/* On 575 and later set RDT only if RX enabled */
   5378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5379 		int qidx;
   5380 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5381 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5382 			for (i = 0; i < WM_NRXDESC; i++) {
   5383 				mutex_enter(rxq->rxq_lock);
   5384 				wm_init_rxdesc(rxq, i);
   5385 				mutex_exit(rxq->rxq_lock);
   5386 
   5387 			}
   5388 		}
   5389 	}
   5390 
   5391 	wm_turnon(sc);
   5392 
   5393 	/* Start the one second link check clock. */
   5394 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5395 
   5396 	/* ...all done! */
   5397 	ifp->if_flags |= IFF_RUNNING;
   5398 	ifp->if_flags &= ~IFF_OACTIVE;
   5399 
   5400  out:
   5401 	sc->sc_if_flags = ifp->if_flags;
   5402 	if (error)
   5403 		log(LOG_ERR, "%s: interface not running\n",
   5404 		    device_xname(sc->sc_dev));
   5405 	return error;
   5406 }
   5407 
   5408 /*
   5409  * wm_stop:		[ifnet interface function]
   5410  *
   5411  *	Stop transmission on the interface.
   5412  */
   5413 static void
   5414 wm_stop(struct ifnet *ifp, int disable)
   5415 {
   5416 	struct wm_softc *sc = ifp->if_softc;
   5417 
   5418 	WM_CORE_LOCK(sc);
   5419 	wm_stop_locked(ifp, disable);
   5420 	WM_CORE_UNLOCK(sc);
   5421 }
   5422 
   5423 static void
   5424 wm_stop_locked(struct ifnet *ifp, int disable)
   5425 {
   5426 	struct wm_softc *sc = ifp->if_softc;
   5427 	struct wm_txsoft *txs;
   5428 	int i, qidx;
   5429 
   5430 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5431 		device_xname(sc->sc_dev), __func__));
   5432 	KASSERT(WM_CORE_LOCKED(sc));
   5433 
   5434 	wm_turnoff(sc);
   5435 
   5436 	/* Stop the one second clock. */
   5437 	callout_stop(&sc->sc_tick_ch);
   5438 
   5439 	/* Stop the 82547 Tx FIFO stall check timer. */
   5440 	if (sc->sc_type == WM_T_82547)
   5441 		callout_stop(&sc->sc_txfifo_ch);
   5442 
   5443 	if (sc->sc_flags & WM_F_HAS_MII) {
   5444 		/* Down the MII. */
   5445 		mii_down(&sc->sc_mii);
   5446 	} else {
   5447 #if 0
   5448 		/* Should we clear PHY's status properly? */
   5449 		wm_reset(sc);
   5450 #endif
   5451 	}
   5452 
   5453 	/* Stop the transmit and receive processes. */
   5454 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5455 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5456 	sc->sc_rctl &= ~RCTL_EN;
   5457 
   5458 	/*
   5459 	 * Clear the interrupt mask to ensure the device cannot assert its
   5460 	 * interrupt line.
   5461 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5462 	 * service any currently pending or shared interrupt.
   5463 	 */
   5464 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5465 	sc->sc_icr = 0;
   5466 	if (sc->sc_nintrs > 1) {
   5467 		if (sc->sc_type != WM_T_82574) {
   5468 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5469 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5470 		} else
   5471 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5472 	}
   5473 
   5474 	/* Release any queued transmit buffers. */
   5475 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5476 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5477 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5478 		mutex_enter(txq->txq_lock);
   5479 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5480 			txs = &txq->txq_soft[i];
   5481 			if (txs->txs_mbuf != NULL) {
   5482 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5483 				m_freem(txs->txs_mbuf);
   5484 				txs->txs_mbuf = NULL;
   5485 			}
   5486 		}
   5487 		mutex_exit(txq->txq_lock);
   5488 	}
   5489 
   5490 	/* Mark the interface as down and cancel the watchdog timer. */
   5491 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5492 	ifp->if_timer = 0;
   5493 
   5494 	if (disable) {
   5495 		for (i = 0; i < sc->sc_nqueues; i++) {
   5496 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5497 			mutex_enter(rxq->rxq_lock);
   5498 			wm_rxdrain(rxq);
   5499 			mutex_exit(rxq->rxq_lock);
   5500 		}
   5501 	}
   5502 
   5503 #if 0 /* notyet */
   5504 	if (sc->sc_type >= WM_T_82544)
   5505 		CSR_WRITE(sc, WMREG_WUC, 0);
   5506 #endif
   5507 }
   5508 
   5509 static void
   5510 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5511 {
   5512 	struct mbuf *m;
   5513 	int i;
   5514 
   5515 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5516 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5517 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5518 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5519 		    m->m_data, m->m_len, m->m_flags);
   5520 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5521 	    i, i == 1 ? "" : "s");
   5522 }
   5523 
   5524 /*
   5525  * wm_82547_txfifo_stall:
   5526  *
   5527  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5528  *	reset the FIFO pointers, and restart packet transmission.
   5529  */
   5530 static void
   5531 wm_82547_txfifo_stall(void *arg)
   5532 {
   5533 	struct wm_softc *sc = arg;
   5534 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5535 
   5536 	mutex_enter(txq->txq_lock);
   5537 
   5538 	if (txq->txq_stopping)
   5539 		goto out;
   5540 
   5541 	if (txq->txq_fifo_stall) {
   5542 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5543 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5544 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5545 			/*
   5546 			 * Packets have drained.  Stop transmitter, reset
   5547 			 * FIFO pointers, restart transmitter, and kick
   5548 			 * the packet queue.
   5549 			 */
   5550 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5551 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5552 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5553 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5554 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5555 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5556 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5557 			CSR_WRITE_FLUSH(sc);
   5558 
   5559 			txq->txq_fifo_head = 0;
   5560 			txq->txq_fifo_stall = 0;
   5561 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5562 		} else {
   5563 			/*
   5564 			 * Still waiting for packets to drain; try again in
   5565 			 * another tick.
   5566 			 */
   5567 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5568 		}
   5569 	}
   5570 
   5571 out:
   5572 	mutex_exit(txq->txq_lock);
   5573 }
   5574 
   5575 /*
   5576  * wm_82547_txfifo_bugchk:
   5577  *
   5578  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5579  *	prevent enqueueing a packet that would wrap around the end
   5580  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5581  *
   5582  *	We do this by checking the amount of space before the end
   5583  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5584  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5585  *	the internal FIFO pointers to the beginning, and restart
   5586  *	transmission on the interface.
   5587  */
   5588 #define	WM_FIFO_HDR		0x10
   5589 #define	WM_82547_PAD_LEN	0x3e0
   5590 static int
   5591 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5592 {
   5593 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5594 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5595 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5596 
   5597 	/* Just return if already stalled. */
   5598 	if (txq->txq_fifo_stall)
   5599 		return 1;
   5600 
   5601 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5602 		/* Stall only occurs in half-duplex mode. */
   5603 		goto send_packet;
   5604 	}
   5605 
   5606 	if (len >= WM_82547_PAD_LEN + space) {
   5607 		txq->txq_fifo_stall = 1;
   5608 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5609 		return 1;
   5610 	}
   5611 
   5612  send_packet:
   5613 	txq->txq_fifo_head += len;
   5614 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5615 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5616 
   5617 	return 0;
   5618 }
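
/*
 * Worked example for wm_82547_txfifo_bugchk() (illustrative): a 1514-byte
 * frame rounds up to len = roundup(1514 + 0x10, 0x10) = 0x600.  With
 * space = 0x300 bytes left before the end of the FIFO, the check
 * len >= WM_82547_PAD_LEN + space (0x600 >= 0x6e0) is false, so the
 * packet is sent and txq_fifo_head advances, wrapping modulo
 * txq_fifo_size; a longer frame would instead arm the stall callout.
 */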
   5619 
   5620 static int
   5621 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5622 {
   5623 	int error;
   5624 
   5625 	/*
   5626 	 * Allocate the control data structures, and create and load the
   5627 	 * DMA map for it.
   5628 	 *
   5629 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5630 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5631 	 * both sets within the same 4G segment.
   5632 	 */
   5633 	if (sc->sc_type < WM_T_82544)
   5634 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5635 	else
   5636 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5637 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5638 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5639 	else
   5640 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5641 
   5642 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5643 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5644 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5645 		aprint_error_dev(sc->sc_dev,
   5646 		    "unable to allocate TX control data, error = %d\n",
   5647 		    error);
   5648 		goto fail_0;
   5649 	}
   5650 
   5651 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5652 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5653 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5654 		aprint_error_dev(sc->sc_dev,
   5655 		    "unable to map TX control data, error = %d\n", error);
   5656 		goto fail_1;
   5657 	}
   5658 
   5659 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5660 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5661 		aprint_error_dev(sc->sc_dev,
   5662 		    "unable to create TX control data DMA map, error = %d\n",
   5663 		    error);
   5664 		goto fail_2;
   5665 	}
   5666 
   5667 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5668 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5669 		aprint_error_dev(sc->sc_dev,
   5670 		    "unable to load TX control data DMA map, error = %d\n",
   5671 		    error);
   5672 		goto fail_3;
   5673 	}
   5674 
   5675 	return 0;
   5676 
   5677  fail_3:
   5678 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5679  fail_2:
   5680 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5681 	    WM_TXDESCS_SIZE(txq));
   5682  fail_1:
   5683 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5684  fail_0:
   5685 	return error;
   5686 }
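
/*
 * The (bus_size_t)0x100000000ULL passed above is bus_dmamem_alloc()'s
 * boundary argument: the returned segment may not cross a 4 GB
 * boundary, which is how the same-4G-segment NOTE above is enforced
 * for the descriptor rings.
 */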
   5687 
   5688 static void
   5689 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5690 {
   5691 
   5692 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5693 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5694 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5695 	    WM_TXDESCS_SIZE(txq));
   5696 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5697 }
   5698 
   5699 static int
   5700 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5701 {
   5702 	int error;
   5703 	size_t rxq_descs_size;
   5704 
   5705 	/*
   5706 	 * Allocate the control data structures, and create and load the
   5707 	 * DMA map for it.
   5708 	 *
   5709 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5710 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5711 	 * both sets within the same 4G segment.
   5712 	 */
   5713 	rxq->rxq_ndesc = WM_NRXDESC;
   5714 	if (sc->sc_type == WM_T_82574)
   5715 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5716 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5717 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5718 	else
   5719 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5720 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5721 
   5722 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5723 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5724 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5725 		aprint_error_dev(sc->sc_dev,
   5726 		    "unable to allocate RX control data, error = %d\n",
   5727 		    error);
   5728 		goto fail_0;
   5729 	}
   5730 
   5731 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5732 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5733 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5734 		aprint_error_dev(sc->sc_dev,
   5735 		    "unable to map RX control data, error = %d\n", error);
   5736 		goto fail_1;
   5737 	}
   5738 
   5739 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5740 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5741 		aprint_error_dev(sc->sc_dev,
   5742 		    "unable to create RX control data DMA map, error = %d\n",
   5743 		    error);
   5744 		goto fail_2;
   5745 	}
   5746 
   5747 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5748 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5749 		aprint_error_dev(sc->sc_dev,
   5750 		    "unable to load RX control data DMA map, error = %d\n",
   5751 		    error);
   5752 		goto fail_3;
   5753 	}
   5754 
   5755 	return 0;
   5756 
   5757  fail_3:
   5758 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5759  fail_2:
   5760 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5761 	    rxq_descs_size);
   5762  fail_1:
   5763 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5764  fail_0:
   5765 	return error;
   5766 }
   5767 
   5768 static void
   5769 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5770 {
   5771 
   5772 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5773 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5774 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5775 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5776 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5777 }
   5778 
   5779 
   5780 static int
   5781 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5782 {
   5783 	int i, error;
   5784 
   5785 	/* Create the transmit buffer DMA maps. */
   5786 	WM_TXQUEUELEN(txq) =
   5787 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5788 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5789 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5790 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5791 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5792 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5793 			aprint_error_dev(sc->sc_dev,
   5794 			    "unable to create Tx DMA map %d, error = %d\n",
   5795 			    i, error);
   5796 			goto fail;
   5797 		}
   5798 	}
   5799 
   5800 	return 0;
   5801 
   5802  fail:
   5803 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5804 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5805 			bus_dmamap_destroy(sc->sc_dmat,
   5806 			    txq->txq_soft[i].txs_dmamap);
   5807 	}
   5808 	return error;
   5809 }
   5810 
   5811 static void
   5812 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5813 {
   5814 	int i;
   5815 
   5816 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5817 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5818 			bus_dmamap_destroy(sc->sc_dmat,
   5819 			    txq->txq_soft[i].txs_dmamap);
   5820 	}
   5821 }
   5822 
   5823 static int
   5824 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5825 {
   5826 	int i, error;
   5827 
   5828 	/* Create the receive buffer DMA maps. */
   5829 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5830 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5831 			    MCLBYTES, 0, 0,
   5832 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5833 			aprint_error_dev(sc->sc_dev,
   5834 			    "unable to create Rx DMA map %d error = %d\n",
   5835 			    i, error);
   5836 			goto fail;
   5837 		}
   5838 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5839 	}
   5840 
   5841 	return 0;
   5842 
   5843  fail:
   5844 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5845 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5846 			bus_dmamap_destroy(sc->sc_dmat,
   5847 			    rxq->rxq_soft[i].rxs_dmamap);
   5848 	}
   5849 	return error;
   5850 }
   5851 
   5852 static void
   5853 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5854 {
   5855 	int i;
   5856 
   5857 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5858 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5859 			bus_dmamap_destroy(sc->sc_dmat,
   5860 			    rxq->rxq_soft[i].rxs_dmamap);
   5861 	}
   5862 }
   5863 
   5864 /*
   5865  * wm_alloc_txrx_queues:
   5866  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   5867  */
   5868 static int
   5869 wm_alloc_txrx_queues(struct wm_softc *sc)
   5870 {
   5871 	int i, error, tx_done, rx_done;
   5872 
   5873 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5874 	    KM_SLEEP);
   5875 	if (sc->sc_queue == NULL) {
   5876 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5877 		error = ENOMEM;
   5878 		goto fail_0;
   5879 	}
   5880 
   5881 	/*
   5882 	 * For transmission
   5883 	 */
   5884 	error = 0;
   5885 	tx_done = 0;
   5886 	for (i = 0; i < sc->sc_nqueues; i++) {
   5887 #ifdef WM_EVENT_COUNTERS
   5888 		int j;
   5889 		const char *xname;
   5890 #endif
   5891 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5892 		txq->txq_sc = sc;
   5893 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5894 
   5895 		error = wm_alloc_tx_descs(sc, txq);
   5896 		if (error)
   5897 			break;
   5898 		error = wm_alloc_tx_buffer(sc, txq);
   5899 		if (error) {
   5900 			wm_free_tx_descs(sc, txq);
   5901 			break;
   5902 		}
   5903 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5904 		if (txq->txq_interq == NULL) {
   5905 			wm_free_tx_descs(sc, txq);
   5906 			wm_free_tx_buffer(sc, txq);
   5907 			error = ENOMEM;
   5908 			break;
   5909 		}
   5910 
   5911 #ifdef WM_EVENT_COUNTERS
   5912 		xname = device_xname(sc->sc_dev);
   5913 
   5914 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5915 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5916 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5917 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5918 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5919 
   5920 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5921 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5922 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5923 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5924 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5925 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5926 
   5927 		for (j = 0; j < WM_NTXSEGS; j++) {
   5928 			snprintf(txq->txq_txseg_evcnt_names[j],
   5929 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5930 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5931 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5932 		}
   5933 
   5934 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5935 
   5936 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5937 #endif /* WM_EVENT_COUNTERS */
   5938 
   5939 		tx_done++;
   5940 	}
   5941 	if (error)
   5942 		goto fail_1;
   5943 
   5944 	/*
   5945 	 * For receive
   5946 	 */
   5947 	error = 0;
   5948 	rx_done = 0;
   5949 	for (i = 0; i < sc->sc_nqueues; i++) {
   5950 #ifdef WM_EVENT_COUNTERS
   5951 		const char *xname;
   5952 #endif
   5953 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5954 		rxq->rxq_sc = sc;
   5955 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5956 
   5957 		error = wm_alloc_rx_descs(sc, rxq);
   5958 		if (error)
   5959 			break;
   5960 
   5961 		error = wm_alloc_rx_buffer(sc, rxq);
   5962 		if (error) {
   5963 			wm_free_rx_descs(sc, rxq);
   5964 			break;
   5965 		}
   5966 
   5967 #ifdef WM_EVENT_COUNTERS
   5968 		xname = device_xname(sc->sc_dev);
   5969 
   5970 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5971 
   5972 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5973 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5974 #endif /* WM_EVENT_COUNTERS */
   5975 
   5976 		rx_done++;
   5977 	}
   5978 	if (error)
   5979 		goto fail_2;
   5980 
   5981 	return 0;
   5982 
   5983  fail_2:
   5984 	for (i = 0; i < rx_done; i++) {
   5985 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5986 		wm_free_rx_buffer(sc, rxq);
   5987 		wm_free_rx_descs(sc, rxq);
   5988 		if (rxq->rxq_lock)
   5989 			mutex_obj_free(rxq->rxq_lock);
   5990 	}
   5991  fail_1:
   5992 	for (i = 0; i < tx_done; i++) {
   5993 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5994 		pcq_destroy(txq->txq_interq);
   5995 		wm_free_tx_buffer(sc, txq);
   5996 		wm_free_tx_descs(sc, txq);
   5997 		if (txq->txq_lock)
   5998 			mutex_obj_free(txq->txq_lock);
   5999 	}
   6000 
   6001 	kmem_free(sc->sc_queue,
   6002 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6003  fail_0:
   6004 	return error;
   6005 }
   6006 
   6007 /*
   6008  * wm_free_txrx_queues:
   6009  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6010  */
   6011 static void
   6012 wm_free_txrx_queues(struct wm_softc *sc)
   6013 {
   6014 	int i;
   6015 
   6016 	for (i = 0; i < sc->sc_nqueues; i++) {
   6017 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6018 
   6019 #ifdef WM_EVENT_COUNTERS
   6020 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6021 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6022 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6023 #endif /* WM_EVENT_COUNTERS */
   6024 
   6025 		wm_free_rx_buffer(sc, rxq);
   6026 		wm_free_rx_descs(sc, rxq);
   6027 		if (rxq->rxq_lock)
   6028 			mutex_obj_free(rxq->rxq_lock);
   6029 	}
   6030 
   6031 	for (i = 0; i < sc->sc_nqueues; i++) {
   6032 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6033 		struct mbuf *m;
   6034 #ifdef WM_EVENT_COUNTERS
   6035 		int j;
   6036 
   6037 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6038 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6039 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6040 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6041 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6042 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6043 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6044 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6045 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6046 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6047 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6048 
   6049 		for (j = 0; j < WM_NTXSEGS; j++)
   6050 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6051 
   6052 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6053 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6054 #endif /* WM_EVENT_COUNTERS */
   6055 
   6056 		/* drain txq_interq */
   6057 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6058 			m_freem(m);
   6059 		pcq_destroy(txq->txq_interq);
   6060 
   6061 		wm_free_tx_buffer(sc, txq);
   6062 		wm_free_tx_descs(sc, txq);
   6063 		if (txq->txq_lock)
   6064 			mutex_obj_free(txq->txq_lock);
   6065 	}
   6066 
   6067 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6068 }
   6069 
   6070 static void
   6071 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6072 {
   6073 
   6074 	KASSERT(mutex_owned(txq->txq_lock));
   6075 
   6076 	/* Initialize the transmit descriptor ring. */
   6077 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6078 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6079 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6080 	txq->txq_free = WM_NTXDESC(txq);
   6081 	txq->txq_next = 0;
   6082 }
   6083 
   6084 static void
   6085 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6086     struct wm_txqueue *txq)
   6087 {
   6088 
   6089 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6090 		device_xname(sc->sc_dev), __func__));
   6091 	KASSERT(mutex_owned(txq->txq_lock));
   6092 
   6093 	if (sc->sc_type < WM_T_82543) {
   6094 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6095 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6096 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6097 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6098 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6099 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6100 	} else {
   6101 		int qid = wmq->wmq_id;
   6102 
   6103 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6104 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6105 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6106 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6107 
   6108 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6109 			/*
   6110 			 * Don't write TDT before TCTL.EN is set.
   6111 			 * See the documentation.
   6112 			 */
   6113 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6114 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6115 			    | TXDCTL_WTHRESH(0));
   6116 		else {
   6117 			/* XXX should update with AIM? */
   6118 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6119 			if (sc->sc_type >= WM_T_82540) {
   6120 				/* should be same */
   6121 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6122 			}
   6123 
   6124 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6125 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6126 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6127 		}
   6128 	}
   6129 }
   6130 
   6131 static void
   6132 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6133 {
   6134 	int i;
   6135 
   6136 	KASSERT(mutex_owned(txq->txq_lock));
   6137 
   6138 	/* Initialize the transmit job descriptors. */
   6139 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6140 		txq->txq_soft[i].txs_mbuf = NULL;
   6141 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6142 	txq->txq_snext = 0;
   6143 	txq->txq_sdirty = 0;
   6144 }
   6145 
   6146 static void
   6147 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6148     struct wm_txqueue *txq)
   6149 {
   6150 
   6151 	KASSERT(mutex_owned(txq->txq_lock));
   6152 
   6153 	/*
   6154 	 * Set up some register offsets that are different between
   6155 	 * the i82542 and the i82543 and later chips.
   6156 	 */
   6157 	if (sc->sc_type < WM_T_82543)
   6158 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6159 	else
   6160 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6161 
   6162 	wm_init_tx_descs(sc, txq);
   6163 	wm_init_tx_regs(sc, wmq, txq);
   6164 	wm_init_tx_buffer(sc, txq);
   6165 }
   6166 
   6167 static void
   6168 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6169     struct wm_rxqueue *rxq)
   6170 {
   6171 
   6172 	KASSERT(mutex_owned(rxq->rxq_lock));
   6173 
   6174 	/*
   6175 	 * Initialize the receive descriptor and receive job
   6176 	 * descriptor rings.
   6177 	 */
   6178 	if (sc->sc_type < WM_T_82543) {
   6179 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6180 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6181 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6182 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6183 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6184 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6185 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6186 
   6187 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6188 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6189 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6190 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6191 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6192 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6193 	} else {
   6194 		int qid = wmq->wmq_id;
   6195 
   6196 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6197 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6198 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6199 
   6200 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6201 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6202 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6203 
   6204 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6205 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6206 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6207 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6208 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6209 			    | RXDCTL_WTHRESH(1));
   6210 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6211 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6212 		} else {
   6213 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6214 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6215 			/* XXX should update with AIM? */
   6216 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6217 			/* MUST be same */
   6218 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6219 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6220 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6221 		}
   6222 	}
   6223 }
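
/*
 * Buffer-size sketch for the SRRCTL write above (illustrative,
 * assuming SRRCTL_BSIZEPKT_SHIFT encodes 1 KB units): with
 * MCLBYTES = 2048, BSIZEPKT = 2048 >> 10 = 2, i.e. 2 KB receive
 * buffers; the panic() guards against cluster sizes that are not a
 * whole multiple of that unit.
 */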
   6224 
   6225 static int
   6226 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6227 {
   6228 	struct wm_rxsoft *rxs;
   6229 	int error, i;
   6230 
   6231 	KASSERT(mutex_owned(rxq->rxq_lock));
   6232 
   6233 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6234 		rxs = &rxq->rxq_soft[i];
   6235 		if (rxs->rxs_mbuf == NULL) {
   6236 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6237 				log(LOG_ERR, "%s: unable to allocate or map "
   6238 				    "rx buffer %d, error = %d\n",
   6239 				    device_xname(sc->sc_dev), i, error);
   6240 				/*
   6241 				 * XXX Should attempt to run with fewer receive
   6242 				 * XXX buffers instead of just failing.
   6243 				 */
   6244 				wm_rxdrain(rxq);
   6245 				return ENOMEM;
   6246 			}
   6247 		} else {
   6248 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6249 				wm_init_rxdesc(rxq, i);
    6250 			/*
    6251 			 * For 82575 and newer devices, the RX descriptors
    6252 			 * must be initialized after RCTL.EN is set in
    6253 			 * wm_set_filter().
    6254 			 */
   6255 		}
   6256 	}
   6257 	rxq->rxq_ptr = 0;
   6258 	rxq->rxq_discard = 0;
   6259 	WM_RXCHAIN_RESET(rxq);
   6260 
   6261 	return 0;
   6262 }
   6263 
   6264 static int
   6265 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6266     struct wm_rxqueue *rxq)
   6267 {
   6268 
   6269 	KASSERT(mutex_owned(rxq->rxq_lock));
   6270 
   6271 	/*
   6272 	 * Set up some register offsets that are different between
   6273 	 * the i82542 and the i82543 and later chips.
   6274 	 */
   6275 	if (sc->sc_type < WM_T_82543)
   6276 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6277 	else
   6278 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6279 
   6280 	wm_init_rx_regs(sc, wmq, rxq);
   6281 	return wm_init_rx_buffer(sc, rxq);
   6282 }
   6283 
    6284 /*
    6285  * wm_init_txrx_queues:
    6286  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
    6287  */
   6288 static int
   6289 wm_init_txrx_queues(struct wm_softc *sc)
   6290 {
   6291 	int i, error = 0;
   6292 
   6293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6294 		device_xname(sc->sc_dev), __func__));
   6295 
   6296 	for (i = 0; i < sc->sc_nqueues; i++) {
   6297 		struct wm_queue *wmq = &sc->sc_queue[i];
   6298 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6299 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6300 
   6301 		wmq->wmq_itr = sc->sc_itr_init;
   6302 
   6303 		mutex_enter(txq->txq_lock);
   6304 		wm_init_tx_queue(sc, wmq, txq);
   6305 		mutex_exit(txq->txq_lock);
   6306 
   6307 		mutex_enter(rxq->rxq_lock);
   6308 		error = wm_init_rx_queue(sc, wmq, rxq);
   6309 		mutex_exit(rxq->rxq_lock);
   6310 		if (error)
   6311 			break;
   6312 	}
   6313 
   6314 	return error;
   6315 }
   6316 
   6317 /*
   6318  * wm_tx_offload:
   6319  *
   6320  *	Set up TCP/IP checksumming parameters for the
   6321  *	specified packet.
   6322  */
   6323 static int
   6324 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6325     uint8_t *fieldsp)
   6326 {
   6327 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6328 	struct mbuf *m0 = txs->txs_mbuf;
   6329 	struct livengood_tcpip_ctxdesc *t;
   6330 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6331 	uint32_t ipcse;
   6332 	struct ether_header *eh;
   6333 	int offset, iphl;
   6334 	uint8_t fields;
   6335 
   6336 	/*
   6337 	 * XXX It would be nice if the mbuf pkthdr had offset
   6338 	 * fields for the protocol headers.
   6339 	 */
   6340 
   6341 	eh = mtod(m0, struct ether_header *);
   6342 	switch (htons(eh->ether_type)) {
   6343 	case ETHERTYPE_IP:
   6344 	case ETHERTYPE_IPV6:
   6345 		offset = ETHER_HDR_LEN;
   6346 		break;
   6347 
   6348 	case ETHERTYPE_VLAN:
   6349 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6350 		break;
   6351 
   6352 	default:
   6353 		/*
   6354 		 * Don't support this protocol or encapsulation.
   6355 		 */
   6356 		*fieldsp = 0;
   6357 		*cmdp = 0;
   6358 		return 0;
   6359 	}
   6360 
   6361 	if ((m0->m_pkthdr.csum_flags &
   6362 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6363 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6364 	} else {
   6365 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6366 	}
   6367 	ipcse = offset + iphl - 1;
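         	/*
         	 * Worked example: for an untagged IPv4 packet with a 20-byte
         	 * IP header, offset is 14 (ETHER_HDR_LEN) and iphl is 20, so
         	 * ipcse is 33: the offset of the last byte of the IP header,
         	 * where checksum coverage ends.
         	 */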
   6368 
   6369 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6370 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6371 	seg = 0;
   6372 	fields = 0;
   6373 
   6374 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6375 		int hlen = offset + iphl;
   6376 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6377 
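         		/*
         		 * For TSO the hardware recomputes the IP length and TCP
         		 * checksum for every segment it emits, so the code below
         		 * zeroes ip_len/ip6_plen and seeds th_sum with a
         		 * pseudo-header checksum computed without the length.
         		 */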
   6378 		if (__predict_false(m0->m_len <
   6379 				    (hlen + sizeof(struct tcphdr)))) {
   6380 			/*
   6381 			 * TCP/IP headers are not in the first mbuf; we need
   6382 			 * to do this the slow and painful way.  Let's just
   6383 			 * hope this doesn't happen very often.
   6384 			 */
   6385 			struct tcphdr th;
   6386 
   6387 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6388 
   6389 			m_copydata(m0, hlen, sizeof(th), &th);
   6390 			if (v4) {
   6391 				struct ip ip;
   6392 
   6393 				m_copydata(m0, offset, sizeof(ip), &ip);
   6394 				ip.ip_len = 0;
   6395 				m_copyback(m0,
   6396 				    offset + offsetof(struct ip, ip_len),
   6397 				    sizeof(ip.ip_len), &ip.ip_len);
   6398 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6399 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6400 			} else {
   6401 				struct ip6_hdr ip6;
   6402 
   6403 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6404 				ip6.ip6_plen = 0;
   6405 				m_copyback(m0,
   6406 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6407 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6408 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6409 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6410 			}
   6411 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6412 			    sizeof(th.th_sum), &th.th_sum);
   6413 
   6414 			hlen += th.th_off << 2;
   6415 		} else {
   6416 			/*
   6417 			 * TCP/IP headers are in the first mbuf; we can do
   6418 			 * this the easy way.
   6419 			 */
   6420 			struct tcphdr *th;
   6421 
   6422 			if (v4) {
   6423 				struct ip *ip =
   6424 				    (void *)(mtod(m0, char *) + offset);
   6425 				th = (void *)(mtod(m0, char *) + hlen);
   6426 
   6427 				ip->ip_len = 0;
   6428 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6429 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6430 			} else {
   6431 				struct ip6_hdr *ip6 =
   6432 				    (void *)(mtod(m0, char *) + offset);
   6433 				th = (void *)(mtod(m0, char *) + hlen);
   6434 
   6435 				ip6->ip6_plen = 0;
   6436 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6437 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6438 			}
   6439 			hlen += th->th_off << 2;
   6440 		}
   6441 
   6442 		if (v4) {
   6443 			WM_Q_EVCNT_INCR(txq, txtso);
   6444 			cmdlen |= WTX_TCPIP_CMD_IP;
   6445 		} else {
   6446 			WM_Q_EVCNT_INCR(txq, txtso6);
   6447 			ipcse = 0;
   6448 		}
   6449 		cmd |= WTX_TCPIP_CMD_TSE;
   6450 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6451 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6452 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6453 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6454 	}
   6455 
   6456 	/*
   6457 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6458 	 * offload feature, if we load the context descriptor, we
   6459 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6460 	 */
   6461 
   6462 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6463 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6464 	    WTX_TCPIP_IPCSE(ipcse);
   6465 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6466 		WM_Q_EVCNT_INCR(txq, txipsum);
   6467 		fields |= WTX_IXSM;
   6468 	}
   6469 
   6470 	offset += iphl;
   6471 
   6472 	if (m0->m_pkthdr.csum_flags &
   6473 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6474 		WM_Q_EVCNT_INCR(txq, txtusum);
   6475 		fields |= WTX_TXSM;
   6476 		tucs = WTX_TCPIP_TUCSS(offset) |
   6477 		    WTX_TCPIP_TUCSO(offset +
   6478 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6479 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6480 	} else if ((m0->m_pkthdr.csum_flags &
   6481 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6482 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6483 		fields |= WTX_TXSM;
   6484 		tucs = WTX_TCPIP_TUCSS(offset) |
   6485 		    WTX_TCPIP_TUCSO(offset +
   6486 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6487 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6488 	} else {
   6489 		/* Just initialize it to a valid TCP context. */
   6490 		tucs = WTX_TCPIP_TUCSS(offset) |
   6491 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6492 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6493 	}
   6494 
   6495 	/* Fill in the context descriptor. */
   6496 	t = (struct livengood_tcpip_ctxdesc *)
   6497 	    &txq->txq_descs[txq->txq_next];
   6498 	t->tcpip_ipcs = htole32(ipcs);
   6499 	t->tcpip_tucs = htole32(tucs);
   6500 	t->tcpip_cmdlen = htole32(cmdlen);
   6501 	t->tcpip_seg = htole32(seg);
   6502 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6503 
   6504 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6505 	txs->txs_ndesc++;
   6506 
   6507 	*cmdp = cmd;
   6508 	*fieldsp = fields;
   6509 
   6510 	return 0;
   6511 }
   6512 
   6513 static inline int
   6514 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6515 {
   6516 	struct wm_softc *sc = ifp->if_softc;
   6517 	u_int cpuid = cpu_index(curcpu());
   6518 
    6519 	/*
    6520 	 * Currently, a simple strategy that distributes packets by
    6521 	 * CPU index.  TODO:
    6522 	 * distribute by flowid (RSS hash value).
    6523 	 */
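         	/*
         	 * Hypothetical example: with ncpu = 8, sc_nqueues = 4 and
         	 * sc_affinity_offset = 0, CPU 5 always maps to queue
         	 * 5 % 4 = 1; the "+ ncpu" keeps the dividend non-negative.
         	 */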
    6524 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6525 }
   6526 
   6527 /*
   6528  * wm_start:		[ifnet interface function]
   6529  *
   6530  *	Start packet transmission on the interface.
   6531  */
   6532 static void
   6533 wm_start(struct ifnet *ifp)
   6534 {
   6535 	struct wm_softc *sc = ifp->if_softc;
   6536 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6537 
   6538 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6539 
   6540 	/*
   6541 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6542 	 */
   6543 
   6544 	mutex_enter(txq->txq_lock);
   6545 	if (!txq->txq_stopping)
   6546 		wm_start_locked(ifp);
   6547 	mutex_exit(txq->txq_lock);
   6548 }
   6549 
   6550 static void
   6551 wm_start_locked(struct ifnet *ifp)
   6552 {
   6553 	struct wm_softc *sc = ifp->if_softc;
   6554 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6555 
   6556 	wm_send_common_locked(ifp, txq, false);
   6557 }
   6558 
   6559 static int
   6560 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6561 {
   6562 	int qid;
   6563 	struct wm_softc *sc = ifp->if_softc;
   6564 	struct wm_txqueue *txq;
   6565 
   6566 	qid = wm_select_txqueue(ifp, m);
   6567 	txq = &sc->sc_queue[qid].wmq_txq;
   6568 
   6569 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6570 		m_freem(m);
   6571 		WM_Q_EVCNT_INCR(txq, txdrop);
   6572 		return ENOBUFS;
   6573 	}
   6574 
   6575 	/*
   6576 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6577 	 */
   6578 	ifp->if_obytes += m->m_pkthdr.len;
   6579 	if (m->m_flags & M_MCAST)
   6580 		ifp->if_omcasts++;
   6581 
   6582 	if (mutex_tryenter(txq->txq_lock)) {
   6583 		if (!txq->txq_stopping)
   6584 			wm_transmit_locked(ifp, txq);
   6585 		mutex_exit(txq->txq_lock);
   6586 	}
   6587 
   6588 	return 0;
   6589 }
   6590 
   6591 static void
   6592 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6593 {
   6594 
   6595 	wm_send_common_locked(ifp, txq, true);
   6596 }
   6597 
   6598 static void
   6599 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6600     bool is_transmit)
   6601 {
   6602 	struct wm_softc *sc = ifp->if_softc;
   6603 	struct mbuf *m0;
   6604 	struct m_tag *mtag;
   6605 	struct wm_txsoft *txs;
   6606 	bus_dmamap_t dmamap;
   6607 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6608 	bus_addr_t curaddr;
   6609 	bus_size_t seglen, curlen;
   6610 	uint32_t cksumcmd;
   6611 	uint8_t cksumfields;
   6612 
   6613 	KASSERT(mutex_owned(txq->txq_lock));
   6614 
   6615 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6616 		return;
   6617 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6618 		return;
   6619 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6620 		return;
   6621 
   6622 	/* Remember the previous number of free descriptors. */
   6623 	ofree = txq->txq_free;
   6624 
   6625 	/*
   6626 	 * Loop through the send queue, setting up transmit descriptors
   6627 	 * until we drain the queue, or use up all available transmit
   6628 	 * descriptors.
   6629 	 */
   6630 	for (;;) {
   6631 		m0 = NULL;
   6632 
   6633 		/* Get a work queue entry. */
   6634 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6635 			wm_txeof(sc, txq);
   6636 			if (txq->txq_sfree == 0) {
   6637 				DPRINTF(WM_DEBUG_TX,
   6638 				    ("%s: TX: no free job descriptors\n",
   6639 					device_xname(sc->sc_dev)));
   6640 				WM_Q_EVCNT_INCR(txq, txsstall);
   6641 				break;
   6642 			}
   6643 		}
   6644 
   6645 		/* Grab a packet off the queue. */
   6646 		if (is_transmit)
   6647 			m0 = pcq_get(txq->txq_interq);
   6648 		else
   6649 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6650 		if (m0 == NULL)
   6651 			break;
   6652 
   6653 		DPRINTF(WM_DEBUG_TX,
   6654 		    ("%s: TX: have packet to transmit: %p\n",
   6655 		    device_xname(sc->sc_dev), m0));
   6656 
   6657 		txs = &txq->txq_soft[txq->txq_snext];
   6658 		dmamap = txs->txs_dmamap;
   6659 
   6660 		use_tso = (m0->m_pkthdr.csum_flags &
   6661 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6662 
   6663 		/*
   6664 		 * So says the Linux driver:
   6665 		 * The controller does a simple calculation to make sure
   6666 		 * there is enough room in the FIFO before initiating the
   6667 		 * DMA for each buffer.  The calc is:
   6668 		 *	4 = ceil(buffer len / MSS)
   6669 		 * To make sure we don't overrun the FIFO, adjust the max
   6670 		 * buffer len if the MSS drops.
   6671 		 */
   6672 		dmamap->dm_maxsegsz =
   6673 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6674 		    ? m0->m_pkthdr.segsz << 2
   6675 		    : WTX_MAX_LEN;
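         		/*
         		 * Example with illustrative numbers: for an MSS of 1448,
         		 * dm_maxsegsz becomes 4 * 1448 = 5792, capping each DMA
         		 * segment at four segments' worth of payload to match
         		 * the controller's FIFO-space assumption quoted above.
         		 */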
   6676 
   6677 		/*
   6678 		 * Load the DMA map.  If this fails, the packet either
   6679 		 * didn't fit in the allotted number of segments, or we
   6680 		 * were short on resources.  For the too-many-segments
   6681 		 * case, we simply report an error and drop the packet,
   6682 		 * since we can't sanely copy a jumbo packet to a single
   6683 		 * buffer.
   6684 		 */
   6685 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6686 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6687 		if (error) {
   6688 			if (error == EFBIG) {
   6689 				WM_Q_EVCNT_INCR(txq, txdrop);
   6690 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6691 				    "DMA segments, dropping...\n",
   6692 				    device_xname(sc->sc_dev));
   6693 				wm_dump_mbuf_chain(sc, m0);
   6694 				m_freem(m0);
   6695 				continue;
   6696 			}
    6697 			/* Short on resources, just stop for now. */
   6698 			DPRINTF(WM_DEBUG_TX,
   6699 			    ("%s: TX: dmamap load failed: %d\n",
   6700 			    device_xname(sc->sc_dev), error));
   6701 			break;
   6702 		}
   6703 
   6704 		segs_needed = dmamap->dm_nsegs;
   6705 		if (use_tso) {
   6706 			/* For sentinel descriptor; see below. */
   6707 			segs_needed++;
   6708 		}
   6709 
   6710 		/*
   6711 		 * Ensure we have enough descriptors free to describe
   6712 		 * the packet.  Note, we always reserve one descriptor
   6713 		 * at the end of the ring due to the semantics of the
   6714 		 * TDT register, plus one more in the event we need
   6715 		 * to load offload context.
   6716 		 */
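         		/*
         		 * In other words, a packet with segs_needed == 4
         		 * proceeds only if txq_free >= 6: four data descriptors,
         		 * one reserved for TDT and one for a possible context
         		 * descriptor.
         		 */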
   6717 		if (segs_needed > txq->txq_free - 2) {
   6718 			/*
   6719 			 * Not enough free descriptors to transmit this
   6720 			 * packet.  We haven't committed anything yet,
   6721 			 * so just unload the DMA map, put the packet
    6722 			 * back on the queue, and punt.  Notify the upper
   6723 			 * layer that there are no more slots left.
   6724 			 */
   6725 			DPRINTF(WM_DEBUG_TX,
   6726 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6727 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6728 			    segs_needed, txq->txq_free - 1));
   6729 			if (!is_transmit)
   6730 				ifp->if_flags |= IFF_OACTIVE;
   6731 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6732 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6733 			WM_Q_EVCNT_INCR(txq, txdstall);
   6734 			break;
   6735 		}
   6736 
   6737 		/*
   6738 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6739 		 * once we know we can transmit the packet, since we
   6740 		 * do some internal FIFO space accounting here.
   6741 		 */
   6742 		if (sc->sc_type == WM_T_82547 &&
   6743 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6744 			DPRINTF(WM_DEBUG_TX,
   6745 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6746 			    device_xname(sc->sc_dev)));
   6747 			if (!is_transmit)
   6748 				ifp->if_flags |= IFF_OACTIVE;
   6749 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6750 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6751 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6752 			break;
   6753 		}
   6754 
   6755 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6756 
   6757 		DPRINTF(WM_DEBUG_TX,
   6758 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6759 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6760 
   6761 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6762 
   6763 		/*
   6764 		 * Store a pointer to the packet so that we can free it
   6765 		 * later.
   6766 		 *
   6767 		 * Initially, we consider the number of descriptors the
    6768 		 * packet uses to be the number of DMA segments.  This may
    6769 		 * be incremented by 1 if we do checksum offload (a descriptor
   6770 		 * is used to set the checksum context).
   6771 		 */
   6772 		txs->txs_mbuf = m0;
   6773 		txs->txs_firstdesc = txq->txq_next;
   6774 		txs->txs_ndesc = segs_needed;
   6775 
   6776 		/* Set up offload parameters for this packet. */
   6777 		if (m0->m_pkthdr.csum_flags &
   6778 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6779 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6780 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6781 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6782 					  &cksumfields) != 0) {
   6783 				/* Error message already displayed. */
   6784 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6785 				continue;
   6786 			}
   6787 		} else {
   6788 			cksumcmd = 0;
   6789 			cksumfields = 0;
   6790 		}
   6791 
   6792 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6793 
   6794 		/* Sync the DMA map. */
   6795 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6796 		    BUS_DMASYNC_PREWRITE);
   6797 
   6798 		/* Initialize the transmit descriptor. */
   6799 		for (nexttx = txq->txq_next, seg = 0;
   6800 		     seg < dmamap->dm_nsegs; seg++) {
   6801 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6802 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6803 			     seglen != 0;
   6804 			     curaddr += curlen, seglen -= curlen,
   6805 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6806 				curlen = seglen;
   6807 
   6808 				/*
   6809 				 * So says the Linux driver:
   6810 				 * Work around for premature descriptor
   6811 				 * write-backs in TSO mode.  Append a
   6812 				 * 4-byte sentinel descriptor.
   6813 				 */
   6814 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6815 				    curlen > 8)
   6816 					curlen -= 4;
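         				/*
         				 * The 4 bytes trimmed here are not
         				 * lost: seglen is only decremented by
         				 * the shortened curlen, so the next
         				 * pass of this loop emits them as the
         				 * 4-byte sentinel descriptor.
         				 */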
   6817 
   6818 				wm_set_dma_addr(
   6819 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6820 				txq->txq_descs[nexttx].wtx_cmdlen
   6821 				    = htole32(cksumcmd | curlen);
   6822 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6823 				    = 0;
   6824 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6825 				    = cksumfields;
   6826 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6827 				lasttx = nexttx;
   6828 
   6829 				DPRINTF(WM_DEBUG_TX,
   6830 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6831 				     "len %#04zx\n",
   6832 				    device_xname(sc->sc_dev), nexttx,
   6833 				    (uint64_t)curaddr, curlen));
   6834 			}
   6835 		}
   6836 
   6837 		KASSERT(lasttx != -1);
   6838 
   6839 		/*
   6840 		 * Set up the command byte on the last descriptor of
   6841 		 * the packet.  If we're in the interrupt delay window,
   6842 		 * delay the interrupt.
   6843 		 */
   6844 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6845 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6846 
   6847 		/*
   6848 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6849 		 * up the descriptor to encapsulate the packet for us.
   6850 		 *
   6851 		 * This is only valid on the last descriptor of the packet.
   6852 		 */
   6853 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6854 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6855 			    htole32(WTX_CMD_VLE);
   6856 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6857 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6858 		}
   6859 
   6860 		txs->txs_lastdesc = lasttx;
   6861 
   6862 		DPRINTF(WM_DEBUG_TX,
   6863 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6864 		    device_xname(sc->sc_dev),
   6865 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6866 
   6867 		/* Sync the descriptors we're using. */
   6868 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6869 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6870 
   6871 		/* Give the packet to the chip. */
   6872 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6873 
   6874 		DPRINTF(WM_DEBUG_TX,
   6875 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6876 
   6877 		DPRINTF(WM_DEBUG_TX,
   6878 		    ("%s: TX: finished transmitting packet, job %d\n",
   6879 		    device_xname(sc->sc_dev), txq->txq_snext));
   6880 
   6881 		/* Advance the tx pointer. */
   6882 		txq->txq_free -= txs->txs_ndesc;
   6883 		txq->txq_next = nexttx;
   6884 
   6885 		txq->txq_sfree--;
   6886 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6887 
   6888 		/* Pass the packet to any BPF listeners. */
   6889 		bpf_mtap(ifp, m0);
   6890 	}
   6891 
   6892 	if (m0 != NULL) {
   6893 		if (!is_transmit)
   6894 			ifp->if_flags |= IFF_OACTIVE;
   6895 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6896 		WM_Q_EVCNT_INCR(txq, txdrop);
   6897 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6898 			__func__));
   6899 		m_freem(m0);
   6900 	}
   6901 
   6902 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6903 		/* No more slots; notify upper layer. */
   6904 		if (!is_transmit)
   6905 			ifp->if_flags |= IFF_OACTIVE;
   6906 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6907 	}
   6908 
   6909 	if (txq->txq_free != ofree) {
   6910 		/* Set a watchdog timer in case the chip flakes out. */
   6911 		ifp->if_timer = 5;
   6912 	}
   6913 }
   6914 
   6915 /*
   6916  * wm_nq_tx_offload:
   6917  *
   6918  *	Set up TCP/IP checksumming parameters for the
   6919  *	specified packet, for NEWQUEUE devices
   6920  */
   6921 static int
   6922 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6923     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6924 {
   6925 	struct mbuf *m0 = txs->txs_mbuf;
   6926 	struct m_tag *mtag;
   6927 	uint32_t vl_len, mssidx, cmdc;
   6928 	struct ether_header *eh;
   6929 	int offset, iphl;
   6930 
   6931 	/*
   6932 	 * XXX It would be nice if the mbuf pkthdr had offset
   6933 	 * fields for the protocol headers.
   6934 	 */
   6935 	*cmdlenp = 0;
   6936 	*fieldsp = 0;
   6937 
   6938 	eh = mtod(m0, struct ether_header *);
   6939 	switch (htons(eh->ether_type)) {
   6940 	case ETHERTYPE_IP:
   6941 	case ETHERTYPE_IPV6:
   6942 		offset = ETHER_HDR_LEN;
   6943 		break;
   6944 
   6945 	case ETHERTYPE_VLAN:
   6946 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6947 		break;
   6948 
   6949 	default:
   6950 		/* Don't support this protocol or encapsulation. */
   6951 		*do_csum = false;
   6952 		return 0;
   6953 	}
   6954 	*do_csum = true;
   6955 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6956 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6957 
   6958 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6959 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6960 
   6961 	if ((m0->m_pkthdr.csum_flags &
   6962 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6963 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6964 	} else {
   6965 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6966 	}
   6967 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6968 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6969 
   6970 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6971 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6972 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6973 		*cmdlenp |= NQTX_CMD_VLE;
   6974 	}
   6975 
   6976 	mssidx = 0;
   6977 
   6978 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6979 		int hlen = offset + iphl;
   6980 		int tcp_hlen;
   6981 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6982 
   6983 		if (__predict_false(m0->m_len <
   6984 				    (hlen + sizeof(struct tcphdr)))) {
   6985 			/*
   6986 			 * TCP/IP headers are not in the first mbuf; we need
   6987 			 * to do this the slow and painful way.  Let's just
   6988 			 * hope this doesn't happen very often.
   6989 			 */
   6990 			struct tcphdr th;
   6991 
   6992 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6993 
   6994 			m_copydata(m0, hlen, sizeof(th), &th);
   6995 			if (v4) {
   6996 				struct ip ip;
   6997 
   6998 				m_copydata(m0, offset, sizeof(ip), &ip);
   6999 				ip.ip_len = 0;
   7000 				m_copyback(m0,
   7001 				    offset + offsetof(struct ip, ip_len),
   7002 				    sizeof(ip.ip_len), &ip.ip_len);
   7003 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7004 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7005 			} else {
   7006 				struct ip6_hdr ip6;
   7007 
   7008 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7009 				ip6.ip6_plen = 0;
   7010 				m_copyback(m0,
   7011 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7012 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7013 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7014 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7015 			}
   7016 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7017 			    sizeof(th.th_sum), &th.th_sum);
   7018 
   7019 			tcp_hlen = th.th_off << 2;
   7020 		} else {
   7021 			/*
   7022 			 * TCP/IP headers are in the first mbuf; we can do
   7023 			 * this the easy way.
   7024 			 */
   7025 			struct tcphdr *th;
   7026 
   7027 			if (v4) {
   7028 				struct ip *ip =
   7029 				    (void *)(mtod(m0, char *) + offset);
   7030 				th = (void *)(mtod(m0, char *) + hlen);
   7031 
   7032 				ip->ip_len = 0;
   7033 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7034 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7035 			} else {
   7036 				struct ip6_hdr *ip6 =
   7037 				    (void *)(mtod(m0, char *) + offset);
   7038 				th = (void *)(mtod(m0, char *) + hlen);
   7039 
   7040 				ip6->ip6_plen = 0;
   7041 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7042 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7043 			}
   7044 			tcp_hlen = th->th_off << 2;
   7045 		}
   7046 		hlen += tcp_hlen;
   7047 		*cmdlenp |= NQTX_CMD_TSE;
   7048 
   7049 		if (v4) {
   7050 			WM_Q_EVCNT_INCR(txq, txtso);
   7051 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7052 		} else {
   7053 			WM_Q_EVCNT_INCR(txq, txtso6);
   7054 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7055 		}
    7056 		*fieldsp |= ((m0->m_pkthdr.len - hlen) <<
         		    NQTXD_FIELDS_PAYLEN_SHIFT);
    7057 		KASSERT(((m0->m_pkthdr.len - hlen) &
         		    ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7058 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7059 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7060 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7061 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7062 	} else {
   7063 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7064 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7065 	}
   7066 
   7067 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7068 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7069 		cmdc |= NQTXC_CMD_IP4;
   7070 	}
   7071 
   7072 	if (m0->m_pkthdr.csum_flags &
   7073 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7074 		WM_Q_EVCNT_INCR(txq, txtusum);
   7075 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7076 			cmdc |= NQTXC_CMD_TCP;
   7077 		} else {
   7078 			cmdc |= NQTXC_CMD_UDP;
   7079 		}
   7080 		cmdc |= NQTXC_CMD_IP4;
   7081 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7082 	}
   7083 	if (m0->m_pkthdr.csum_flags &
   7084 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7085 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7086 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7087 			cmdc |= NQTXC_CMD_TCP;
   7088 		} else {
   7089 			cmdc |= NQTXC_CMD_UDP;
   7090 		}
   7091 		cmdc |= NQTXC_CMD_IP6;
   7092 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7093 	}
   7094 
   7095 	/* Fill in the context descriptor. */
   7096 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7097 	    htole32(vl_len);
   7098 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7099 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7100 	    htole32(cmdc);
   7101 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7102 	    htole32(mssidx);
   7103 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7104 	DPRINTF(WM_DEBUG_TX,
   7105 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7106 	    txq->txq_next, 0, vl_len));
   7107 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7108 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7109 	txs->txs_ndesc++;
   7110 	return 0;
   7111 }
   7112 
   7113 /*
   7114  * wm_nq_start:		[ifnet interface function]
   7115  *
   7116  *	Start packet transmission on the interface for NEWQUEUE devices
   7117  */
   7118 static void
   7119 wm_nq_start(struct ifnet *ifp)
   7120 {
   7121 	struct wm_softc *sc = ifp->if_softc;
   7122 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7123 
   7124 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7125 
   7126 	/*
   7127 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7128 	 */
   7129 
   7130 	mutex_enter(txq->txq_lock);
   7131 	if (!txq->txq_stopping)
   7132 		wm_nq_start_locked(ifp);
   7133 	mutex_exit(txq->txq_lock);
   7134 }
   7135 
   7136 static void
   7137 wm_nq_start_locked(struct ifnet *ifp)
   7138 {
   7139 	struct wm_softc *sc = ifp->if_softc;
   7140 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7141 
   7142 	wm_nq_send_common_locked(ifp, txq, false);
   7143 }
   7144 
   7145 static int
   7146 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7147 {
   7148 	int qid;
   7149 	struct wm_softc *sc = ifp->if_softc;
   7150 	struct wm_txqueue *txq;
   7151 
   7152 	qid = wm_select_txqueue(ifp, m);
   7153 	txq = &sc->sc_queue[qid].wmq_txq;
   7154 
   7155 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7156 		m_freem(m);
   7157 		WM_Q_EVCNT_INCR(txq, txdrop);
   7158 		return ENOBUFS;
   7159 	}
   7160 
   7161 	/*
   7162 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7163 	 */
   7164 	ifp->if_obytes += m->m_pkthdr.len;
   7165 	if (m->m_flags & M_MCAST)
   7166 		ifp->if_omcasts++;
   7167 
    7168 	/*
    7169 	 * This mutex_tryenter() can fail at run time in two situations:
    7170 	 *     (1) contention with the interrupt handler
    7171 	 *         (wm_txrxintr_msix())
    7172 	 *     (2) contention with the deferred if_start softint
    7173 	 *         (wm_handle_queue())
    7174 	 * In either case, the last packet enqueued to txq->txq_interq
    7175 	 * is eventually dequeued by wm_deferred_start_locked(), so it
    7176 	 * does not get stuck.
    7177 	 */
   7178 	if (mutex_tryenter(txq->txq_lock)) {
   7179 		if (!txq->txq_stopping)
   7180 			wm_nq_transmit_locked(ifp, txq);
   7181 		mutex_exit(txq->txq_lock);
   7182 	}
   7183 
   7184 	return 0;
   7185 }
   7186 
   7187 static void
   7188 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7189 {
   7190 
   7191 	wm_nq_send_common_locked(ifp, txq, true);
   7192 }
   7193 
   7194 static void
   7195 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7196     bool is_transmit)
   7197 {
   7198 	struct wm_softc *sc = ifp->if_softc;
   7199 	struct mbuf *m0;
   7200 	struct m_tag *mtag;
   7201 	struct wm_txsoft *txs;
   7202 	bus_dmamap_t dmamap;
   7203 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7204 	bool do_csum, sent;
   7205 
   7206 	KASSERT(mutex_owned(txq->txq_lock));
   7207 
   7208 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7209 		return;
   7210 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7211 		return;
   7212 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7213 		return;
   7214 
   7215 	sent = false;
   7216 
   7217 	/*
   7218 	 * Loop through the send queue, setting up transmit descriptors
   7219 	 * until we drain the queue, or use up all available transmit
   7220 	 * descriptors.
   7221 	 */
   7222 	for (;;) {
   7223 		m0 = NULL;
   7224 
   7225 		/* Get a work queue entry. */
   7226 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7227 			wm_txeof(sc, txq);
   7228 			if (txq->txq_sfree == 0) {
   7229 				DPRINTF(WM_DEBUG_TX,
   7230 				    ("%s: TX: no free job descriptors\n",
   7231 					device_xname(sc->sc_dev)));
   7232 				WM_Q_EVCNT_INCR(txq, txsstall);
   7233 				break;
   7234 			}
   7235 		}
   7236 
   7237 		/* Grab a packet off the queue. */
   7238 		if (is_transmit)
   7239 			m0 = pcq_get(txq->txq_interq);
   7240 		else
   7241 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7242 		if (m0 == NULL)
   7243 			break;
   7244 
   7245 		DPRINTF(WM_DEBUG_TX,
   7246 		    ("%s: TX: have packet to transmit: %p\n",
   7247 		    device_xname(sc->sc_dev), m0));
   7248 
   7249 		txs = &txq->txq_soft[txq->txq_snext];
   7250 		dmamap = txs->txs_dmamap;
   7251 
   7252 		/*
   7253 		 * Load the DMA map.  If this fails, the packet either
   7254 		 * didn't fit in the allotted number of segments, or we
   7255 		 * were short on resources.  For the too-many-segments
   7256 		 * case, we simply report an error and drop the packet,
   7257 		 * since we can't sanely copy a jumbo packet to a single
   7258 		 * buffer.
   7259 		 */
   7260 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7261 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7262 		if (error) {
   7263 			if (error == EFBIG) {
   7264 				WM_Q_EVCNT_INCR(txq, txdrop);
   7265 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7266 				    "DMA segments, dropping...\n",
   7267 				    device_xname(sc->sc_dev));
   7268 				wm_dump_mbuf_chain(sc, m0);
   7269 				m_freem(m0);
   7270 				continue;
   7271 			}
   7272 			/* Short on resources, just stop for now. */
   7273 			DPRINTF(WM_DEBUG_TX,
   7274 			    ("%s: TX: dmamap load failed: %d\n",
   7275 			    device_xname(sc->sc_dev), error));
   7276 			break;
   7277 		}
   7278 
   7279 		segs_needed = dmamap->dm_nsegs;
   7280 
   7281 		/*
   7282 		 * Ensure we have enough descriptors free to describe
   7283 		 * the packet.  Note, we always reserve one descriptor
   7284 		 * at the end of the ring due to the semantics of the
   7285 		 * TDT register, plus one more in the event we need
   7286 		 * to load offload context.
   7287 		 */
   7288 		if (segs_needed > txq->txq_free - 2) {
   7289 			/*
   7290 			 * Not enough free descriptors to transmit this
   7291 			 * packet.  We haven't committed anything yet,
   7292 			 * so just unload the DMA map, put the packet
    7293 			 * back on the queue, and punt.  Notify the upper
   7294 			 * layer that there are no more slots left.
   7295 			 */
   7296 			DPRINTF(WM_DEBUG_TX,
   7297 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7298 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7299 			    segs_needed, txq->txq_free - 1));
   7300 			if (!is_transmit)
   7301 				ifp->if_flags |= IFF_OACTIVE;
   7302 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7303 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7304 			WM_Q_EVCNT_INCR(txq, txdstall);
   7305 			break;
   7306 		}
   7307 
   7308 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7309 
   7310 		DPRINTF(WM_DEBUG_TX,
   7311 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7312 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7313 
   7314 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7315 
   7316 		/*
   7317 		 * Store a pointer to the packet so that we can free it
   7318 		 * later.
   7319 		 *
   7320 		 * Initially, we consider the number of descriptors the
    7321 		 * packet uses to be the number of DMA segments.  This may
    7322 		 * be incremented by 1 if we do checksum offload (a descriptor
   7323 		 * is used to set the checksum context).
   7324 		 */
   7325 		txs->txs_mbuf = m0;
   7326 		txs->txs_firstdesc = txq->txq_next;
   7327 		txs->txs_ndesc = segs_needed;
   7328 
   7329 		/* Set up offload parameters for this packet. */
   7330 		uint32_t cmdlen, fields, dcmdlen;
   7331 		if (m0->m_pkthdr.csum_flags &
   7332 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7333 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7334 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7335 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7336 			    &do_csum) != 0) {
   7337 				/* Error message already displayed. */
   7338 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7339 				continue;
   7340 			}
   7341 		} else {
   7342 			do_csum = false;
   7343 			cmdlen = 0;
   7344 			fields = 0;
   7345 		}
   7346 
   7347 		/* Sync the DMA map. */
   7348 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7349 		    BUS_DMASYNC_PREWRITE);
   7350 
   7351 		/* Initialize the first transmit descriptor. */
   7352 		nexttx = txq->txq_next;
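         		/*
         		 * With no offload context a plain legacy descriptor is
         		 * used; otherwise the first data descriptor uses the
         		 * advanced format so it can pick up the advanced context
         		 * descriptor written by wm_nq_tx_offload() above.
         		 */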
   7353 		if (!do_csum) {
   7354 			/* setup a legacy descriptor */
   7355 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7356 			    dmamap->dm_segs[0].ds_addr);
   7357 			txq->txq_descs[nexttx].wtx_cmdlen =
   7358 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7359 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7360 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7361 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7362 			    NULL) {
   7363 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7364 				    htole32(WTX_CMD_VLE);
   7365 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7366 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7367 			} else {
   7368 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7369 			}
   7370 			dcmdlen = 0;
   7371 		} else {
   7372 			/* setup an advanced data descriptor */
   7373 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7374 			    htole64(dmamap->dm_segs[0].ds_addr);
   7375 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7376 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7377 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7378 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7379 			    htole32(fields);
   7380 			DPRINTF(WM_DEBUG_TX,
   7381 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7382 			    device_xname(sc->sc_dev), nexttx,
   7383 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7384 			DPRINTF(WM_DEBUG_TX,
   7385 			    ("\t 0x%08x%08x\n", fields,
   7386 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7387 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7388 		}
   7389 
   7390 		lasttx = nexttx;
   7391 		nexttx = WM_NEXTTX(txq, nexttx);
    7392 		/*
    7393 		 * Fill in the next descriptors.  The legacy and advanced
    7394 		 * formats are the same here.
    7395 		 */
   7396 		for (seg = 1; seg < dmamap->dm_nsegs;
   7397 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7398 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7399 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7400 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7401 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7402 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7403 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7404 			lasttx = nexttx;
   7405 
   7406 			DPRINTF(WM_DEBUG_TX,
   7407 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7408 			     "len %#04zx\n",
   7409 			    device_xname(sc->sc_dev), nexttx,
   7410 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7411 			    dmamap->dm_segs[seg].ds_len));
   7412 		}
   7413 
   7414 		KASSERT(lasttx != -1);
   7415 
   7416 		/*
   7417 		 * Set up the command byte on the last descriptor of
   7418 		 * the packet.  If we're in the interrupt delay window,
   7419 		 * delay the interrupt.
   7420 		 */
   7421 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7422 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7423 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7424 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7425 
   7426 		txs->txs_lastdesc = lasttx;
   7427 
   7428 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7429 		    device_xname(sc->sc_dev),
   7430 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7431 
   7432 		/* Sync the descriptors we're using. */
   7433 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7434 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7435 
   7436 		/* Give the packet to the chip. */
   7437 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7438 		sent = true;
   7439 
   7440 		DPRINTF(WM_DEBUG_TX,
   7441 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7442 
   7443 		DPRINTF(WM_DEBUG_TX,
   7444 		    ("%s: TX: finished transmitting packet, job %d\n",
   7445 		    device_xname(sc->sc_dev), txq->txq_snext));
   7446 
   7447 		/* Advance the tx pointer. */
   7448 		txq->txq_free -= txs->txs_ndesc;
   7449 		txq->txq_next = nexttx;
   7450 
   7451 		txq->txq_sfree--;
   7452 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7453 
   7454 		/* Pass the packet to any BPF listeners. */
   7455 		bpf_mtap(ifp, m0);
   7456 	}
   7457 
   7458 	if (m0 != NULL) {
   7459 		if (!is_transmit)
   7460 			ifp->if_flags |= IFF_OACTIVE;
   7461 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7462 		WM_Q_EVCNT_INCR(txq, txdrop);
   7463 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7464 			__func__));
   7465 		m_freem(m0);
   7466 	}
   7467 
   7468 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7469 		/* No more slots; notify upper layer. */
   7470 		if (!is_transmit)
   7471 			ifp->if_flags |= IFF_OACTIVE;
   7472 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7473 	}
   7474 
   7475 	if (sent) {
   7476 		/* Set a watchdog timer in case the chip flakes out. */
   7477 		ifp->if_timer = 5;
   7478 	}
   7479 }
   7480 
   7481 static void
   7482 wm_deferred_start_locked(struct wm_txqueue *txq)
   7483 {
   7484 	struct wm_softc *sc = txq->txq_sc;
   7485 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7486 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7487 	int qid = wmq->wmq_id;
   7488 
   7489 	KASSERT(mutex_owned(txq->txq_lock));
   7490 
   7491 	if (txq->txq_stopping) {
   7492 		mutex_exit(txq->txq_lock);
   7493 		return;
   7494 	}
   7495 
   7496 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7497 		/* XXX need for ALTQ */
   7498 		if (qid == 0)
   7499 			wm_nq_start_locked(ifp);
   7500 		wm_nq_transmit_locked(ifp, txq);
   7501 	} else {
   7502 		/* XXX need for ALTQ */
   7503 		if (qid == 0)
   7504 			wm_start_locked(ifp);
   7505 		wm_transmit_locked(ifp, txq);
   7506 	}
   7507 }
   7508 
   7509 /* Interrupt */
   7510 
   7511 /*
   7512  * wm_txeof:
   7513  *
   7514  *	Helper; handle transmit interrupts.
   7515  */
   7516 static int
   7517 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7518 {
   7519 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7520 	struct wm_txsoft *txs;
   7521 	bool processed = false;
   7522 	int count = 0;
   7523 	int i;
   7524 	uint8_t status;
   7525 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7526 
   7527 	KASSERT(mutex_owned(txq->txq_lock));
   7528 
   7529 	if (txq->txq_stopping)
   7530 		return 0;
   7531 
   7532 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7533 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7534 	if (wmq->wmq_id == 0)
   7535 		ifp->if_flags &= ~IFF_OACTIVE;
   7536 
   7537 	/*
   7538 	 * Go through the Tx list and free mbufs for those
   7539 	 * frames which have been transmitted.
   7540 	 */
   7541 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7542 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7543 		txs = &txq->txq_soft[i];
   7544 
   7545 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7546 			device_xname(sc->sc_dev), i));
   7547 
   7548 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7549 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7550 
   7551 		status =
   7552 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7553 		if ((status & WTX_ST_DD) == 0) {
   7554 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7555 			    BUS_DMASYNC_PREREAD);
   7556 			break;
   7557 		}
   7558 
   7559 		processed = true;
   7560 		count++;
   7561 		DPRINTF(WM_DEBUG_TX,
   7562 		    ("%s: TX: job %d done: descs %d..%d\n",
   7563 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7564 		    txs->txs_lastdesc));
   7565 
   7566 		/*
   7567 		 * XXX We should probably be using the statistics
   7568 		 * XXX registers, but I don't know if they exist
   7569 		 * XXX on chips before the i82544.
   7570 		 */
   7571 
   7572 #ifdef WM_EVENT_COUNTERS
   7573 		if (status & WTX_ST_TU)
   7574 			WM_Q_EVCNT_INCR(txq, tu);
   7575 #endif /* WM_EVENT_COUNTERS */
   7576 
   7577 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7578 			ifp->if_oerrors++;
   7579 			if (status & WTX_ST_LC)
   7580 				log(LOG_WARNING, "%s: late collision\n",
   7581 				    device_xname(sc->sc_dev));
   7582 			else if (status & WTX_ST_EC) {
   7583 				ifp->if_collisions += 16;
   7584 				log(LOG_WARNING, "%s: excessive collisions\n",
   7585 				    device_xname(sc->sc_dev));
   7586 			}
   7587 		} else
   7588 			ifp->if_opackets++;
   7589 
   7590 		txq->txq_free += txs->txs_ndesc;
   7591 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7592 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7593 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7594 		m_freem(txs->txs_mbuf);
   7595 		txs->txs_mbuf = NULL;
   7596 	}
   7597 
   7598 	/* Update the dirty transmit buffer pointer. */
   7599 	txq->txq_sdirty = i;
   7600 	DPRINTF(WM_DEBUG_TX,
   7601 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7602 
   7603 	if (count != 0)
   7604 		rnd_add_uint32(&sc->rnd_source, count);
   7605 
   7606 	/*
   7607 	 * If there are no more pending transmissions, cancel the watchdog
   7608 	 * timer.
   7609 	 */
   7610 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7611 		ifp->if_timer = 0;
   7612 
   7613 	return processed;
   7614 }
   7615 
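         /*
          * The wm_rxdesc_get_* accessors below hide the fact that the driver
          * speaks three RX descriptor layouts: the extended format on the
          * 82574, the advanced ("newqueue") format on 82575 and later, and
          * the legacy format on everything else.
          */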
   7616 static inline uint32_t
   7617 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7618 {
   7619 	struct wm_softc *sc = rxq->rxq_sc;
   7620 
   7621 	if (sc->sc_type == WM_T_82574)
   7622 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7623 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7624 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7625 	else
   7626 		return rxq->rxq_descs[idx].wrx_status;
   7627 }
   7628 
   7629 static inline uint32_t
   7630 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7631 {
   7632 	struct wm_softc *sc = rxq->rxq_sc;
   7633 
   7634 	if (sc->sc_type == WM_T_82574)
   7635 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7636 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7637 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7638 	else
   7639 		return rxq->rxq_descs[idx].wrx_errors;
   7640 }
   7641 
   7642 static inline uint16_t
   7643 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7644 {
   7645 	struct wm_softc *sc = rxq->rxq_sc;
   7646 
   7647 	if (sc->sc_type == WM_T_82574)
   7648 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7649 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7650 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7651 	else
   7652 		return rxq->rxq_descs[idx].wrx_special;
   7653 }
   7654 
   7655 static inline int
   7656 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7657 {
   7658 	struct wm_softc *sc = rxq->rxq_sc;
   7659 
   7660 	if (sc->sc_type == WM_T_82574)
   7661 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7662 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7663 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7664 	else
   7665 		return rxq->rxq_descs[idx].wrx_len;
   7666 }
   7667 
   7668 #ifdef WM_DEBUG
   7669 static inline uint32_t
   7670 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7671 {
   7672 	struct wm_softc *sc = rxq->rxq_sc;
   7673 
   7674 	if (sc->sc_type == WM_T_82574)
   7675 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7676 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7677 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7678 	else
   7679 		return 0;
   7680 }
   7681 
   7682 static inline uint8_t
   7683 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7684 {
   7685 	struct wm_softc *sc = rxq->rxq_sc;
   7686 
   7687 	if (sc->sc_type == WM_T_82574)
   7688 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7689 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7690 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7691 	else
   7692 		return 0;
   7693 }
   7694 #endif /* WM_DEBUG */
   7695 
   7696 static inline bool
   7697 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7698     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7699 {
   7700 
   7701 	if (sc->sc_type == WM_T_82574)
   7702 		return (status & ext_bit) != 0;
   7703 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7704 		return (status & nq_bit) != 0;
   7705 	else
   7706 		return (status & legacy_bit) != 0;
   7707 }
   7708 
   7709 static inline bool
   7710 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7711     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7712 {
   7713 
   7714 	if (sc->sc_type == WM_T_82574)
   7715 		return (error & ext_bit) != 0;
   7716 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7717 		return (error & nq_bit) != 0;
   7718 	else
   7719 		return (error & legacy_bit) != 0;
   7720 }
   7721 
   7722 static inline bool
   7723 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7724 {
   7725 
   7726 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7727 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7728 		return true;
   7729 	else
   7730 		return false;
   7731 }
   7732 
   7733 static inline bool
   7734 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7735 {
   7736 	struct wm_softc *sc = rxq->rxq_sc;
   7737 
   7738 	/* XXXX missing error bit for newqueue? */
   7739 	if (wm_rxdesc_is_set_error(sc, errors,
    7740 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
    7741 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
         		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
    7742 		NQRXC_ERROR_RXE)) {
   7743 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7744 			log(LOG_WARNING, "%s: symbol error\n",
   7745 			    device_xname(sc->sc_dev));
   7746 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7747 			log(LOG_WARNING, "%s: receive sequence error\n",
   7748 			    device_xname(sc->sc_dev));
   7749 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7750 			log(LOG_WARNING, "%s: CRC error\n",
   7751 			    device_xname(sc->sc_dev));
   7752 		return true;
   7753 	}
   7754 
   7755 	return false;
   7756 }
   7757 
   7758 static inline bool
   7759 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7760 {
   7761 	struct wm_softc *sc = rxq->rxq_sc;
   7762 
   7763 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7764 		NQRXC_STATUS_DD)) {
   7765 		/* We have processed all of the receive descriptors. */
   7766 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7767 		return false;
   7768 	}
   7769 
   7770 	return true;
   7771 }
   7772 
   7773 static inline bool
    7774 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
    7775     uint16_t vlantag, struct mbuf *m)
   7776 {
   7777 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7778 
   7779 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7780 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7781 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7782 	}
   7783 
   7784 	return true;
   7785 }
   7786 
   7787 static inline void
   7788 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7789     uint32_t errors, struct mbuf *m)
   7790 {
   7791 	struct wm_softc *sc = rxq->rxq_sc;
   7792 
   7793 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7794 		if (wm_rxdesc_is_set_status(sc, status,
   7795 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7796 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7797 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7798 			if (wm_rxdesc_is_set_error(sc, errors,
   7799 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7800 				m->m_pkthdr.csum_flags |=
   7801 					M_CSUM_IPv4_BAD;
   7802 		}
   7803 		if (wm_rxdesc_is_set_status(sc, status,
   7804 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7805 			/*
   7806 			 * Note: we don't know if this was TCP or UDP,
   7807 			 * so we just set both bits, and expect the
   7808 			 * upper layers to deal.
   7809 			 */
   7810 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7811 			m->m_pkthdr.csum_flags |=
   7812 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7813 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7814 			if (wm_rxdesc_is_set_error(sc, errors,
   7815 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7816 				m->m_pkthdr.csum_flags |=
   7817 					M_CSUM_TCP_UDP_BAD;
   7818 		}
   7819 	}
   7820 }
   7821 
   7822 /*
   7823  * wm_rxeof:
   7824  *
   7825  *	Helper; handle receive interrupts.
   7826  */
   7827 static void
   7828 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7829 {
   7830 	struct wm_softc *sc = rxq->rxq_sc;
   7831 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7832 	struct wm_rxsoft *rxs;
   7833 	struct mbuf *m;
   7834 	int i, len;
   7835 	int count = 0;
   7836 	uint32_t status, errors;
   7837 	uint16_t vlantag;
   7838 
   7839 	KASSERT(mutex_owned(rxq->rxq_lock));
   7840 
   7841 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7842 		if (limit-- == 0) {
   7843 			rxq->rxq_ptr = i;
   7844 			break;
   7845 		}
   7846 
   7847 		rxs = &rxq->rxq_soft[i];
   7848 
   7849 		DPRINTF(WM_DEBUG_RX,
   7850 		    ("%s: RX: checking descriptor %d\n",
   7851 		    device_xname(sc->sc_dev), i));
    7852 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7853 
   7854 		status = wm_rxdesc_get_status(rxq, i);
   7855 		errors = wm_rxdesc_get_errors(rxq, i);
   7856 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7857 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7858 #ifdef WM_DEBUG
   7859 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7860 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7861 #endif
   7862 
   7863 		if (!wm_rxdesc_dd(rxq, i, status)) {
    7864 			/*
    7865 			 * Update the receive pointer while still holding
    7866 			 * rxq_lock, keeping it consistent with the counters.
    7867 			 */
   7868 			rxq->rxq_ptr = i;
   7869 			break;
   7870 		}
   7871 
   7872 		count++;
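         		/*
         		 * rxq_discard is set further down when a receive buffer
         		 * could not be replenished mid-packet; in that mode we
         		 * recycle descriptors without looking at their contents
         		 * until EOP is seen.
         		 */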
   7873 		if (__predict_false(rxq->rxq_discard)) {
   7874 			DPRINTF(WM_DEBUG_RX,
   7875 			    ("%s: RX: discarding contents of descriptor %d\n",
   7876 			    device_xname(sc->sc_dev), i));
   7877 			wm_init_rxdesc(rxq, i);
   7878 			if (wm_rxdesc_is_eop(rxq, status)) {
   7879 				/* Reset our state. */
   7880 				DPRINTF(WM_DEBUG_RX,
   7881 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7882 				    device_xname(sc->sc_dev)));
   7883 				rxq->rxq_discard = 0;
   7884 			}
   7885 			continue;
   7886 		}
   7887 
   7888 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7889 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7890 
   7891 		m = rxs->rxs_mbuf;
   7892 
   7893 		/*
   7894 		 * Add a new receive buffer to the ring, unless of
   7895 		 * course the length is zero. Treat the latter as a
   7896 		 * failed mapping.
   7897 		 */
   7898 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7899 			/*
   7900 			 * Failed, throw away what we've done so
   7901 			 * far, and discard the rest of the packet.
   7902 			 */
   7903 			ifp->if_ierrors++;
   7904 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7905 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7906 			wm_init_rxdesc(rxq, i);
   7907 			if (!wm_rxdesc_is_eop(rxq, status))
   7908 				rxq->rxq_discard = 1;
   7909 			if (rxq->rxq_head != NULL)
   7910 				m_freem(rxq->rxq_head);
   7911 			WM_RXCHAIN_RESET(rxq);
   7912 			DPRINTF(WM_DEBUG_RX,
   7913 			    ("%s: RX: Rx buffer allocation failed, "
   7914 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7915 			    rxq->rxq_discard ? " (discard)" : ""));
   7916 			continue;
   7917 		}
   7918 
   7919 		m->m_len = len;
   7920 		rxq->rxq_len += len;
   7921 		DPRINTF(WM_DEBUG_RX,
   7922 		    ("%s: RX: buffer at %p len %d\n",
   7923 		    device_xname(sc->sc_dev), m->m_data, len));
   7924 
   7925 		/* If this is not the end of the packet, keep looking. */
   7926 		if (!wm_rxdesc_is_eop(rxq, status)) {
   7927 			WM_RXCHAIN_LINK(rxq, m);
   7928 			DPRINTF(WM_DEBUG_RX,
   7929 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7930 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7931 			continue;
   7932 		}
   7933 
    7934 		/*
    7935 		 * Okay, we have the entire packet now.  The chip is
    7936 		 * configured to include the FCS except on the I350,
    7937 		 * I354 and I21[01] (not all chips can be configured to
    7938 		 * strip it), so we need to trim it.  We may also need
    7939 		 * to adjust the length of the previous mbuf in the
    7940 		 * chain if the current mbuf is too short.
    7941 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7942 		 * register is always set on the I350, so the FCS is
    7943 		 * already stripped there and we don't trim it.
    7944 		 */
   7944 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7945 		    && (sc->sc_type != WM_T_I210)
   7946 		    && (sc->sc_type != WM_T_I211)) {
   7947 			if (m->m_len < ETHER_CRC_LEN) {
   7948 				rxq->rxq_tail->m_len
   7949 				    -= (ETHER_CRC_LEN - m->m_len);
   7950 				m->m_len = 0;
   7951 			} else
   7952 				m->m_len -= ETHER_CRC_LEN;
   7953 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7954 		} else
   7955 			len = rxq->rxq_len;
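         		/*
         		 * Example: a frame received as rxq_len = 1518 bytes
         		 * (including the 4-byte FCS) whose last mbuf holds
         		 * only 2 bytes: that mbuf's length drops to 0, the
         		 * previous mbuf is trimmed by the remaining 2 bytes,
         		 * and len becomes 1518 - ETHER_CRC_LEN = 1514.
         		 */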
   7956 
   7957 		WM_RXCHAIN_LINK(rxq, m);
   7958 
   7959 		*rxq->rxq_tailp = NULL;
   7960 		m = rxq->rxq_head;
   7961 
   7962 		WM_RXCHAIN_RESET(rxq);
   7963 
   7964 		DPRINTF(WM_DEBUG_RX,
   7965 		    ("%s: RX: have entire packet, len -> %d\n",
   7966 		    device_xname(sc->sc_dev), len));
   7967 
   7968 		/* If an error occurred, update stats and drop the packet. */
   7969 		if (wm_rxdesc_has_errors(rxq, errors)) {
   7970 			m_freem(m);
   7971 			continue;
   7972 		}
   7973 
   7974 		/* No errors.  Receive the packet. */
   7975 		m_set_rcvif(m, ifp);
   7976 		m->m_pkthdr.len = len;
    7977 		/*
    7978 		 * TODO:
    7979 		 * save rsshash and rsstype in this mbuf.
    7980 		 */
   7981 		DPRINTF(WM_DEBUG_RX,
   7982 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   7983 			device_xname(sc->sc_dev), rsstype, rsshash));
   7984 
   7985 		/*
   7986 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7987 		 * for us.  Associate the tag with the packet.
   7988 		 */
   7989 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   7990 			continue;
   7991 
   7992 		/* Set up checksum info for this packet. */
   7993 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    7994 		/*
    7995 		 * Update the receive pointer while holding rxq_lock,
    7996 		 * consistent with the incremented counter.
    7997 		 */
   7998 		rxq->rxq_ptr = i;
   7999 		mutex_exit(rxq->rxq_lock);
   8000 
   8001 		/* Pass it on. */
   8002 		if_percpuq_enqueue(sc->sc_ipq, m);
   8003 
   8004 		mutex_enter(rxq->rxq_lock);
   8005 
   8006 		if (rxq->rxq_stopping)
   8007 			break;
   8008 	}
   8009 
   8010 	if (count != 0)
   8011 		rnd_add_uint32(&sc->rnd_source, count);
   8012 
   8013 	DPRINTF(WM_DEBUG_RX,
   8014 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8015 }
   8016 
   8017 /*
   8018  * wm_linkintr_gmii:
   8019  *
   8020  *	Helper; handle link interrupts for GMII.
   8021  */
   8022 static void
   8023 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8024 {
   8025 
   8026 	KASSERT(WM_CORE_LOCKED(sc));
   8027 
   8028 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8029 		__func__));
   8030 
   8031 	if (icr & ICR_LSC) {
   8032 		uint32_t reg;
   8033 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8034 
   8035 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8036 			wm_gig_downshift_workaround_ich8lan(sc);
   8037 
   8038 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8039 			device_xname(sc->sc_dev)));
   8040 		mii_pollstat(&sc->sc_mii);
   8041 		if (sc->sc_type == WM_T_82543) {
   8042 			int miistatus, active;
   8043 
   8044 			/*
   8045 			 * With 82543, we need to force speed and
   8046 			 * duplex on the MAC equal to what the PHY
   8047 			 * speed and duplex configuration is.
   8048 			 */
   8049 			miistatus = sc->sc_mii.mii_media_status;
   8050 
   8051 			if (miistatus & IFM_ACTIVE) {
   8052 				active = sc->sc_mii.mii_media_active;
   8053 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8054 				switch (IFM_SUBTYPE(active)) {
   8055 				case IFM_10_T:
   8056 					sc->sc_ctrl |= CTRL_SPEED_10;
   8057 					break;
   8058 				case IFM_100_TX:
   8059 					sc->sc_ctrl |= CTRL_SPEED_100;
   8060 					break;
   8061 				case IFM_1000_T:
   8062 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8063 					break;
   8064 				default:
   8065 					/*
   8066 					 * fiber?
    8067 					 * Should not enter here.
   8068 					 */
   8069 					printf("unknown media (%x)\n", active);
   8070 					break;
   8071 				}
   8072 				if (active & IFM_FDX)
   8073 					sc->sc_ctrl |= CTRL_FD;
   8074 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8075 			}
   8076 		} else if ((sc->sc_type == WM_T_ICH8)
   8077 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8078 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8079 		} else if (sc->sc_type == WM_T_PCH) {
   8080 			wm_k1_gig_workaround_hv(sc,
   8081 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8082 		}
   8083 
   8084 		if ((sc->sc_phytype == WMPHY_82578)
   8085 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8086 			== IFM_1000_T)) {
   8087 
   8088 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8089 				delay(200*1000); /* XXX too big */
   8090 
   8091 				/* Link stall fix for link up */
   8092 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8093 				    HV_MUX_DATA_CTRL,
   8094 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8095 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8096 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8097 				    HV_MUX_DATA_CTRL,
   8098 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8099 			}
   8100 		}
   8101 		/*
   8102 		 * I217 Packet Loss issue:
   8103 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8104 		 * on power up.
   8105 		 * Set the Beacon Duration for I217 to 8 usec
   8106 		 */
   8107 		if ((sc->sc_type == WM_T_PCH_LPT)
   8108 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8109 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8110 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8111 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8112 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8113 		}
   8114 
   8115 		/* XXX Work-around I218 hang issue */
   8116 		/* e1000_k1_workaround_lpt_lp() */
   8117 
   8118 		if ((sc->sc_type == WM_T_PCH_LPT)
   8119 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8120 			/*
   8121 			 * Set platform power management values for Latency
   8122 			 * Tolerance Reporting (LTR)
   8123 			 */
   8124 			wm_platform_pm_pch_lpt(sc,
   8125 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8126 				    != 0));
   8127 		}
   8128 
   8129 		/* FEXTNVM6 K1-off workaround */
   8130 		if (sc->sc_type == WM_T_PCH_SPT) {
   8131 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8132 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8133 			    & FEXTNVM6_K1_OFF_ENABLE)
   8134 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8135 			else
   8136 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8137 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8138 		}
   8139 	} else if (icr & ICR_RXSEQ) {
   8140 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8141 			device_xname(sc->sc_dev)));
   8142 	}
   8143 }
   8144 
   8145 /*
   8146  * wm_linkintr_tbi:
   8147  *
   8148  *	Helper; handle link interrupts for TBI mode.
   8149  */
   8150 static void
   8151 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8152 {
   8153 	uint32_t status;
   8154 
   8155 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8156 		__func__));
   8157 
   8158 	status = CSR_READ(sc, WMREG_STATUS);
   8159 	if (icr & ICR_LSC) {
   8160 		if (status & STATUS_LU) {
   8161 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8162 			    device_xname(sc->sc_dev),
   8163 			    (status & STATUS_FD) ? "FDX" : "HDX"));
    8164 			/*
    8165 			 * NOTE: hardware updates TFCE and RFCE in CTRL
    8166 			 * automatically, so refresh our cached sc->sc_ctrl.
    8167 			 */
   8168 
   8169 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8170 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8171 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8172 			if (status & STATUS_FD)
   8173 				sc->sc_tctl |=
   8174 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8175 			else
   8176 				sc->sc_tctl |=
   8177 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8178 			if (sc->sc_ctrl & CTRL_TFCE)
   8179 				sc->sc_fcrtl |= FCRTL_XONE;
   8180 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8181 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8182 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8183 				      sc->sc_fcrtl);
   8184 			sc->sc_tbi_linkup = 1;
   8185 		} else {
   8186 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8187 			    device_xname(sc->sc_dev)));
   8188 			sc->sc_tbi_linkup = 0;
   8189 		}
   8190 		/* Update LED */
   8191 		wm_tbi_serdes_set_linkled(sc);
   8192 	} else if (icr & ICR_RXSEQ) {
   8193 		DPRINTF(WM_DEBUG_LINK,
   8194 		    ("%s: LINK: Receive sequence error\n",
   8195 		    device_xname(sc->sc_dev)));
   8196 	}
   8197 }
   8198 
   8199 /*
   8200  * wm_linkintr_serdes:
   8201  *
    8202  *	Helper; handle link interrupts for SERDES mode.
   8203  */
   8204 static void
   8205 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8206 {
   8207 	struct mii_data *mii = &sc->sc_mii;
   8208 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8209 	uint32_t pcs_adv, pcs_lpab, reg;
   8210 
   8211 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8212 		__func__));
   8213 
   8214 	if (icr & ICR_LSC) {
   8215 		/* Check PCS */
   8216 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8217 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8218 			mii->mii_media_status |= IFM_ACTIVE;
   8219 			sc->sc_tbi_linkup = 1;
   8220 		} else {
   8221 			mii->mii_media_status |= IFM_NONE;
   8222 			sc->sc_tbi_linkup = 0;
   8223 			wm_tbi_serdes_set_linkled(sc);
   8224 			return;
   8225 		}
   8226 		mii->mii_media_active |= IFM_1000_SX;
   8227 		if ((reg & PCS_LSTS_FDX) != 0)
   8228 			mii->mii_media_active |= IFM_FDX;
   8229 		else
   8230 			mii->mii_media_active |= IFM_HDX;
   8231 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8232 			/* Check flow */
   8233 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8234 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8235 				DPRINTF(WM_DEBUG_LINK,
   8236 				    ("XXX LINKOK but not ACOMP\n"));
   8237 				return;
   8238 			}
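         			/*
         			 * The checks below resolve flow control as in
         			 * IEEE 802.3 Annex 28B:
         			 *
         			 *  local SYM/ASYM | partner SYM/ASYM | result
         			 *  ---------------+------------------+-------------
         			 *   1    -        |  1    -          | TX+RX pause
         			 *   0    1        |  1    1          | TX pause only
         			 *   1    1        |  0    1          | RX pause only
         			 */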
   8239 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8240 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8241 			DPRINTF(WM_DEBUG_LINK,
   8242 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8243 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8244 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8245 				mii->mii_media_active |= IFM_FLOW
   8246 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8247 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8248 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8249 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8250 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8251 				mii->mii_media_active |= IFM_FLOW
   8252 				    | IFM_ETH_TXPAUSE;
   8253 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8254 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8255 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8256 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8257 				mii->mii_media_active |= IFM_FLOW
   8258 				    | IFM_ETH_RXPAUSE;
   8259 		}
   8260 		/* Update LED */
   8261 		wm_tbi_serdes_set_linkled(sc);
   8262 	} else {
   8263 		DPRINTF(WM_DEBUG_LINK,
   8264 		    ("%s: LINK: Receive sequence error\n",
   8265 		    device_xname(sc->sc_dev)));
   8266 	}
   8267 }
   8268 
   8269 /*
   8270  * wm_linkintr:
   8271  *
   8272  *	Helper; handle link interrupts.
   8273  */
   8274 static void
   8275 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8276 {
   8277 
   8278 	KASSERT(WM_CORE_LOCKED(sc));
   8279 
   8280 	if (sc->sc_flags & WM_F_HAS_MII)
   8281 		wm_linkintr_gmii(sc, icr);
   8282 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8283 	    && (sc->sc_type >= WM_T_82575))
   8284 		wm_linkintr_serdes(sc, icr);
   8285 	else
   8286 		wm_linkintr_tbi(sc, icr);
   8287 }
   8288 
   8289 /*
   8290  * wm_intr_legacy:
   8291  *
   8292  *	Interrupt service routine for INTx and MSI.
   8293  */
   8294 static int
   8295 wm_intr_legacy(void *arg)
   8296 {
   8297 	struct wm_softc *sc = arg;
   8298 	struct wm_queue *wmq = &sc->sc_queue[0];
   8299 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8300 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8301 	uint32_t icr, rndval = 0;
   8302 	int handled = 0;
   8303 
   8304 	DPRINTF(WM_DEBUG_TX,
   8305 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
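         	/*
         	 * Note: reading ICR clears the asserted interrupt causes on
         	 * these chips, so each iteration consumes whatever causes
         	 * accumulated since the previous read.
         	 */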
   8306 	while (1 /* CONSTCOND */) {
   8307 		icr = CSR_READ(sc, WMREG_ICR);
   8308 		if ((icr & sc->sc_icr) == 0)
   8309 			break;
   8310 		if (rndval == 0)
   8311 			rndval = icr;
   8312 
   8313 		mutex_enter(rxq->rxq_lock);
   8314 
   8315 		if (rxq->rxq_stopping) {
   8316 			mutex_exit(rxq->rxq_lock);
   8317 			break;
   8318 		}
   8319 
   8320 		handled = 1;
   8321 
   8322 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8323 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8324 			DPRINTF(WM_DEBUG_RX,
   8325 			    ("%s: RX: got Rx intr 0x%08x\n",
   8326 			    device_xname(sc->sc_dev),
   8327 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8328 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8329 		}
   8330 #endif
   8331 		wm_rxeof(rxq, UINT_MAX);
   8332 
   8333 		mutex_exit(rxq->rxq_lock);
   8334 		mutex_enter(txq->txq_lock);
   8335 
   8336 		if (txq->txq_stopping) {
   8337 			mutex_exit(txq->txq_lock);
   8338 			break;
   8339 		}
   8340 
   8341 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8342 		if (icr & ICR_TXDW) {
   8343 			DPRINTF(WM_DEBUG_TX,
   8344 			    ("%s: TX: got TXDW interrupt\n",
   8345 			    device_xname(sc->sc_dev)));
   8346 			WM_Q_EVCNT_INCR(txq, txdw);
   8347 		}
   8348 #endif
   8349 		wm_txeof(sc, txq);
   8350 
   8351 		mutex_exit(txq->txq_lock);
   8352 		WM_CORE_LOCK(sc);
   8353 
   8354 		if (sc->sc_core_stopping) {
   8355 			WM_CORE_UNLOCK(sc);
   8356 			break;
   8357 		}
   8358 
   8359 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8360 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8361 			wm_linkintr(sc, icr);
   8362 		}
   8363 
   8364 		WM_CORE_UNLOCK(sc);
   8365 
   8366 		if (icr & ICR_RXO) {
   8367 #if defined(WM_DEBUG)
   8368 			log(LOG_WARNING, "%s: Receive overrun\n",
   8369 			    device_xname(sc->sc_dev));
   8370 #endif /* defined(WM_DEBUG) */
   8371 		}
   8372 	}
   8373 
   8374 	rnd_add_uint32(&sc->rnd_source, rndval);
   8375 
   8376 	if (handled) {
   8377 		/* Try to get more packets going. */
   8378 		softint_schedule(wmq->wmq_si);
   8379 	}
   8380 
   8381 	return handled;
   8382 }
   8383 
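         /*
          * Per-queue interrupt masking differs by MAC generation: the 82574
          * uses per-queue ICR_TXQ/ICR_RXQ bits in IMS/IMC, the 82575 uses
          * EITR_TX_QUEUE/EITR_RX_QUEUE bits in EIMS/EIMC, and later MSI-X
          * capable chips use one EIMS/EIMC bit per vector (wmq_intr_idx).
          */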
   8384 static inline void
   8385 wm_txrxintr_disable(struct wm_queue *wmq)
   8386 {
   8387 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8388 
   8389 	if (sc->sc_type == WM_T_82574)
    8390 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8391 	else if (sc->sc_type == WM_T_82575)
    8392 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8393 	else
   8394 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8395 }
   8396 
   8397 static inline void
   8398 wm_txrxintr_enable(struct wm_queue *wmq)
   8399 {
   8400 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8401 
   8402 	if (sc->sc_type == WM_T_82574)
    8403 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8404 	else if (sc->sc_type == WM_T_82575)
    8405 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8406 	else
   8407 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8408 }
   8409 
   8410 static int
   8411 wm_txrxintr_msix(void *arg)
   8412 {
   8413 	struct wm_queue *wmq = arg;
   8414 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8415 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8416 	struct wm_softc *sc = txq->txq_sc;
   8417 	u_int limit = sc->sc_rx_intr_process_limit;
   8418 
   8419 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8420 
   8421 	DPRINTF(WM_DEBUG_TX,
   8422 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8423 
   8424 	wm_txrxintr_disable(wmq);
   8425 
   8426 	mutex_enter(txq->txq_lock);
   8427 
   8428 	if (txq->txq_stopping) {
   8429 		mutex_exit(txq->txq_lock);
   8430 		return 0;
   8431 	}
   8432 
   8433 	WM_Q_EVCNT_INCR(txq, txdw);
   8434 	wm_txeof(sc, txq);
   8435 	/* wm_deferred start() is done in wm_handle_queue(). */
   8436 	mutex_exit(txq->txq_lock);
   8437 
   8438 	DPRINTF(WM_DEBUG_RX,
   8439 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8440 	mutex_enter(rxq->rxq_lock);
   8441 
   8442 	if (rxq->rxq_stopping) {
   8443 		mutex_exit(rxq->rxq_lock);
   8444 		return 0;
   8445 	}
   8446 
   8447 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8448 	wm_rxeof(rxq, limit);
   8449 	mutex_exit(rxq->rxq_lock);
   8450 
   8451 	softint_schedule(wmq->wmq_si);
   8452 
   8453 	return 1;
   8454 }
   8455 
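         /*
          * wm_handle_queue:
          *
          *	Softint handler scheduled from the interrupt handlers above;
          *	completes the deferred TX/RX work with the queue interrupt
          *	still masked and re-enables it when done.
          */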
   8456 static void
   8457 wm_handle_queue(void *arg)
   8458 {
   8459 	struct wm_queue *wmq = arg;
   8460 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8461 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8462 	struct wm_softc *sc = txq->txq_sc;
   8463 	u_int limit = sc->sc_rx_process_limit;
   8464 
   8465 	mutex_enter(txq->txq_lock);
   8466 	if (txq->txq_stopping) {
   8467 		mutex_exit(txq->txq_lock);
   8468 		return;
   8469 	}
   8470 	wm_txeof(sc, txq);
   8471 	wm_deferred_start_locked(txq);
   8472 	mutex_exit(txq->txq_lock);
   8473 
   8474 	mutex_enter(rxq->rxq_lock);
   8475 	if (rxq->rxq_stopping) {
   8476 		mutex_exit(rxq->rxq_lock);
   8477 		return;
   8478 	}
   8479 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8480 	wm_rxeof(rxq, limit);
   8481 	mutex_exit(rxq->rxq_lock);
   8482 
   8483 	wm_txrxintr_enable(wmq);
   8484 }
   8485 
   8486 /*
   8487  * wm_linkintr_msix:
   8488  *
   8489  *	Interrupt service routine for link status change for MSI-X.
   8490  */
   8491 static int
   8492 wm_linkintr_msix(void *arg)
   8493 {
   8494 	struct wm_softc *sc = arg;
   8495 	uint32_t reg;
   8496 
   8497 	DPRINTF(WM_DEBUG_LINK,
   8498 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8499 
   8500 	reg = CSR_READ(sc, WMREG_ICR);
   8501 	WM_CORE_LOCK(sc);
   8502 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8503 		goto out;
   8504 
   8505 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8506 	wm_linkintr(sc, ICR_LSC);
   8507 
   8508 out:
   8509 	WM_CORE_UNLOCK(sc);
   8510 
   8511 	if (sc->sc_type == WM_T_82574)
   8512 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8513 	else if (sc->sc_type == WM_T_82575)
   8514 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8515 	else
   8516 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8517 
   8518 	return 1;
   8519 }
   8520 
   8521 /*
   8522  * Media related.
   8523  * GMII, SGMII, TBI (and SERDES)
   8524  */
   8525 
   8526 /* Common */
   8527 
   8528 /*
   8529  * wm_tbi_serdes_set_linkled:
   8530  *
   8531  *	Update the link LED on TBI and SERDES devices.
   8532  */
   8533 static void
   8534 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8535 {
   8536 
   8537 	if (sc->sc_tbi_linkup)
   8538 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8539 	else
   8540 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8541 
   8542 	/* 82540 or newer devices are active low */
   8543 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
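         	/*
         	 * E.g. on an 82540 with link up: the OR above sets SWDPIN(0)
         	 * and the XOR clears it again, driving the pin low and thus
         	 * lighting the active-low LED.
         	 */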
   8544 
   8545 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8546 }
   8547 
   8548 /* GMII related */
   8549 
   8550 /*
   8551  * wm_gmii_reset:
   8552  *
   8553  *	Reset the PHY.
   8554  */
   8555 static void
   8556 wm_gmii_reset(struct wm_softc *sc)
   8557 {
   8558 	uint32_t reg;
   8559 	int rv;
   8560 
   8561 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8562 		device_xname(sc->sc_dev), __func__));
   8563 
   8564 	rv = sc->phy.acquire(sc);
   8565 	if (rv != 0) {
   8566 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8567 		    __func__);
   8568 		return;
   8569 	}
   8570 
   8571 	switch (sc->sc_type) {
   8572 	case WM_T_82542_2_0:
   8573 	case WM_T_82542_2_1:
   8574 		/* null */
   8575 		break;
   8576 	case WM_T_82543:
   8577 		/*
   8578 		 * With 82543, we need to force speed and duplex on the MAC
   8579 		 * equal to what the PHY speed and duplex configuration is.
   8580 		 * In addition, we need to perform a hardware reset on the PHY
   8581 		 * to take it out of reset.
   8582 		 */
   8583 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8584 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8585 
   8586 		/* The PHY reset pin is active-low. */
   8587 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8588 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8589 		    CTRL_EXT_SWDPIN(4));
   8590 		reg |= CTRL_EXT_SWDPIO(4);
   8591 
   8592 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8593 		CSR_WRITE_FLUSH(sc);
   8594 		delay(10*1000);
   8595 
   8596 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8597 		CSR_WRITE_FLUSH(sc);
   8598 		delay(150);
   8599 #if 0
   8600 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8601 #endif
   8602 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8603 		break;
   8604 	case WM_T_82544:	/* reset 10000us */
   8605 	case WM_T_82540:
   8606 	case WM_T_82545:
   8607 	case WM_T_82545_3:
   8608 	case WM_T_82546:
   8609 	case WM_T_82546_3:
   8610 	case WM_T_82541:
   8611 	case WM_T_82541_2:
   8612 	case WM_T_82547:
   8613 	case WM_T_82547_2:
   8614 	case WM_T_82571:	/* reset 100us */
   8615 	case WM_T_82572:
   8616 	case WM_T_82573:
   8617 	case WM_T_82574:
   8618 	case WM_T_82575:
   8619 	case WM_T_82576:
   8620 	case WM_T_82580:
   8621 	case WM_T_I350:
   8622 	case WM_T_I354:
   8623 	case WM_T_I210:
   8624 	case WM_T_I211:
   8625 	case WM_T_82583:
   8626 	case WM_T_80003:
   8627 		/* generic reset */
   8628 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8629 		CSR_WRITE_FLUSH(sc);
   8630 		delay(20000);
   8631 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8632 		CSR_WRITE_FLUSH(sc);
   8633 		delay(20000);
   8634 
   8635 		if ((sc->sc_type == WM_T_82541)
   8636 		    || (sc->sc_type == WM_T_82541_2)
   8637 		    || (sc->sc_type == WM_T_82547)
   8638 		    || (sc->sc_type == WM_T_82547_2)) {
    8639 			/* workarounds for IGP are done in igp_reset() */
   8640 			/* XXX add code to set LED after phy reset */
   8641 		}
   8642 		break;
   8643 	case WM_T_ICH8:
   8644 	case WM_T_ICH9:
   8645 	case WM_T_ICH10:
   8646 	case WM_T_PCH:
   8647 	case WM_T_PCH2:
   8648 	case WM_T_PCH_LPT:
   8649 	case WM_T_PCH_SPT:
   8650 		/* generic reset */
   8651 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8652 		CSR_WRITE_FLUSH(sc);
   8653 		delay(100);
   8654 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8655 		CSR_WRITE_FLUSH(sc);
   8656 		delay(150);
   8657 		break;
   8658 	default:
   8659 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8660 		    __func__);
   8661 		break;
   8662 	}
   8663 
   8664 	sc->phy.release(sc);
   8665 
   8666 	/* get_cfg_done */
   8667 	wm_get_cfg_done(sc);
   8668 
   8669 	/* extra setup */
   8670 	switch (sc->sc_type) {
   8671 	case WM_T_82542_2_0:
   8672 	case WM_T_82542_2_1:
   8673 	case WM_T_82543:
   8674 	case WM_T_82544:
   8675 	case WM_T_82540:
   8676 	case WM_T_82545:
   8677 	case WM_T_82545_3:
   8678 	case WM_T_82546:
   8679 	case WM_T_82546_3:
   8680 	case WM_T_82541_2:
   8681 	case WM_T_82547_2:
   8682 	case WM_T_82571:
   8683 	case WM_T_82572:
   8684 	case WM_T_82573:
   8685 	case WM_T_82575:
   8686 	case WM_T_82576:
   8687 	case WM_T_82580:
   8688 	case WM_T_I350:
   8689 	case WM_T_I354:
   8690 	case WM_T_I210:
   8691 	case WM_T_I211:
   8692 	case WM_T_80003:
   8693 		/* null */
   8694 		break;
   8695 	case WM_T_82574:
   8696 	case WM_T_82583:
   8697 		wm_lplu_d0_disable(sc);
   8698 		break;
   8699 	case WM_T_82541:
   8700 	case WM_T_82547:
   8701 		/* XXX Configure actively LED after PHY reset */
   8702 		break;
   8703 	case WM_T_ICH8:
   8704 	case WM_T_ICH9:
   8705 	case WM_T_ICH10:
   8706 	case WM_T_PCH:
   8707 	case WM_T_PCH2:
   8708 	case WM_T_PCH_LPT:
   8709 	case WM_T_PCH_SPT:
    8710 		/* Allow time for h/w to get to a quiescent state after reset */
   8711 		delay(10*1000);
   8712 
   8713 		if (sc->sc_type == WM_T_PCH)
   8714 			wm_hv_phy_workaround_ich8lan(sc);
   8715 
   8716 		if (sc->sc_type == WM_T_PCH2)
   8717 			wm_lv_phy_workaround_ich8lan(sc);
   8718 
   8719 		/* Clear the host wakeup bit after lcd reset */
   8720 		if (sc->sc_type >= WM_T_PCH) {
   8721 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8722 			    BM_PORT_GEN_CFG);
   8723 			reg &= ~BM_WUC_HOST_WU_BIT;
   8724 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8725 			    BM_PORT_GEN_CFG, reg);
   8726 		}
   8727 
    8728 		/*
    8729 		 * XXX Configure the LCD with the extended configuration
    8730 		 * region in NVM
    8731 		 */
   8732 
   8733 		/* Disable D0 LPLU. */
   8734 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8735 			wm_lplu_d0_disable_pch(sc);
   8736 		else
   8737 			wm_lplu_d0_disable(sc);	/* ICH* */
   8738 		break;
   8739 	default:
   8740 		panic("%s: unknown type\n", __func__);
   8741 		break;
   8742 	}
   8743 }
   8744 
    8745 /*
    8746  * Set up sc_phytype and mii_{read|write}reg.
    8747  *
    8748  *  To identify the PHY type, the correct read/write functions must be
    8749  * selected. To select the correct functions, the PCI ID or MAC type
    8750  * must be known without accessing PHY registers.
    8751  *
    8752  *  On the first call of this function, the PHY ID is not known yet.
    8753  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8754  * so the result might be incorrect.
    8755  *
    8756  *  On the second call, the PHY OUI and model are used to identify the
    8757  * PHY type. It might still not be perfect because of missing entries,
    8758  * but it should be better than the first call.
    8759  *
    8760  *  If the newly detected result differs from the previous assumption,
    8761  * a diagnostic message is printed.
    8762  */
   8763 static void
   8764 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8765     uint16_t phy_model)
   8766 {
   8767 	device_t dev = sc->sc_dev;
   8768 	struct mii_data *mii = &sc->sc_mii;
   8769 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8770 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8771 	mii_readreg_t new_readreg;
   8772 	mii_writereg_t new_writereg;
   8773 
   8774 	if (mii->mii_readreg == NULL) {
   8775 		/*
   8776 		 *  This is the first call of this function. For ICH and PCH
   8777 		 * variants, it's difficult to determine the PHY access method
   8778 		 * by sc_type, so use the PCI product ID for some devices.
   8779 		 */
   8780 
   8781 		switch (sc->sc_pcidevid) {
   8782 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8783 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8784 			/* 82577 */
   8785 			new_phytype = WMPHY_82577;
   8786 			break;
   8787 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8788 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8789 			/* 82578 */
   8790 			new_phytype = WMPHY_82578;
   8791 			break;
   8792 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8793 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8794 			/* 82579 */
   8795 			new_phytype = WMPHY_82579;
   8796 			break;
   8797 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8798 		case PCI_PRODUCT_INTEL_82801I_BM:
   8799 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8800 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8801 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8802 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8803 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8804 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8805 			/* ICH8, 9, 10 with 82567 */
   8806 			new_phytype = WMPHY_BM;
   8807 			break;
   8808 		default:
   8809 			break;
   8810 		}
   8811 	} else {
   8812 		/* It's not the first call. Use PHY OUI and model */
   8813 		switch (phy_oui) {
   8814 		case MII_OUI_ATHEROS: /* XXX ??? */
   8815 			switch (phy_model) {
   8816 			case 0x0004: /* XXX */
   8817 				new_phytype = WMPHY_82578;
   8818 				break;
   8819 			default:
   8820 				break;
   8821 			}
   8822 			break;
   8823 		case MII_OUI_xxMARVELL:
   8824 			switch (phy_model) {
   8825 			case MII_MODEL_xxMARVELL_I210:
   8826 				new_phytype = WMPHY_I210;
   8827 				break;
   8828 			case MII_MODEL_xxMARVELL_E1011:
   8829 			case MII_MODEL_xxMARVELL_E1000_3:
   8830 			case MII_MODEL_xxMARVELL_E1000_5:
   8831 			case MII_MODEL_xxMARVELL_E1112:
   8832 				new_phytype = WMPHY_M88;
   8833 				break;
   8834 			case MII_MODEL_xxMARVELL_E1149:
   8835 				new_phytype = WMPHY_BM;
   8836 				break;
   8837 			case MII_MODEL_xxMARVELL_E1111:
   8838 			case MII_MODEL_xxMARVELL_I347:
   8839 			case MII_MODEL_xxMARVELL_E1512:
   8840 			case MII_MODEL_xxMARVELL_E1340M:
   8841 			case MII_MODEL_xxMARVELL_E1543:
   8842 				new_phytype = WMPHY_M88;
   8843 				break;
   8844 			case MII_MODEL_xxMARVELL_I82563:
   8845 				new_phytype = WMPHY_GG82563;
   8846 				break;
   8847 			default:
   8848 				break;
   8849 			}
   8850 			break;
   8851 		case MII_OUI_INTEL:
   8852 			switch (phy_model) {
   8853 			case MII_MODEL_INTEL_I82577:
   8854 				new_phytype = WMPHY_82577;
   8855 				break;
   8856 			case MII_MODEL_INTEL_I82579:
   8857 				new_phytype = WMPHY_82579;
   8858 				break;
   8859 			case MII_MODEL_INTEL_I217:
   8860 				new_phytype = WMPHY_I217;
   8861 				break;
   8862 			case MII_MODEL_INTEL_I82580:
   8863 			case MII_MODEL_INTEL_I350:
   8864 				new_phytype = WMPHY_82580;
   8865 				break;
   8866 			default:
   8867 				break;
   8868 			}
   8869 			break;
   8870 		case MII_OUI_yyINTEL:
   8871 			switch (phy_model) {
   8872 			case MII_MODEL_yyINTEL_I82562G:
   8873 			case MII_MODEL_yyINTEL_I82562EM:
   8874 			case MII_MODEL_yyINTEL_I82562ET:
   8875 				new_phytype = WMPHY_IFE;
   8876 				break;
   8877 			case MII_MODEL_yyINTEL_IGP01E1000:
   8878 				new_phytype = WMPHY_IGP;
   8879 				break;
   8880 			case MII_MODEL_yyINTEL_I82566:
   8881 				new_phytype = WMPHY_IGP_3;
   8882 				break;
   8883 			default:
   8884 				break;
   8885 			}
   8886 			break;
   8887 		default:
   8888 			break;
   8889 		}
   8890 		if (new_phytype == WMPHY_UNKNOWN)
   8891 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8892 			    __func__);
   8893 
    8894 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    8895 		    && (sc->sc_phytype != new_phytype)) {
    8896 			aprint_error_dev(dev, "Previously assumed PHY type "
    8897 			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
    8898 			    sc->sc_phytype, new_phytype);
    8899 		}
   8900 	}
   8901 
   8902 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8903 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8904 		/* SGMII */
   8905 		new_readreg = wm_sgmii_readreg;
   8906 		new_writereg = wm_sgmii_writereg;
   8907 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   8908 		/* BM2 (phyaddr == 1) */
   8909 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8910 		    && (new_phytype != WMPHY_BM)
   8911 		    && (new_phytype != WMPHY_UNKNOWN))
   8912 			doubt_phytype = new_phytype;
   8913 		new_phytype = WMPHY_BM;
   8914 		new_readreg = wm_gmii_bm_readreg;
   8915 		new_writereg = wm_gmii_bm_writereg;
   8916 	} else if (sc->sc_type >= WM_T_PCH) {
   8917 		/* All PCH* use _hv_ */
   8918 		new_readreg = wm_gmii_hv_readreg;
   8919 		new_writereg = wm_gmii_hv_writereg;
   8920 	} else if (sc->sc_type >= WM_T_ICH8) {
   8921 		/* non-82567 ICH8, 9 and 10 */
   8922 		new_readreg = wm_gmii_i82544_readreg;
   8923 		new_writereg = wm_gmii_i82544_writereg;
   8924 	} else if (sc->sc_type >= WM_T_80003) {
   8925 		/* 80003 */
   8926 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8927 		    && (new_phytype != WMPHY_GG82563)
   8928 		    && (new_phytype != WMPHY_UNKNOWN))
   8929 			doubt_phytype = new_phytype;
   8930 		new_phytype = WMPHY_GG82563;
   8931 		new_readreg = wm_gmii_i80003_readreg;
   8932 		new_writereg = wm_gmii_i80003_writereg;
   8933 	} else if (sc->sc_type >= WM_T_I210) {
   8934 		/* I210 and I211 */
   8935 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8936 		    && (new_phytype != WMPHY_I210)
   8937 		    && (new_phytype != WMPHY_UNKNOWN))
   8938 			doubt_phytype = new_phytype;
   8939 		new_phytype = WMPHY_I210;
   8940 		new_readreg = wm_gmii_gs40g_readreg;
   8941 		new_writereg = wm_gmii_gs40g_writereg;
   8942 	} else if (sc->sc_type >= WM_T_82580) {
   8943 		/* 82580, I350 and I354 */
   8944 		new_readreg = wm_gmii_82580_readreg;
   8945 		new_writereg = wm_gmii_82580_writereg;
   8946 	} else if (sc->sc_type >= WM_T_82544) {
    8947 		/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
   8948 		new_readreg = wm_gmii_i82544_readreg;
   8949 		new_writereg = wm_gmii_i82544_writereg;
   8950 	} else {
   8951 		new_readreg = wm_gmii_i82543_readreg;
   8952 		new_writereg = wm_gmii_i82543_writereg;
   8953 	}
   8954 
   8955 	if (new_phytype == WMPHY_BM) {
   8956 		/* All BM use _bm_ */
   8957 		new_readreg = wm_gmii_bm_readreg;
   8958 		new_writereg = wm_gmii_bm_writereg;
   8959 	}
   8960 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8961 		/* All PCH* use _hv_ */
   8962 		new_readreg = wm_gmii_hv_readreg;
   8963 		new_writereg = wm_gmii_hv_writereg;
   8964 	}
   8965 
   8966 	/* Diag output */
   8967 	if (doubt_phytype != WMPHY_UNKNOWN)
   8968 		aprint_error_dev(dev, "Assumed new PHY type was "
   8969 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   8970 		    new_phytype);
    8971 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    8972 	    && (sc->sc_phytype != new_phytype))
    8973 		aprint_error_dev(dev, "Previously assumed PHY type "
    8974 		    "(%u) was incorrect. New PHY type = %u\n",
    8975 		    sc->sc_phytype, new_phytype);
   8976 
   8977 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   8978 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   8979 
   8980 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   8981 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   8982 		    "function was incorrect.\n");
   8983 
   8984 	/* Update now */
   8985 	sc->sc_phytype = new_phytype;
   8986 	mii->mii_readreg = new_readreg;
   8987 	mii->mii_writereg = new_writereg;
   8988 }
   8989 
   8990 /*
   8991  * wm_get_phy_id_82575:
   8992  *
   8993  * Return PHY ID. Return -1 if it failed.
   8994  */
   8995 static int
   8996 wm_get_phy_id_82575(struct wm_softc *sc)
   8997 {
   8998 	uint32_t reg;
   8999 	int phyid = -1;
   9000 
   9001 	/* XXX */
   9002 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9003 		return -1;
   9004 
   9005 	if (wm_sgmii_uses_mdio(sc)) {
   9006 		switch (sc->sc_type) {
   9007 		case WM_T_82575:
   9008 		case WM_T_82576:
   9009 			reg = CSR_READ(sc, WMREG_MDIC);
   9010 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9011 			break;
   9012 		case WM_T_82580:
   9013 		case WM_T_I350:
   9014 		case WM_T_I354:
   9015 		case WM_T_I210:
   9016 		case WM_T_I211:
   9017 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9018 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9019 			break;
   9020 		default:
   9021 			return -1;
   9022 		}
   9023 	}
   9024 
   9025 	return phyid;
   9026 }
   9027 
   9028 
   9029 /*
   9030  * wm_gmii_mediainit:
   9031  *
   9032  *	Initialize media for use on 1000BASE-T devices.
   9033  */
   9034 static void
   9035 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9036 {
   9037 	device_t dev = sc->sc_dev;
   9038 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9039 	struct mii_data *mii = &sc->sc_mii;
   9040 	uint32_t reg;
   9041 
   9042 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9043 		device_xname(sc->sc_dev), __func__));
   9044 
   9045 	/* We have GMII. */
   9046 	sc->sc_flags |= WM_F_HAS_MII;
   9047 
   9048 	if (sc->sc_type == WM_T_80003)
   9049 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9050 	else
   9051 		sc->sc_tipg = TIPG_1000T_DFLT;
   9052 
   9053 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9054 	if ((sc->sc_type == WM_T_82580)
   9055 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9056 	    || (sc->sc_type == WM_T_I211)) {
   9057 		reg = CSR_READ(sc, WMREG_PHPM);
   9058 		reg &= ~PHPM_GO_LINK_D;
   9059 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9060 	}
   9061 
   9062 	/*
   9063 	 * Let the chip set speed/duplex on its own based on
   9064 	 * signals from the PHY.
   9065 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9066 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9067 	 */
   9068 	sc->sc_ctrl |= CTRL_SLU;
   9069 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9070 
   9071 	/* Initialize our media structures and probe the GMII. */
   9072 	mii->mii_ifp = ifp;
   9073 
    9074 	/*
    9075 	 * The first call of wm_gmii_setup_phytype. The result might be
    9076 	 * incorrect.
    9077 	 */
   9078 	wm_gmii_setup_phytype(sc, 0, 0);
   9079 
   9080 	mii->mii_statchg = wm_gmii_statchg;
   9081 
    9082 	/* Switch PHY control from SMBus to PCIe */
   9083 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9084 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9085 		wm_smbustopci(sc);
   9086 
   9087 	wm_gmii_reset(sc);
   9088 
   9089 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9090 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9091 	    wm_gmii_mediastatus);
   9092 
   9093 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9094 	    || (sc->sc_type == WM_T_82580)
   9095 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9096 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9097 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9098 			/* Attach only one port */
   9099 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9100 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9101 		} else {
   9102 			int i, id;
   9103 			uint32_t ctrl_ext;
   9104 
   9105 			id = wm_get_phy_id_82575(sc);
   9106 			if (id != -1) {
   9107 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9108 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9109 			}
   9110 			if ((id == -1)
   9111 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9112 				/* Power on sgmii phy if it is disabled */
   9113 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9114 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9115 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9116 				CSR_WRITE_FLUSH(sc);
   9117 				delay(300*1000); /* XXX too long */
   9118 
    9119 				/* try PHY addresses 1 through 7 */
   9120 				for (i = 1; i < 8; i++)
   9121 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9122 					    0xffffffff, i, MII_OFFSET_ANY,
   9123 					    MIIF_DOPAUSE);
   9124 
   9125 				/* restore previous sfp cage power state */
   9126 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9127 			}
   9128 		}
   9129 	} else {
   9130 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9131 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9132 	}
   9133 
   9134 	/*
   9135 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9136 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9137 	 */
   9138 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9139 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9140 		wm_set_mdio_slow_mode_hv(sc);
   9141 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9142 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9143 	}
   9144 
   9145 	/*
   9146 	 * (For ICH8 variants)
   9147 	 * If PHY detection failed, use BM's r/w function and retry.
   9148 	 */
   9149 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9150 		/* if failed, retry with *_bm_* */
   9151 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9152 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9153 		    sc->sc_phytype);
   9154 		sc->sc_phytype = WMPHY_BM;
   9155 		mii->mii_readreg = wm_gmii_bm_readreg;
   9156 		mii->mii_writereg = wm_gmii_bm_writereg;
   9157 
   9158 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9159 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9160 	}
   9161 
   9162 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9163 		/* No PHY was found */
   9164 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9165 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9166 		sc->sc_phytype = WMPHY_NONE;
   9167 	} else {
   9168 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9169 
    9170 		/*
    9171 		 * PHY found! Check the PHY type again with the second call
    9172 		 * of wm_gmii_setup_phytype.
    9173 		 */
   9174 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9175 		    child->mii_mpd_model);
   9176 
   9177 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9178 	}
   9179 }
   9180 
   9181 /*
   9182  * wm_gmii_mediachange:	[ifmedia interface function]
   9183  *
   9184  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9185  */
   9186 static int
   9187 wm_gmii_mediachange(struct ifnet *ifp)
   9188 {
   9189 	struct wm_softc *sc = ifp->if_softc;
   9190 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9191 	int rc;
   9192 
   9193 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9194 		device_xname(sc->sc_dev), __func__));
   9195 	if ((ifp->if_flags & IFF_UP) == 0)
   9196 		return 0;
   9197 
   9198 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9199 	sc->sc_ctrl |= CTRL_SLU;
   9200 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9201 	    || (sc->sc_type > WM_T_82543)) {
   9202 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9203 	} else {
   9204 		sc->sc_ctrl &= ~CTRL_ASDE;
   9205 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9206 		if (ife->ifm_media & IFM_FDX)
   9207 			sc->sc_ctrl |= CTRL_FD;
   9208 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9209 		case IFM_10_T:
   9210 			sc->sc_ctrl |= CTRL_SPEED_10;
   9211 			break;
   9212 		case IFM_100_TX:
   9213 			sc->sc_ctrl |= CTRL_SPEED_100;
   9214 			break;
   9215 		case IFM_1000_T:
   9216 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9217 			break;
   9218 		default:
   9219 			panic("wm_gmii_mediachange: bad media 0x%x",
   9220 			    ife->ifm_media);
   9221 		}
   9222 	}
   9223 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9224 	if (sc->sc_type <= WM_T_82543)
   9225 		wm_gmii_reset(sc);
   9226 
   9227 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9228 		return 0;
   9229 	return rc;
   9230 }
   9231 
   9232 /*
   9233  * wm_gmii_mediastatus:	[ifmedia interface function]
   9234  *
   9235  *	Get the current interface media status on a 1000BASE-T device.
   9236  */
   9237 static void
   9238 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9239 {
   9240 	struct wm_softc *sc = ifp->if_softc;
   9241 
   9242 	ether_mediastatus(ifp, ifmr);
   9243 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9244 	    | sc->sc_flowflags;
   9245 }
   9246 
   9247 #define	MDI_IO		CTRL_SWDPIN(2)
   9248 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9249 #define	MDI_CLK		CTRL_SWDPIN(3)
   9250 
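         /*
          * The 82543 lacks the MDIC register used by later chips, so MII
          * management frames are bit-banged through the software-definable
          * pins above: a read sends a 32-bit preamble of ones and a 14-bit
          * header (start, opcode, PHY address, register address), then
          * clocks 16 data bits back in; see wm_gmii_i82543_readreg() below.
          */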
   9251 static void
   9252 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9253 {
   9254 	uint32_t i, v;
   9255 
   9256 	v = CSR_READ(sc, WMREG_CTRL);
   9257 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9258 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9259 
   9260 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9261 		if (data & i)
   9262 			v |= MDI_IO;
   9263 		else
   9264 			v &= ~MDI_IO;
   9265 		CSR_WRITE(sc, WMREG_CTRL, v);
   9266 		CSR_WRITE_FLUSH(sc);
   9267 		delay(10);
   9268 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9269 		CSR_WRITE_FLUSH(sc);
   9270 		delay(10);
   9271 		CSR_WRITE(sc, WMREG_CTRL, v);
   9272 		CSR_WRITE_FLUSH(sc);
   9273 		delay(10);
   9274 	}
   9275 }
   9276 
   9277 static uint32_t
   9278 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9279 {
   9280 	uint32_t v, i, data = 0;
   9281 
   9282 	v = CSR_READ(sc, WMREG_CTRL);
   9283 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9284 	v |= CTRL_SWDPIO(3);
   9285 
   9286 	CSR_WRITE(sc, WMREG_CTRL, v);
   9287 	CSR_WRITE_FLUSH(sc);
   9288 	delay(10);
   9289 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9290 	CSR_WRITE_FLUSH(sc);
   9291 	delay(10);
   9292 	CSR_WRITE(sc, WMREG_CTRL, v);
   9293 	CSR_WRITE_FLUSH(sc);
   9294 	delay(10);
   9295 
   9296 	for (i = 0; i < 16; i++) {
   9297 		data <<= 1;
   9298 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9299 		CSR_WRITE_FLUSH(sc);
   9300 		delay(10);
   9301 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9302 			data |= 1;
   9303 		CSR_WRITE(sc, WMREG_CTRL, v);
   9304 		CSR_WRITE_FLUSH(sc);
   9305 		delay(10);
   9306 	}
   9307 
   9308 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9309 	CSR_WRITE_FLUSH(sc);
   9310 	delay(10);
   9311 	CSR_WRITE(sc, WMREG_CTRL, v);
   9312 	CSR_WRITE_FLUSH(sc);
   9313 	delay(10);
   9314 
   9315 	return data;
   9316 }
   9317 
   9318 #undef MDI_IO
   9319 #undef MDI_DIR
   9320 #undef MDI_CLK
   9321 
   9322 /*
   9323  * wm_gmii_i82543_readreg:	[mii interface function]
   9324  *
   9325  *	Read a PHY register on the GMII (i82543 version).
   9326  */
   9327 static int
   9328 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9329 {
   9330 	struct wm_softc *sc = device_private(self);
   9331 	int rv;
   9332 
   9333 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9334 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9335 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9336 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9337 
   9338 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9339 	    device_xname(sc->sc_dev), phy, reg, rv));
   9340 
   9341 	return rv;
   9342 }
   9343 
   9344 /*
   9345  * wm_gmii_i82543_writereg:	[mii interface function]
   9346  *
   9347  *	Write a PHY register on the GMII (i82543 version).
   9348  */
   9349 static void
   9350 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9351 {
   9352 	struct wm_softc *sc = device_private(self);
   9353 
   9354 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9355 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9356 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9357 	    (MII_COMMAND_START << 30), 32);
   9358 }
   9359 
   9360 /*
   9361  * wm_gmii_mdic_readreg:	[mii interface function]
   9362  *
   9363  *	Read a PHY register on the GMII.
   9364  */
   9365 static int
   9366 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9367 {
   9368 	struct wm_softc *sc = device_private(self);
   9369 	uint32_t mdic = 0;
   9370 	int i, rv;
   9371 
   9372 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9373 	    MDIC_REGADD(reg));
   9374 
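         	/*
         	 * Poll for completion: up to WM_GEN_POLL_TIMEOUT * 3
         	 * iterations of 50us each before declaring a timeout.
         	 */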
   9375 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9376 		mdic = CSR_READ(sc, WMREG_MDIC);
   9377 		if (mdic & MDIC_READY)
   9378 			break;
   9379 		delay(50);
   9380 	}
   9381 
   9382 	if ((mdic & MDIC_READY) == 0) {
   9383 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9384 		    device_xname(sc->sc_dev), phy, reg);
   9385 		rv = 0;
   9386 	} else if (mdic & MDIC_E) {
   9387 #if 0 /* This is normal if no PHY is present. */
   9388 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9389 		    device_xname(sc->sc_dev), phy, reg);
   9390 #endif
   9391 		rv = 0;
   9392 	} else {
   9393 		rv = MDIC_DATA(mdic);
   9394 		if (rv == 0xffff)
   9395 			rv = 0;
   9396 	}
   9397 
   9398 	return rv;
   9399 }
   9400 
   9401 /*
   9402  * wm_gmii_mdic_writereg:	[mii interface function]
   9403  *
   9404  *	Write a PHY register on the GMII.
   9405  */
   9406 static void
   9407 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9408 {
   9409 	struct wm_softc *sc = device_private(self);
   9410 	uint32_t mdic = 0;
   9411 	int i;
   9412 
   9413 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9414 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9415 
   9416 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9417 		mdic = CSR_READ(sc, WMREG_MDIC);
   9418 		if (mdic & MDIC_READY)
   9419 			break;
   9420 		delay(50);
   9421 	}
   9422 
   9423 	if ((mdic & MDIC_READY) == 0)
   9424 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9425 		    device_xname(sc->sc_dev), phy, reg);
   9426 	else if (mdic & MDIC_E)
   9427 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9428 		    device_xname(sc->sc_dev), phy, reg);
   9429 }
   9430 
   9431 /*
   9432  * wm_gmii_i82544_readreg:	[mii interface function]
   9433  *
   9434  *	Read a PHY register on the GMII.
   9435  */
   9436 static int
   9437 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9438 {
   9439 	struct wm_softc *sc = device_private(self);
   9440 	int rv;
   9441 
   9442 	if (sc->phy.acquire(sc)) {
   9443 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9444 		    __func__);
   9445 		return 0;
   9446 	}
   9447 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9448 	sc->phy.release(sc);
   9449 
   9450 	return rv;
   9451 }
   9452 
   9453 /*
   9454  * wm_gmii_i82544_writereg:	[mii interface function]
   9455  *
   9456  *	Write a PHY register on the GMII.
   9457  */
   9458 static void
   9459 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9460 {
   9461 	struct wm_softc *sc = device_private(self);
   9462 
    9463 	if (sc->phy.acquire(sc)) {
    9464 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9465 		    __func__);
         		return;	/* don't write or release without the semaphore */
    9466 	}
   9467 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9468 	sc->phy.release(sc);
   9469 }
   9470 
    9471 /*
    9472  * wm_gmii_i80003_readreg:	[mii interface function]
    9473  *
    9474  *	Read a PHY register on the Kumeran bus.
    9475  * This could be handled by the PHY layer if we didn't have to lock the
    9476  * resource ...
    9477  */
   9478 static int
   9479 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9480 {
   9481 	struct wm_softc *sc = device_private(self);
   9482 	int rv;
   9483 
   9484 	if (phy != 1) /* only one PHY on kumeran bus */
   9485 		return 0;
   9486 
   9487 	if (sc->phy.acquire(sc)) {
   9488 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9489 		    __func__);
   9490 		return 0;
   9491 	}
   9492 
   9493 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9494 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9495 		    reg >> GG82563_PAGE_SHIFT);
   9496 	} else {
   9497 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9498 		    reg >> GG82563_PAGE_SHIFT);
   9499 	}
    9500 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9501 	delay(200);
   9502 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9503 	delay(200);
   9504 	sc->phy.release(sc);
   9505 
   9506 	return rv;
   9507 }
   9508 
    9509 /*
    9510  * wm_gmii_i80003_writereg:	[mii interface function]
    9511  *
    9512  *	Write a PHY register on the Kumeran bus.
    9513  * This could be handled by the PHY layer if we didn't have to lock the
    9514  * resource ...
    9515  */
   9516 static void
   9517 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9518 {
   9519 	struct wm_softc *sc = device_private(self);
   9520 
   9521 	if (phy != 1) /* only one PHY on kumeran bus */
   9522 		return;
   9523 
   9524 	if (sc->phy.acquire(sc)) {
   9525 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9526 		    __func__);
   9527 		return;
   9528 	}
   9529 
   9530 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9531 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9532 		    reg >> GG82563_PAGE_SHIFT);
   9533 	} else {
   9534 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9535 		    reg >> GG82563_PAGE_SHIFT);
   9536 	}
    9537 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9538 	delay(200);
   9539 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9540 	delay(200);
   9541 
   9542 	sc->phy.release(sc);
   9543 }
   9544 
    9545 /*
    9546  * wm_gmii_bm_readreg:	[mii interface function]
    9547  *
    9548  *	Read a PHY register on the BM PHY.
    9549  * This could be handled by the PHY layer if we didn't have to lock the
    9550  * resource ...
    9551  */
   9552 static int
   9553 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9554 {
   9555 	struct wm_softc *sc = device_private(self);
   9556 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9557 	uint16_t val;
   9558 	int rv;
   9559 
   9560 	if (sc->phy.acquire(sc)) {
   9561 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9562 		    __func__);
   9563 		return 0;
   9564 	}
   9565 
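         	/*
         	 * On BM PHYs other than the 82574/82583 ones, registers in
         	 * pages >= 768 and a few special registers (page 0 register
         	 * 25, register 31) are apparently only reachable through PHY
         	 * address 1, so redirect such accesses there.
         	 */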
   9566 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9567 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9568 		    || (reg == 31)) ? 1 : phy;
   9569 	/* Page 800 works differently than the rest so it has its own func */
   9570 	if (page == BM_WUC_PAGE) {
   9571 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9572 		rv = val;
   9573 		goto release;
   9574 	}
   9575 
   9576 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9577 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9578 		    && (sc->sc_type != WM_T_82583))
   9579 			wm_gmii_mdic_writereg(self, phy,
   9580 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9581 		else
   9582 			wm_gmii_mdic_writereg(self, phy,
   9583 			    BME1000_PHY_PAGE_SELECT, page);
   9584 	}
   9585 
   9586 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9587 
   9588 release:
   9589 	sc->phy.release(sc);
   9590 	return rv;
   9591 }
   9592 
    9593 /*
    9594  * wm_gmii_bm_writereg:	[mii interface function]
    9595  *
    9596  *	Write a PHY register on the BM PHY.
    9597  * This could be handled by the PHY layer if we didn't have to lock the
    9598  * resource ...
    9599  */
   9600 static void
   9601 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9602 {
   9603 	struct wm_softc *sc = device_private(self);
   9604 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9605 
   9606 	if (sc->phy.acquire(sc)) {
   9607 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9608 		    __func__);
   9609 		return;
   9610 	}
   9611 
   9612 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9613 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9614 		    || (reg == 31)) ? 1 : phy;
   9615 	/* Page 800 works differently than the rest so it has its own func */
   9616 	if (page == BM_WUC_PAGE) {
   9617 		uint16_t tmp;
   9618 
   9619 		tmp = val;
   9620 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9621 		goto release;
   9622 	}
   9623 
   9624 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9625 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9626 		    && (sc->sc_type != WM_T_82583))
   9627 			wm_gmii_mdic_writereg(self, phy,
   9628 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9629 		else
   9630 			wm_gmii_mdic_writereg(self, phy,
   9631 			    BME1000_PHY_PAGE_SELECT, page);
   9632 	}
   9633 
   9634 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9635 
   9636 release:
   9637 	sc->phy.release(sc);
   9638 }
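
/*
 * A minimal sketch (not compiled) of the BM register cookie used by
 * the two accessors above: the page rides above BME1000_PAGE_SHIFT and
 * the 5-bit MII address sits below it; page 800 (BM_WUC_PAGE) is
 * detoured through wm_access_phy_wakeup_reg_bm() instead.
 */
#if 0
	uint16_t page   = reg >> BME1000_PAGE_SHIFT;	/* e.g. BM_WUC_PAGE */
	uint16_t offset = reg & MII_ADDRMASK;		/* 5-bit MII address */
#endif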
   9639 
   9640 static void
   9641 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9642 {
   9643 	struct wm_softc *sc = device_private(self);
   9644 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9645 	uint16_t wuce, reg;
   9646 
   9647 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9648 		device_xname(sc->sc_dev), __func__));
   9649 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9650 	if (sc->sc_type == WM_T_PCH) {
9651 		/* XXX The e1000 driver does nothing here... why? */
   9652 	}
   9653 
   9654 	/*
   9655 	 * 1) Enable PHY wakeup register first.
   9656 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9657 	 */
   9658 
   9659 	/* Set page 769 */
   9660 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9661 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9662 
   9663 	/* Read WUCE and save it */
   9664 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9665 
   9666 	reg = wuce | BM_WUC_ENABLE_BIT;
   9667 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9668 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9669 
   9670 	/* Select page 800 */
   9671 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9672 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9673 
   9674 	/*
   9675 	 * 2) Access PHY wakeup register.
   9676 	 * See e1000_access_phy_wakeup_reg_bm.
   9677 	 */
   9678 
   9679 	/* Write page 800 */
   9680 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9681 
   9682 	if (rd)
   9683 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9684 	else
   9685 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9686 
   9687 	/*
   9688 	 * 3) Disable PHY wakeup register.
   9689 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9690 	 */
   9691 	/* Set page 769 */
   9692 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9693 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9694 
   9695 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9696 }
   9697 
   9698 /*
   9699  * wm_gmii_hv_readreg:	[mii interface function]
   9700  *
9701  *	Read a PHY register on the HV (PCH family) PHY.
9702  * This could be handled by the PHY layer if we didn't have to lock the
9703  * resource ...
   9704  */
   9705 static int
   9706 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9707 {
   9708 	struct wm_softc *sc = device_private(self);
   9709 	int rv;
   9710 
   9711 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9712 		device_xname(sc->sc_dev), __func__));
   9713 	if (sc->phy.acquire(sc)) {
   9714 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9715 		    __func__);
   9716 		return 0;
   9717 	}
   9718 
   9719 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9720 	sc->phy.release(sc);
   9721 	return rv;
   9722 }
   9723 
   9724 static int
   9725 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9726 {
   9727 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9728 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9729 	uint16_t val;
   9730 	int rv;
   9731 
   9732 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9733 
   9734 	/* Page 800 works differently than the rest so it has its own func */
   9735 	if (page == BM_WUC_PAGE) {
   9736 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9737 		return val;
   9738 	}
   9739 
   9740 	/*
9741 	 * Pages lower than 768 work differently from the rest, so they
9742 	 * would need their own function (currently unimplemented)
   9743 	 */
   9744 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9745 		printf("gmii_hv_readreg!!!\n");
   9746 		return 0;
   9747 	}
   9748 
   9749 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9750 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9751 		    page << BME1000_PAGE_SHIFT);
   9752 	}
   9753 
   9754 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9755 	return rv;
   9756 }
   9757 
   9758 /*
   9759  * wm_gmii_hv_writereg:	[mii interface function]
   9760  *
9761  *	Write a PHY register on the HV (PCH family) PHY.
9762  * This could be handled by the PHY layer if we didn't have to lock the
9763  * resource ...
   9764  */
   9765 static void
   9766 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9767 {
   9768 	struct wm_softc *sc = device_private(self);
   9769 
   9770 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9771 		device_xname(sc->sc_dev), __func__));
   9772 
   9773 	if (sc->phy.acquire(sc)) {
   9774 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9775 		    __func__);
   9776 		return;
   9777 	}
   9778 
   9779 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9780 	sc->phy.release(sc);
   9781 }
   9782 
   9783 static void
   9784 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9785 {
   9786 	struct wm_softc *sc = device_private(self);
   9787 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9788 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9789 
   9790 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9791 
   9792 	/* Page 800 works differently than the rest so it has its own func */
   9793 	if (page == BM_WUC_PAGE) {
   9794 		uint16_t tmp;
   9795 
   9796 		tmp = val;
   9797 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9798 		return;
   9799 	}
   9800 
   9801 	/*
9802 	 * Pages lower than 768 work differently from the rest, so they
9803 	 * would need their own function (currently unimplemented)
   9804 	 */
   9805 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9806 		printf("gmii_hv_writereg!!!\n");
   9807 		return;
   9808 	}
   9809 
   9810 	{
   9811 		/*
   9812 		 * XXX Workaround MDIO accesses being disabled after entering
   9813 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9814 		 * register is set)
   9815 		 */
   9816 		if (sc->sc_phytype == WMPHY_82578) {
   9817 			struct mii_softc *child;
   9818 
   9819 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9820 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9821 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9822 			    && ((val & (1 << 11)) != 0)) {
   9823 				printf("XXX need workaround\n");
   9824 			}
   9825 		}
   9826 
   9827 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9828 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9829 			    page << BME1000_PAGE_SHIFT);
   9830 		}
   9831 	}
   9832 
   9833 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9834 }
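
/*
 * A minimal sketch (not compiled) of the HV register cookie used by
 * the locked accessors above: BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() split
 * one integer into a page and a register number, and any page at or
 * above HV_INTC_FC_PAGE_START is steered to PHY address 1.
 */
#if 0
	uint16_t page   = BM_PHY_REG_PAGE(reg);
	uint16_t regnum = BM_PHY_REG_NUM(reg);

	if (page >= HV_INTC_FC_PAGE_START)
		phy = 1;	/* these pages live on the single PHY */
#endif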
   9835 
   9836 /*
   9837  * wm_gmii_82580_readreg:	[mii interface function]
   9838  *
   9839  *	Read a PHY register on the 82580 and I350.
   9840  * This could be handled by the PHY layer if we didn't have to lock the
9841  * resource ...
   9842  */
   9843 static int
   9844 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9845 {
   9846 	struct wm_softc *sc = device_private(self);
   9847 	int rv;
   9848 
   9849 	if (sc->phy.acquire(sc) != 0) {
   9850 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9851 		    __func__);
   9852 		return 0;
   9853 	}
   9854 
   9855 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9856 
   9857 	sc->phy.release(sc);
   9858 	return rv;
   9859 }
   9860 
   9861 /*
   9862  * wm_gmii_82580_writereg:	[mii interface function]
   9863  *
   9864  *	Write a PHY register on the 82580 and I350.
   9865  * This could be handled by the PHY layer if we didn't have to lock the
9866  * resource ...
   9867  */
   9868 static void
   9869 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9870 {
   9871 	struct wm_softc *sc = device_private(self);
   9872 
   9873 	if (sc->phy.acquire(sc) != 0) {
   9874 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9875 		    __func__);
   9876 		return;
   9877 	}
   9878 
   9879 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9880 
   9881 	sc->phy.release(sc);
   9882 }
   9883 
   9884 /*
   9885  * wm_gmii_gs40g_readreg:	[mii interface function]
   9886  *
9887  *	Read a PHY register on the I210 and I211.
9888  * This could be handled by the PHY layer if we didn't have to lock the
9889  * resource ...
   9890  */
   9891 static int
   9892 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9893 {
   9894 	struct wm_softc *sc = device_private(self);
   9895 	int page, offset;
   9896 	int rv;
   9897 
   9898 	/* Acquire semaphore */
   9899 	if (sc->phy.acquire(sc)) {
   9900 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9901 		    __func__);
   9902 		return 0;
   9903 	}
   9904 
   9905 	/* Page select */
   9906 	page = reg >> GS40G_PAGE_SHIFT;
   9907 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9908 
   9909 	/* Read reg */
   9910 	offset = reg & GS40G_OFFSET_MASK;
   9911 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9912 
   9913 	sc->phy.release(sc);
   9914 	return rv;
   9915 }
   9916 
   9917 /*
   9918  * wm_gmii_gs40g_writereg:	[mii interface function]
   9919  *
   9920  *	Write a PHY register on the I210 and I211.
   9921  * This could be handled by the PHY layer if we didn't have to lock the
9922  * resource ...
   9923  */
   9924 static void
   9925 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9926 {
   9927 	struct wm_softc *sc = device_private(self);
   9928 	int page, offset;
   9929 
   9930 	/* Acquire semaphore */
   9931 	if (sc->phy.acquire(sc)) {
   9932 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9933 		    __func__);
   9934 		return;
   9935 	}
   9936 
   9937 	/* Page select */
   9938 	page = reg >> GS40G_PAGE_SHIFT;
   9939 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9940 
   9941 	/* Write reg */
   9942 	offset = reg & GS40G_OFFSET_MASK;
   9943 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9944 
   9945 	/* Release semaphore */
   9946 	sc->phy.release(sc);
   9947 }
   9948 
   9949 /*
   9950  * wm_gmii_statchg:	[mii interface function]
   9951  *
   9952  *	Callback from MII layer when media changes.
   9953  */
   9954 static void
   9955 wm_gmii_statchg(struct ifnet *ifp)
   9956 {
   9957 	struct wm_softc *sc = ifp->if_softc;
   9958 	struct mii_data *mii = &sc->sc_mii;
   9959 
   9960 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9961 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9962 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9963 
   9964 	/*
   9965 	 * Get flow control negotiation result.
   9966 	 */
   9967 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9968 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9969 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9970 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9971 	}
   9972 
   9973 	if (sc->sc_flowflags & IFM_FLOW) {
   9974 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9975 			sc->sc_ctrl |= CTRL_TFCE;
   9976 			sc->sc_fcrtl |= FCRTL_XONE;
   9977 		}
   9978 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9979 			sc->sc_ctrl |= CTRL_RFCE;
   9980 	}
   9981 
   9982 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9983 		DPRINTF(WM_DEBUG_LINK,
   9984 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9985 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9986 	} else {
   9987 		DPRINTF(WM_DEBUG_LINK,
   9988 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9989 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9990 	}
   9991 
   9992 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9993 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9994 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9995 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9996 	if (sc->sc_type == WM_T_80003) {
   9997 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9998 		case IFM_1000_T:
   9999 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10000 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10001 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10002 			break;
   10003 		default:
   10004 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10005 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10006 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10007 			break;
   10008 		}
   10009 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10010 	}
   10011 }
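
/*
 * Informational summary of the flow-control plumbing above: the
 * negotiated ifmedia flags map onto the MAC registers as
 *
 *	IFM_ETH_TXPAUSE -> CTRL_TFCE (we transmit PAUSE frames) and
 *	                   FCRTL_XONE (send XON when the Rx FIFO drains)
 *	IFM_ETH_RXPAUSE -> CTRL_RFCE (we honor received PAUSE frames)
 *
 * Both bits are cleared up front, so a media change never leaves
 * stale flow-control state behind.
 */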
   10012 
   10013 /* kumeran related (80003, ICH* and PCH*) */
   10014 
   10015 /*
   10016  * wm_kmrn_readreg:
   10017  *
   10018  *	Read a kumeran register
   10019  */
   10020 static int
   10021 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10022 {
   10023 	int rv;
   10024 
   10025 	if (sc->sc_type == WM_T_80003)
   10026 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10027 	else
   10028 		rv = sc->phy.acquire(sc);
   10029 	if (rv != 0) {
   10030 		aprint_error_dev(sc->sc_dev,
   10031 		    "%s: failed to get semaphore\n", __func__);
   10032 		return 0;
   10033 	}
   10034 
   10035 	rv = wm_kmrn_readreg_locked(sc, reg);
   10036 
   10037 	if (sc->sc_type == WM_T_80003)
   10038 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10039 	else
   10040 		sc->phy.release(sc);
   10041 
   10042 	return rv;
   10043 }
   10044 
   10045 static int
   10046 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10047 {
   10048 	int rv;
   10049 
   10050 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10051 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10052 	    KUMCTRLSTA_REN);
   10053 	CSR_WRITE_FLUSH(sc);
   10054 	delay(2);
   10055 
   10056 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10057 
   10058 	return rv;
   10059 }
   10060 
   10061 /*
   10062  * wm_kmrn_writereg:
   10063  *
   10064  *	Write a kumeran register
   10065  */
   10066 static void
   10067 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10068 {
   10069 	int rv;
   10070 
   10071 	if (sc->sc_type == WM_T_80003)
   10072 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10073 	else
   10074 		rv = sc->phy.acquire(sc);
   10075 	if (rv != 0) {
   10076 		aprint_error_dev(sc->sc_dev,
   10077 		    "%s: failed to get semaphore\n", __func__);
   10078 		return;
   10079 	}
   10080 
   10081 	wm_kmrn_writereg_locked(sc, reg, val);
   10082 
   10083 	if (sc->sc_type == WM_T_80003)
   10084 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10085 	else
   10086 		sc->phy.release(sc);
   10087 }
   10088 
   10089 static void
   10090 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10091 {
   10092 
   10093 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10094 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10095 	    (val & KUMCTRLSTA_MASK));
   10096 }
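
/*
 * Usage example (informational): wm_gmii_statchg() above calls
 *
 *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 *
 * i.e. a single KUMCTRLSTA access carries both the register offset
 * (shifted into KUMCTRLSTA_OFFSET) and the 16-bit payload, with
 * KUMCTRLSTA_REN distinguishing reads from writes.
 */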
   10097 
   10098 /* SGMII related */
   10099 
   10100 /*
   10101  * wm_sgmii_uses_mdio
   10102  *
   10103  * Check whether the transaction is to the internal PHY or the external
   10104  * MDIO interface. Return true if it's MDIO.
   10105  */
   10106 static bool
   10107 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10108 {
   10109 	uint32_t reg;
   10110 	bool ismdio = false;
   10111 
   10112 	switch (sc->sc_type) {
   10113 	case WM_T_82575:
   10114 	case WM_T_82576:
   10115 		reg = CSR_READ(sc, WMREG_MDIC);
   10116 		ismdio = ((reg & MDIC_DEST) != 0);
   10117 		break;
   10118 	case WM_T_82580:
   10119 	case WM_T_I350:
   10120 	case WM_T_I354:
   10121 	case WM_T_I210:
   10122 	case WM_T_I211:
   10123 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10124 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10125 		break;
   10126 	default:
   10127 		break;
   10128 	}
   10129 
   10130 	return ismdio;
   10131 }
   10132 
   10133 /*
   10134  * wm_sgmii_readreg:	[mii interface function]
   10135  *
   10136  *	Read a PHY register on the SGMII
   10137  * This could be handled by the PHY layer if we didn't have to lock the
10138  * resource ...
   10139  */
   10140 static int
   10141 wm_sgmii_readreg(device_t self, int phy, int reg)
   10142 {
   10143 	struct wm_softc *sc = device_private(self);
   10144 	uint32_t i2ccmd;
   10145 	int i, rv;
   10146 
   10147 	if (sc->phy.acquire(sc)) {
   10148 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10149 		    __func__);
   10150 		return 0;
   10151 	}
   10152 
   10153 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10154 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10155 	    | I2CCMD_OPCODE_READ;
   10156 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10157 
   10158 	/* Poll the ready bit */
   10159 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10160 		delay(50);
   10161 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10162 		if (i2ccmd & I2CCMD_READY)
   10163 			break;
   10164 	}
   10165 	if ((i2ccmd & I2CCMD_READY) == 0)
   10166 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10167 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10168 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10169 
   10170 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10171 
   10172 	sc->phy.release(sc);
   10173 	return rv;
   10174 }
   10175 
   10176 /*
   10177  * wm_sgmii_writereg:	[mii interface function]
   10178  *
   10179  *	Write a PHY register on the SGMII.
   10180  * This could be handled by the PHY layer if we didn't have to lock the
10181  * resource ...
   10182  */
   10183 static void
   10184 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10185 {
   10186 	struct wm_softc *sc = device_private(self);
   10187 	uint32_t i2ccmd;
   10188 	int i;
   10189 	int val_swapped;
   10190 
   10191 	if (sc->phy.acquire(sc) != 0) {
   10192 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10193 		    __func__);
   10194 		return;
   10195 	}
   10196 	/* Swap the data bytes for the I2C interface */
   10197 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10198 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10199 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10200 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10201 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10202 
   10203 	/* Poll the ready bit */
   10204 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10205 		delay(50);
   10206 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10207 		if (i2ccmd & I2CCMD_READY)
   10208 			break;
   10209 	}
   10210 	if ((i2ccmd & I2CCMD_READY) == 0)
   10211 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10212 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10213 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10214 
   10215 	sc->phy.release(sc);
   10216 }
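
/*
 * Worked example (informational): the I2CCMD data field is byte
 * swapped relative to host order, so writing PHY value 0x1234 puts
 * 0x3412 into the register:
 *
 *	((0x1234 >> 8) & 0x00FF) = 0x0012
 *	((0x1234 << 8) & 0xFF00) = 0x3400
 *	                  OR'ed -> 0x3412
 *
 * and the read path applies the same swap to undo it.
 */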
   10217 
   10218 /* TBI related */
   10219 
   10220 /*
   10221  * wm_tbi_mediainit:
   10222  *
   10223  *	Initialize media for use on 1000BASE-X devices.
   10224  */
   10225 static void
   10226 wm_tbi_mediainit(struct wm_softc *sc)
   10227 {
   10228 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10229 	const char *sep = "";
   10230 
   10231 	if (sc->sc_type < WM_T_82543)
   10232 		sc->sc_tipg = TIPG_WM_DFLT;
   10233 	else
   10234 		sc->sc_tipg = TIPG_LG_DFLT;
   10235 
   10236 	sc->sc_tbi_serdes_anegticks = 5;
   10237 
   10238 	/* Initialize our media structures */
   10239 	sc->sc_mii.mii_ifp = ifp;
   10240 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10241 
   10242 	if ((sc->sc_type >= WM_T_82575)
   10243 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10244 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10245 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10246 	else
   10247 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10248 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10249 
   10250 	/*
   10251 	 * SWD Pins:
   10252 	 *
   10253 	 *	0 = Link LED (output)
   10254 	 *	1 = Loss Of Signal (input)
   10255 	 */
   10256 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10257 
   10258 	/* XXX Perhaps this is only for TBI */
   10259 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10260 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10261 
   10262 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10263 		sc->sc_ctrl &= ~CTRL_LRST;
   10264 
   10265 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10266 
   10267 #define	ADD(ss, mm, dd)							\
   10268 do {									\
   10269 	aprint_normal("%s%s", sep, ss);					\
   10270 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10271 	sep = ", ";							\
   10272 } while (/*CONSTCOND*/0)
   10273 
   10274 	aprint_normal_dev(sc->sc_dev, "");
   10275 
   10276 	if (sc->sc_type == WM_T_I354) {
   10277 		uint32_t status;
   10278 
   10279 		status = CSR_READ(sc, WMREG_STATUS);
   10280 		if (((status & STATUS_2P5_SKU) != 0)
   10281 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10282 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10283 		} else
   10284 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10285 	} else if (sc->sc_type == WM_T_82545) {
   10286 		/* Only 82545 is LX (XXX except SFP) */
   10287 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10288 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10289 	} else {
   10290 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10291 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10292 	}
   10293 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10294 	aprint_normal("\n");
   10295 
   10296 #undef ADD
   10297 
   10298 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10299 }
   10300 
   10301 /*
   10302  * wm_tbi_mediachange:	[ifmedia interface function]
   10303  *
   10304  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10305  */
   10306 static int
   10307 wm_tbi_mediachange(struct ifnet *ifp)
   10308 {
   10309 	struct wm_softc *sc = ifp->if_softc;
   10310 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10311 	uint32_t status;
   10312 	int i;
   10313 
   10314 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10315 		/* XXX need some work for >= 82571 and < 82575 */
   10316 		if (sc->sc_type < WM_T_82575)
   10317 			return 0;
   10318 	}
   10319 
   10320 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10321 	    || (sc->sc_type >= WM_T_82575))
   10322 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10323 
   10324 	sc->sc_ctrl &= ~CTRL_LRST;
   10325 	sc->sc_txcw = TXCW_ANE;
   10326 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10327 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10328 	else if (ife->ifm_media & IFM_FDX)
   10329 		sc->sc_txcw |= TXCW_FD;
   10330 	else
   10331 		sc->sc_txcw |= TXCW_HD;
   10332 
   10333 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10334 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10335 
   10336 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10337 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10338 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10339 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10340 	CSR_WRITE_FLUSH(sc);
   10341 	delay(1000);
   10342 
   10343 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10344 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10345 
   10346 	/*
10347 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
10348 	 * if the optics detect a signal, and clear if they don't.
   10349 	 */
   10350 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10351 		/* Have signal; wait for the link to come up. */
   10352 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10353 			delay(10000);
   10354 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10355 				break;
   10356 		}
   10357 
   10358 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10359 			    device_xname(sc->sc_dev),i));
   10360 
   10361 		status = CSR_READ(sc, WMREG_STATUS);
   10362 		DPRINTF(WM_DEBUG_LINK,
   10363 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10364 			device_xname(sc->sc_dev),status, STATUS_LU));
   10365 		if (status & STATUS_LU) {
   10366 			/* Link is up. */
   10367 			DPRINTF(WM_DEBUG_LINK,
   10368 			    ("%s: LINK: set media -> link up %s\n",
   10369 			    device_xname(sc->sc_dev),
   10370 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10371 
   10372 			/*
10373 			 * NOTE: the hardware updates CTRL's TFCE and RFCE
10374 			 * bits automatically, so we must re-read CTRL into
10375 			 * sc->sc_ctrl
   10375 			 */
   10376 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10377 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10378 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10379 			if (status & STATUS_FD)
   10380 				sc->sc_tctl |=
   10381 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10382 			else
   10383 				sc->sc_tctl |=
   10384 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10385 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10386 				sc->sc_fcrtl |= FCRTL_XONE;
   10387 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10388 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10389 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10390 				      sc->sc_fcrtl);
   10391 			sc->sc_tbi_linkup = 1;
   10392 		} else {
   10393 			if (i == WM_LINKUP_TIMEOUT)
   10394 				wm_check_for_link(sc);
   10395 			/* Link is down. */
   10396 			DPRINTF(WM_DEBUG_LINK,
   10397 			    ("%s: LINK: set media -> link down\n",
   10398 			    device_xname(sc->sc_dev)));
   10399 			sc->sc_tbi_linkup = 0;
   10400 		}
   10401 	} else {
   10402 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10403 		    device_xname(sc->sc_dev)));
   10404 		sc->sc_tbi_linkup = 0;
   10405 	}
   10406 
   10407 	wm_tbi_serdes_set_linkled(sc);
   10408 
   10409 	return 0;
   10410 }
   10411 
   10412 /*
   10413  * wm_tbi_mediastatus:	[ifmedia interface function]
   10414  *
   10415  *	Get the current interface media status on a 1000BASE-X device.
   10416  */
   10417 static void
   10418 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10419 {
   10420 	struct wm_softc *sc = ifp->if_softc;
   10421 	uint32_t ctrl, status;
   10422 
   10423 	ifmr->ifm_status = IFM_AVALID;
   10424 	ifmr->ifm_active = IFM_ETHER;
   10425 
   10426 	status = CSR_READ(sc, WMREG_STATUS);
   10427 	if ((status & STATUS_LU) == 0) {
   10428 		ifmr->ifm_active |= IFM_NONE;
   10429 		return;
   10430 	}
   10431 
   10432 	ifmr->ifm_status |= IFM_ACTIVE;
   10433 	/* Only 82545 is LX */
   10434 	if (sc->sc_type == WM_T_82545)
   10435 		ifmr->ifm_active |= IFM_1000_LX;
   10436 	else
   10437 		ifmr->ifm_active |= IFM_1000_SX;
   10438 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10439 		ifmr->ifm_active |= IFM_FDX;
   10440 	else
   10441 		ifmr->ifm_active |= IFM_HDX;
   10442 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10443 	if (ctrl & CTRL_RFCE)
   10444 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10445 	if (ctrl & CTRL_TFCE)
   10446 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10447 }
   10448 
   10449 /* XXX TBI only */
   10450 static int
   10451 wm_check_for_link(struct wm_softc *sc)
   10452 {
   10453 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10454 	uint32_t rxcw;
   10455 	uint32_t ctrl;
   10456 	uint32_t status;
   10457 	uint32_t sig;
   10458 
   10459 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10460 		/* XXX need some work for >= 82571 */
   10461 		if (sc->sc_type >= WM_T_82571) {
   10462 			sc->sc_tbi_linkup = 1;
   10463 			return 0;
   10464 		}
   10465 	}
   10466 
   10467 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10468 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10469 	status = CSR_READ(sc, WMREG_STATUS);
   10470 
   10471 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10472 
   10473 	DPRINTF(WM_DEBUG_LINK,
   10474 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10475 		device_xname(sc->sc_dev), __func__,
   10476 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10477 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10478 
   10479 	/*
   10480 	 * SWDPIN   LU RXCW
   10481 	 *      0    0    0
   10482 	 *      0    0    1	(should not happen)
   10483 	 *      0    1    0	(should not happen)
   10484 	 *      0    1    1	(should not happen)
   10485 	 *      1    0    0	Disable autonego and force linkup
   10486 	 *      1    0    1	got /C/ but not linkup yet
   10487 	 *      1    1    0	(linkup)
   10488 	 *      1    1    1	If IFM_AUTO, back to autonego
   10489 	 *
   10490 	 */
   10491 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10492 	    && ((status & STATUS_LU) == 0)
   10493 	    && ((rxcw & RXCW_C) == 0)) {
   10494 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10495 			__func__));
   10496 		sc->sc_tbi_linkup = 0;
   10497 		/* Disable auto-negotiation in the TXCW register */
   10498 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10499 
   10500 		/*
   10501 		 * Force link-up and also force full-duplex.
   10502 		 *
10503 		 * NOTE: the hardware updated CTRL's TFCE and RFCE bits
10504 		 * automatically, so we must refresh sc->sc_ctrl
   10505 		 */
   10506 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10507 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10508 	} else if (((status & STATUS_LU) != 0)
   10509 	    && ((rxcw & RXCW_C) != 0)
   10510 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10511 		sc->sc_tbi_linkup = 1;
   10512 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10513 			__func__));
   10514 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10515 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10516 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10517 	    && ((rxcw & RXCW_C) != 0)) {
   10518 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10519 	} else {
   10520 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10521 			status));
   10522 	}
   10523 
   10524 	return 0;
   10525 }
   10526 
   10527 /*
   10528  * wm_tbi_tick:
   10529  *
   10530  *	Check the link on TBI devices.
   10531  *	This function acts as mii_tick().
   10532  */
   10533 static void
   10534 wm_tbi_tick(struct wm_softc *sc)
   10535 {
   10536 	struct mii_data *mii = &sc->sc_mii;
   10537 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10538 	uint32_t status;
   10539 
   10540 	KASSERT(WM_CORE_LOCKED(sc));
   10541 
   10542 	status = CSR_READ(sc, WMREG_STATUS);
   10543 
   10544 	/* XXX is this needed? */
   10545 	(void)CSR_READ(sc, WMREG_RXCW);
   10546 	(void)CSR_READ(sc, WMREG_CTRL);
   10547 
   10548 	/* set link status */
   10549 	if ((status & STATUS_LU) == 0) {
   10550 		DPRINTF(WM_DEBUG_LINK,
   10551 		    ("%s: LINK: checklink -> down\n",
   10552 			device_xname(sc->sc_dev)));
   10553 		sc->sc_tbi_linkup = 0;
   10554 	} else if (sc->sc_tbi_linkup == 0) {
   10555 		DPRINTF(WM_DEBUG_LINK,
   10556 		    ("%s: LINK: checklink -> up %s\n",
   10557 			device_xname(sc->sc_dev),
   10558 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10559 		sc->sc_tbi_linkup = 1;
   10560 		sc->sc_tbi_serdes_ticks = 0;
   10561 	}
   10562 
   10563 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10564 		goto setled;
   10565 
   10566 	if ((status & STATUS_LU) == 0) {
   10567 		sc->sc_tbi_linkup = 0;
   10568 		/* If the timer expired, retry autonegotiation */
   10569 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10570 		    && (++sc->sc_tbi_serdes_ticks
   10571 			>= sc->sc_tbi_serdes_anegticks)) {
   10572 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10573 			sc->sc_tbi_serdes_ticks = 0;
   10574 			/*
   10575 			 * Reset the link, and let autonegotiation do
   10576 			 * its thing
   10577 			 */
   10578 			sc->sc_ctrl |= CTRL_LRST;
   10579 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10580 			CSR_WRITE_FLUSH(sc);
   10581 			delay(1000);
   10582 			sc->sc_ctrl &= ~CTRL_LRST;
   10583 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10584 			CSR_WRITE_FLUSH(sc);
   10585 			delay(1000);
   10586 			CSR_WRITE(sc, WMREG_TXCW,
   10587 			    sc->sc_txcw & ~TXCW_ANE);
   10588 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10589 		}
   10590 	}
   10591 
   10592 setled:
   10593 	wm_tbi_serdes_set_linkled(sc);
   10594 }
   10595 
   10596 /* SERDES related */
   10597 static void
   10598 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10599 {
   10600 	uint32_t reg;
   10601 
   10602 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10603 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10604 		return;
   10605 
   10606 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10607 	reg |= PCS_CFG_PCS_EN;
   10608 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10609 
   10610 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10611 	reg &= ~CTRL_EXT_SWDPIN(3);
   10612 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10613 	CSR_WRITE_FLUSH(sc);
   10614 }
   10615 
   10616 static int
   10617 wm_serdes_mediachange(struct ifnet *ifp)
   10618 {
   10619 	struct wm_softc *sc = ifp->if_softc;
   10620 	bool pcs_autoneg = true; /* XXX */
   10621 	uint32_t ctrl_ext, pcs_lctl, reg;
   10622 
   10623 	/* XXX Currently, this function is not called on 8257[12] */
   10624 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10625 	    || (sc->sc_type >= WM_T_82575))
   10626 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10627 
   10628 	wm_serdes_power_up_link_82575(sc);
   10629 
   10630 	sc->sc_ctrl |= CTRL_SLU;
   10631 
   10632 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10633 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10634 
   10635 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10636 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10637 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10638 	case CTRL_EXT_LINK_MODE_SGMII:
   10639 		pcs_autoneg = true;
   10640 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10641 		break;
   10642 	case CTRL_EXT_LINK_MODE_1000KX:
   10643 		pcs_autoneg = false;
   10644 		/* FALLTHROUGH */
   10645 	default:
   10646 		if ((sc->sc_type == WM_T_82575)
   10647 		    || (sc->sc_type == WM_T_82576)) {
   10648 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10649 				pcs_autoneg = false;
   10650 		}
   10651 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10652 		    | CTRL_FRCFDX;
   10653 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10654 	}
   10655 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10656 
   10657 	if (pcs_autoneg) {
   10658 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10659 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10660 
   10661 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10662 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10663 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10664 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10665 	} else
   10666 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10667 
   10668 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10669 
   10670 
   10671 	return 0;
   10672 }
   10673 
   10674 static void
   10675 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10676 {
   10677 	struct wm_softc *sc = ifp->if_softc;
   10678 	struct mii_data *mii = &sc->sc_mii;
   10679 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10680 	uint32_t pcs_adv, pcs_lpab, reg;
   10681 
   10682 	ifmr->ifm_status = IFM_AVALID;
   10683 	ifmr->ifm_active = IFM_ETHER;
   10684 
   10685 	/* Check PCS */
   10686 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10687 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10688 		ifmr->ifm_active |= IFM_NONE;
   10689 		sc->sc_tbi_linkup = 0;
   10690 		goto setled;
   10691 	}
   10692 
   10693 	sc->sc_tbi_linkup = 1;
   10694 	ifmr->ifm_status |= IFM_ACTIVE;
   10695 	if (sc->sc_type == WM_T_I354) {
   10696 		uint32_t status;
   10697 
   10698 		status = CSR_READ(sc, WMREG_STATUS);
   10699 		if (((status & STATUS_2P5_SKU) != 0)
   10700 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10701 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10702 		} else
   10703 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10704 	} else {
   10705 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10706 		case PCS_LSTS_SPEED_10:
   10707 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10708 			break;
   10709 		case PCS_LSTS_SPEED_100:
   10710 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10711 			break;
   10712 		case PCS_LSTS_SPEED_1000:
   10713 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10714 			break;
   10715 		default:
   10716 			device_printf(sc->sc_dev, "Unknown speed\n");
   10717 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10718 			break;
   10719 		}
   10720 	}
   10721 	if ((reg & PCS_LSTS_FDX) != 0)
   10722 		ifmr->ifm_active |= IFM_FDX;
   10723 	else
   10724 		ifmr->ifm_active |= IFM_HDX;
   10725 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10726 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10727 		/* Check flow */
   10728 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10729 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10730 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10731 			goto setled;
   10732 		}
   10733 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10734 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10735 		DPRINTF(WM_DEBUG_LINK,
   10736 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10737 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10738 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10739 			mii->mii_media_active |= IFM_FLOW
   10740 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10741 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10742 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10743 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10744 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10745 			mii->mii_media_active |= IFM_FLOW
   10746 			    | IFM_ETH_TXPAUSE;
   10747 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10748 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10749 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10750 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10751 			mii->mii_media_active |= IFM_FLOW
   10752 			    | IFM_ETH_RXPAUSE;
   10753 		}
   10754 	}
   10755 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10756 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10757 setled:
   10758 	wm_tbi_serdes_set_linkled(sc);
   10759 }
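
/*
 * The pause resolution above follows IEEE 802.3 Annex 28B.  With SYM
 * the symmetric-pause bit and ASYM the asymmetric-pause bit:
 *
 *	local SYM/ASYM	partner SYM/ASYM	result
 *	  1     -	  1     -		TX and RX pause
 *	  0     1	  1     1		TX pause only
 *	  1     1	  0     1		RX pause only
 *
 * Any other combination resolves to no flow control.
 */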
   10760 
   10761 /*
   10762  * wm_serdes_tick:
   10763  *
   10764  *	Check the link on serdes devices.
   10765  */
   10766 static void
   10767 wm_serdes_tick(struct wm_softc *sc)
   10768 {
   10769 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10770 	struct mii_data *mii = &sc->sc_mii;
   10771 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10772 	uint32_t reg;
   10773 
   10774 	KASSERT(WM_CORE_LOCKED(sc));
   10775 
   10776 	mii->mii_media_status = IFM_AVALID;
   10777 	mii->mii_media_active = IFM_ETHER;
   10778 
   10779 	/* Check PCS */
   10780 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10781 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10782 		mii->mii_media_status |= IFM_ACTIVE;
   10783 		sc->sc_tbi_linkup = 1;
   10784 		sc->sc_tbi_serdes_ticks = 0;
   10785 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10786 		if ((reg & PCS_LSTS_FDX) != 0)
   10787 			mii->mii_media_active |= IFM_FDX;
   10788 		else
   10789 			mii->mii_media_active |= IFM_HDX;
   10790 	} else {
   10791 		mii->mii_media_status |= IFM_NONE;
   10792 		sc->sc_tbi_linkup = 0;
   10793 		/* If the timer expired, retry autonegotiation */
   10794 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10795 		    && (++sc->sc_tbi_serdes_ticks
   10796 			>= sc->sc_tbi_serdes_anegticks)) {
   10797 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10798 			sc->sc_tbi_serdes_ticks = 0;
   10799 			/* XXX */
   10800 			wm_serdes_mediachange(ifp);
   10801 		}
   10802 	}
   10803 
   10804 	wm_tbi_serdes_set_linkled(sc);
   10805 }
   10806 
   10807 /* SFP related */
   10808 
   10809 static int
   10810 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10811 {
   10812 	uint32_t i2ccmd;
   10813 	int i;
   10814 
   10815 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10816 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10817 
   10818 	/* Poll the ready bit */
   10819 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10820 		delay(50);
   10821 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10822 		if (i2ccmd & I2CCMD_READY)
   10823 			break;
   10824 	}
   10825 	if ((i2ccmd & I2CCMD_READY) == 0)
   10826 		return -1;
   10827 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10828 		return -1;
   10829 
   10830 	*data = i2ccmd & 0x00ff;
   10831 
   10832 	return 0;
   10833 }
   10834 
   10835 static uint32_t
   10836 wm_sfp_get_media_type(struct wm_softc *sc)
   10837 {
   10838 	uint32_t ctrl_ext;
   10839 	uint8_t val = 0;
   10840 	int timeout = 3;
   10841 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10842 	int rv = -1;
   10843 
   10844 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10845 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10846 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10847 	CSR_WRITE_FLUSH(sc);
   10848 
   10849 	/* Read SFP module data */
   10850 	while (timeout) {
   10851 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10852 		if (rv == 0)
   10853 			break;
   10854 		delay(100*1000); /* XXX too big */
   10855 		timeout--;
   10856 	}
   10857 	if (rv != 0)
   10858 		goto out;
   10859 	switch (val) {
   10860 	case SFF_SFP_ID_SFF:
   10861 		aprint_normal_dev(sc->sc_dev,
   10862 		    "Module/Connector soldered to board\n");
   10863 		break;
   10864 	case SFF_SFP_ID_SFP:
   10865 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10866 		break;
   10867 	case SFF_SFP_ID_UNKNOWN:
   10868 		goto out;
   10869 	default:
   10870 		break;
   10871 	}
   10872 
   10873 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10874 	if (rv != 0) {
   10875 		goto out;
   10876 	}
   10877 
   10878 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10879 		mediatype = WM_MEDIATYPE_SERDES;
   10880 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10881 		sc->sc_flags |= WM_F_SGMII;
   10882 		mediatype = WM_MEDIATYPE_COPPER;
   10883 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10884 		sc->sc_flags |= WM_F_SGMII;
   10885 		mediatype = WM_MEDIATYPE_SERDES;
   10886 	}
   10887 
   10888 out:
   10889 	/* Restore I2C interface setting */
   10890 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10891 
   10892 	return mediatype;
   10893 }
   10894 
   10895 /*
   10896  * NVM related.
10897  * Microwire, SPI (with or without EERD) and Flash.
   10898  */
   10899 
   10900 /* Both spi and uwire */
   10901 
   10902 /*
   10903  * wm_eeprom_sendbits:
   10904  *
   10905  *	Send a series of bits to the EEPROM.
   10906  */
   10907 static void
   10908 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10909 {
   10910 	uint32_t reg;
   10911 	int x;
   10912 
   10913 	reg = CSR_READ(sc, WMREG_EECD);
   10914 
   10915 	for (x = nbits; x > 0; x--) {
   10916 		if (bits & (1U << (x - 1)))
   10917 			reg |= EECD_DI;
   10918 		else
   10919 			reg &= ~EECD_DI;
   10920 		CSR_WRITE(sc, WMREG_EECD, reg);
   10921 		CSR_WRITE_FLUSH(sc);
   10922 		delay(2);
   10923 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10924 		CSR_WRITE_FLUSH(sc);
   10925 		delay(2);
   10926 		CSR_WRITE(sc, WMREG_EECD, reg);
   10927 		CSR_WRITE_FLUSH(sc);
   10928 		delay(2);
   10929 	}
   10930 }
   10931 
   10932 /*
   10933  * wm_eeprom_recvbits:
   10934  *
   10935  *	Receive a series of bits from the EEPROM.
   10936  */
   10937 static void
   10938 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10939 {
   10940 	uint32_t reg, val;
   10941 	int x;
   10942 
   10943 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10944 
   10945 	val = 0;
   10946 	for (x = nbits; x > 0; x--) {
   10947 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10948 		CSR_WRITE_FLUSH(sc);
   10949 		delay(2);
   10950 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10951 			val |= (1U << (x - 1));
   10952 		CSR_WRITE(sc, WMREG_EECD, reg);
   10953 		CSR_WRITE_FLUSH(sc);
   10954 		delay(2);
   10955 	}
   10956 	*valp = val;
   10957 }
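
/*
 * Timing sketch (informational) for the two bit-bang helpers above:
 * each bit is presented on DI (or sampled from DO) around one SK
 * clock pulse, most significant bit first, with roughly 2us of setup
 * and hold on either edge:
 *
 *	DI  ===X  bit n  X===
 *	SK  ____/========\____
 *	       2us       2us
 */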
   10958 
   10959 /* Microwire */
   10960 
   10961 /*
   10962  * wm_nvm_read_uwire:
   10963  *
   10964  *	Read a word from the EEPROM using the MicroWire protocol.
   10965  */
   10966 static int
   10967 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10968 {
   10969 	uint32_t reg, val;
   10970 	int i;
   10971 
   10972 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10973 		device_xname(sc->sc_dev), __func__));
   10974 
   10975 	for (i = 0; i < wordcnt; i++) {
   10976 		/* Clear SK and DI. */
   10977 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10978 		CSR_WRITE(sc, WMREG_EECD, reg);
   10979 
   10980 		/*
   10981 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10982 		 * and Xen.
   10983 		 *
   10984 		 * We use this workaround only for 82540 because qemu's
   10985 		 * e1000 act as 82540.
   10986 		 */
   10987 		if (sc->sc_type == WM_T_82540) {
   10988 			reg |= EECD_SK;
   10989 			CSR_WRITE(sc, WMREG_EECD, reg);
   10990 			reg &= ~EECD_SK;
   10991 			CSR_WRITE(sc, WMREG_EECD, reg);
   10992 			CSR_WRITE_FLUSH(sc);
   10993 			delay(2);
   10994 		}
   10995 		/* XXX: end of workaround */
   10996 
   10997 		/* Set CHIP SELECT. */
   10998 		reg |= EECD_CS;
   10999 		CSR_WRITE(sc, WMREG_EECD, reg);
   11000 		CSR_WRITE_FLUSH(sc);
   11001 		delay(2);
   11002 
   11003 		/* Shift in the READ command. */
   11004 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11005 
   11006 		/* Shift in address. */
   11007 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11008 
   11009 		/* Shift out the data. */
   11010 		wm_eeprom_recvbits(sc, &val, 16);
   11011 		data[i] = val & 0xffff;
   11012 
   11013 		/* Clear CHIP SELECT. */
   11014 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11015 		CSR_WRITE(sc, WMREG_EECD, reg);
   11016 		CSR_WRITE_FLUSH(sc);
   11017 		delay(2);
   11018 	}
   11019 
   11020 	return 0;
   11021 }
   11022 
   11023 /* SPI */
   11024 
   11025 /*
   11026  * Set SPI and FLASH related information from the EECD register.
   11027  * For 82541 and 82547, the word size is taken from EEPROM.
   11028  */
   11029 static int
   11030 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11031 {
   11032 	int size;
   11033 	uint32_t reg;
   11034 	uint16_t data;
   11035 
   11036 	reg = CSR_READ(sc, WMREG_EECD);
   11037 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11038 
   11039 	/* Read the size of NVM from EECD by default */
   11040 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11041 	switch (sc->sc_type) {
   11042 	case WM_T_82541:
   11043 	case WM_T_82541_2:
   11044 	case WM_T_82547:
   11045 	case WM_T_82547_2:
   11046 		/* Set dummy value to access EEPROM */
   11047 		sc->sc_nvm_wordsize = 64;
   11048 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11049 		reg = data;
   11050 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11051 		if (size == 0)
   11052 			size = 6; /* 64 word size */
   11053 		else
   11054 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11055 		break;
   11056 	case WM_T_80003:
   11057 	case WM_T_82571:
   11058 	case WM_T_82572:
   11059 	case WM_T_82573: /* SPI case */
   11060 	case WM_T_82574: /* SPI case */
   11061 	case WM_T_82583: /* SPI case */
   11062 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11063 		if (size > 14)
   11064 			size = 14;
   11065 		break;
   11066 	case WM_T_82575:
   11067 	case WM_T_82576:
   11068 	case WM_T_82580:
   11069 	case WM_T_I350:
   11070 	case WM_T_I354:
   11071 	case WM_T_I210:
   11072 	case WM_T_I211:
   11073 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11074 		if (size > 15)
   11075 			size = 15;
   11076 		break;
   11077 	default:
   11078 		aprint_error_dev(sc->sc_dev,
   11079 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
11080 		return -1;
   11082 	}
   11083 
   11084 	sc->sc_nvm_wordsize = 1 << size;
   11085 
   11086 	return 0;
   11087 }
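
/*
 * Worked example (informational; assumes NVM_WORD_SIZE_BASE_SHIFT is
 * 6, as in Intel's e1000 code): an EECD size field of 2 on an 82571
 * yields size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 * The cap of 14 limits such parts to 16K words (32KB of NVM).
 */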
   11088 
   11089 /*
   11090  * wm_nvm_ready_spi:
   11091  *
   11092  *	Wait for a SPI EEPROM to be ready for commands.
   11093  */
   11094 static int
   11095 wm_nvm_ready_spi(struct wm_softc *sc)
   11096 {
   11097 	uint32_t val;
   11098 	int usec;
   11099 
   11100 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11101 		device_xname(sc->sc_dev), __func__));
   11102 
   11103 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11104 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11105 		wm_eeprom_recvbits(sc, &val, 8);
   11106 		if ((val & SPI_SR_RDY) == 0)
   11107 			break;
   11108 	}
   11109 	if (usec >= SPI_MAX_RETRIES) {
   11110 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11111 		return 1;
   11112 	}
   11113 	return 0;
   11114 }
   11115 
   11116 /*
   11117  * wm_nvm_read_spi:
   11118  *
11119  *	Read a word from the EEPROM using the SPI protocol.
   11120  */
   11121 static int
   11122 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11123 {
   11124 	uint32_t reg, val;
   11125 	int i;
   11126 	uint8_t opc;
   11127 
   11128 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11129 		device_xname(sc->sc_dev), __func__));
   11130 
   11131 	/* Clear SK and CS. */
   11132 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11133 	CSR_WRITE(sc, WMREG_EECD, reg);
   11134 	CSR_WRITE_FLUSH(sc);
   11135 	delay(2);
   11136 
   11137 	if (wm_nvm_ready_spi(sc))
   11138 		return 1;
   11139 
   11140 	/* Toggle CS to flush commands. */
   11141 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11142 	CSR_WRITE_FLUSH(sc);
   11143 	delay(2);
   11144 	CSR_WRITE(sc, WMREG_EECD, reg);
   11145 	CSR_WRITE_FLUSH(sc);
   11146 	delay(2);
   11147 
   11148 	opc = SPI_OPC_READ;
   11149 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11150 		opc |= SPI_OPC_A8;
   11151 
   11152 	wm_eeprom_sendbits(sc, opc, 8);
   11153 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11154 
   11155 	for (i = 0; i < wordcnt; i++) {
   11156 		wm_eeprom_recvbits(sc, &val, 16);
   11157 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11158 	}
   11159 
   11160 	/* Raise CS and clear SK. */
   11161 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11162 	CSR_WRITE(sc, WMREG_EECD, reg);
   11163 	CSR_WRITE_FLUSH(sc);
   11164 	delay(2);
   11165 
   11166 	return 0;
   11167 }
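
/*
 * Addressing note (informational): parts with 8-bit addressing still
 * span 256 words (512 bytes); the 9th address bit travels as
 * SPI_OPC_A8 inside the opcode.  E.g. word 0x90 becomes byte address
 * 0x120 (word << 1); bit 8 is set, so SPI_OPC_A8 is OR'ed into the
 * opcode and only the low byte, 0x20, goes out as the address.
 */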
   11168 
   11169 /* Using with EERD */
   11170 
   11171 static int
   11172 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11173 {
   11174 	uint32_t attempts = 100000;
   11175 	uint32_t i, reg = 0;
   11176 	int32_t done = -1;
   11177 
   11178 	for (i = 0; i < attempts; i++) {
   11179 		reg = CSR_READ(sc, rw);
   11180 
   11181 		if (reg & EERD_DONE) {
   11182 			done = 0;
   11183 			break;
   11184 		}
   11185 		delay(5);
   11186 	}
   11187 
   11188 	return done;
   11189 }
   11190 
   11191 static int
   11192 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11193     uint16_t *data)
   11194 {
   11195 	int i, eerd = 0;
   11196 	int error = 0;
   11197 
   11198 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11199 		device_xname(sc->sc_dev), __func__));
   11200 
   11201 	for (i = 0; i < wordcnt; i++) {
   11202 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11203 
   11204 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11205 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11206 		if (error != 0)
   11207 			break;
   11208 
   11209 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11210 	}
   11211 
   11212 	return error;
   11213 }
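
/*
 * Usage sketch (informational; the offset and count are hypothetical):
 *
 *	uint16_t words[3];
 *	if (wm_nvm_read_eerd(sc, 0, 3, words) == 0)
 *		use(words);	(words[0..2] now hold NVM words 0..2)
 *
 * Each word is an independent EERD start/poll/read transaction, so a
 * failure mid-run leaves the later entries unwritten.
 */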
   11214 
   11215 /* Flash */
   11216 
   11217 static int
   11218 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11219 {
   11220 	uint32_t eecd;
   11221 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11222 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11223 	uint8_t sig_byte = 0;
   11224 
   11225 	switch (sc->sc_type) {
   11226 	case WM_T_PCH_SPT:
   11227 		/*
   11228 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11229 		 * sector valid bits from the NVM.
   11230 		 */
   11231 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11232 		if ((*bank == 0) || (*bank == 1)) {
   11233 			aprint_error_dev(sc->sc_dev,
   11234 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11235 				*bank);
   11236 			return -1;
   11237 		} else {
   11238 			*bank = *bank - 2;
   11239 			return 0;
   11240 		}
   11241 	case WM_T_ICH8:
   11242 	case WM_T_ICH9:
   11243 		eecd = CSR_READ(sc, WMREG_EECD);
   11244 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11245 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11246 			return 0;
   11247 		}
   11248 		/* FALLTHROUGH */
   11249 	default:
   11250 		/* Default to 0 */
   11251 		*bank = 0;
   11252 
   11253 		/* Check bank 0 */
   11254 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11255 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11256 			*bank = 0;
   11257 			return 0;
   11258 		}
   11259 
   11260 		/* Check bank 1 */
   11261 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11262 		    &sig_byte);
   11263 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11264 			*bank = 1;
   11265 			return 0;
   11266 		}
   11267 	}
   11268 
   11269 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11270 		device_xname(sc->sc_dev)));
   11271 	return -1;
   11272 }
   11273 
   11274 /******************************************************************************
   11275  * This function does initial flash setup so that a new read/write/erase cycle
   11276  * can be started.
   11277  *
   11278  * sc - The pointer to the hw structure
   11279  ****************************************************************************/
   11280 static int32_t
   11281 wm_ich8_cycle_init(struct wm_softc *sc)
   11282 {
   11283 	uint16_t hsfsts;
   11284 	int32_t error = 1;
   11285 	int32_t i     = 0;
   11286 
   11287 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11288 
11289 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   11290 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11291 		return error;
   11292 	}
   11293 
11294 	/* Clear FCERR in HW status by writing a 1 */
11295 	/* Clear DAEL in HW status by writing a 1 */
   11296 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11297 
   11298 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11299 
11300 	/*
11301 	 * Either we should have a hardware SPI cycle-in-progress bit to
11302 	 * check against before starting a new cycle, or the FDONE bit
11303 	 * should be changed in the hardware so that it is 1 after hardware
11304 	 * reset, which could then be used to tell whether a cycle is in
11305 	 * progress or has completed.  We should also have some software
11306 	 * semaphore to guard FDONE or the cycle-in-progress bit so that
11307 	 * accesses by two threads are serialized, or some other way to
11308 	 * keep two threads from starting a cycle at the same time.
11309 	 */
   11310 
   11311 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11312 		/*
   11313 		 * There is no cycle running at present, so we can start a
   11314 		 * cycle
   11315 		 */
   11316 
   11317 		/* Begin by setting Flash Cycle Done. */
   11318 		hsfsts |= HSFSTS_DONE;
   11319 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11320 		error = 0;
   11321 	} else {
   11322 		/*
11323 		 * Otherwise poll for some time so the current cycle has a
11324 		 * chance to end before giving up.
   11325 		 */
   11326 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11327 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11328 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11329 				error = 0;
   11330 				break;
   11331 			}
   11332 			delay(1);
   11333 		}
   11334 		if (error == 0) {
   11335 			/*
    11336 			 * The previous cycle ended within the timeout;
    11337 			 * now set the Flash Cycle Done.
   11338 			 */
   11339 			hsfsts |= HSFSTS_DONE;
   11340 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11341 		}
   11342 	}
   11343 	return error;
   11344 }
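
/*
 * Usage sketch (illustrative only, not driver code): every flash access
 * below pairs wm_ich8_cycle_init() with wm_ich8_flash_cycle(), roughly:
 *
 *	if (wm_ich8_cycle_init(sc) == 0) {
 *		(program HSFCTL and FADDR here)
 *		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
 *	}
 */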
   11345 
   11346 /******************************************************************************
   11347  * This function starts a flash cycle and waits for its completion
   11348  *
   11349  * sc - The pointer to the hw structure
   11350  ****************************************************************************/
   11351 static int32_t
   11352 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11353 {
   11354 	uint16_t hsflctl;
   11355 	uint16_t hsfsts;
   11356 	int32_t error = 1;
   11357 	uint32_t i = 0;
   11358 
   11359 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11360 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11361 	hsflctl |= HSFCTL_GO;
   11362 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11363 
   11364 	/* Wait till FDONE bit is set to 1 */
   11365 	do {
   11366 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11367 		if (hsfsts & HSFSTS_DONE)
   11368 			break;
   11369 		delay(1);
   11370 		i++;
   11371 	} while (i < timeout);
    11372 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11373 		error = 0;
   11374 
   11375 	return error;
   11376 }
   11377 
   11378 /******************************************************************************
   11379  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11380  *
   11381  * sc - The pointer to the hw structure
   11382  * index - The index of the byte or word to read.
    11383  * size - Size of data to read, 1=byte, 2=word, 4=dword
   11384  * data - Pointer to the word to store the value read.
   11385  *****************************************************************************/
   11386 static int32_t
   11387 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11388     uint32_t size, uint32_t *data)
   11389 {
   11390 	uint16_t hsfsts;
   11391 	uint16_t hsflctl;
   11392 	uint32_t flash_linear_address;
   11393 	uint32_t flash_data = 0;
   11394 	int32_t error = 1;
   11395 	int32_t count = 0;
   11396 
    11397 	if (size < 1 || size > 4 || data == NULL ||
   11398 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11399 		return error;
   11400 
   11401 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11402 	    sc->sc_ich8_flash_base;
   11403 
   11404 	do {
   11405 		delay(1);
   11406 		/* Steps */
   11407 		error = wm_ich8_cycle_init(sc);
   11408 		if (error)
   11409 			break;
   11410 
   11411 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11412 		/* The byte count field holds size - 1: 0/1/3 for 1/2/4 bytes. */
   11413 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11414 		    & HSFCTL_BCOUNT_MASK;
   11415 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11416 		if (sc->sc_type == WM_T_PCH_SPT) {
   11417 			/*
    11418 			 * In SPT, this register is in LAN memory space, not
    11419 			 * flash.  Therefore, only 32 bit access is supported.
   11420 			 */
   11421 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11422 			    (uint32_t)hsflctl);
   11423 		} else
   11424 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11425 
   11426 		/*
   11427 		 * Write the last 24 bits of index into Flash Linear address
   11428 		 * field in Flash Address
   11429 		 */
    11430 		/* TODO: maybe check the index against the size of the flash */
   11431 
   11432 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11433 
   11434 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11435 
    11436 		/*
    11437 		 * If FCERR is set, clear it and retry the whole sequence up
    11438 		 * to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read the
    11439 		 * result from Flash Data0, least significant byte first.
    11440 		 */
   11442 		if (error == 0) {
   11443 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11444 			if (size == 1)
   11445 				*data = (uint8_t)(flash_data & 0x000000FF);
   11446 			else if (size == 2)
   11447 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11448 			else if (size == 4)
   11449 				*data = (uint32_t)flash_data;
   11450 			break;
   11451 		} else {
   11452 			/*
   11453 			 * If we've gotten here, then things are probably
   11454 			 * completely hosed, but if the error condition is
   11455 			 * detected, it won't hurt to give it another try...
   11456 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11457 			 */
   11458 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11459 			if (hsfsts & HSFSTS_ERR) {
   11460 				/* Repeat for some time before giving up. */
   11461 				continue;
   11462 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11463 				break;
   11464 		}
   11465 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11466 
   11467 	return error;
   11468 }
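
/*
 * Usage sketch (illustrative; the byte/word/dword wrappers below do
 * exactly this), where byte_off stands for a byte offset into the
 * flash bank:
 *
 *	uint32_t tmp;
 *	uint16_t word;
 *	if (wm_read_ich8_data(sc, byte_off, 2, &tmp) == 0)
 *		word = (uint16_t)tmp;
 */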
   11469 
   11470 /******************************************************************************
   11471  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11472  *
   11473  * sc - pointer to wm_hw structure
   11474  * index - The index of the byte to read.
   11475  * data - Pointer to a byte to store the value read.
   11476  *****************************************************************************/
   11477 static int32_t
   11478 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11479 {
   11480 	int32_t status;
   11481 	uint32_t word = 0;
   11482 
   11483 	status = wm_read_ich8_data(sc, index, 1, &word);
   11484 	if (status == 0)
   11485 		*data = (uint8_t)word;
   11486 	else
   11487 		*data = 0;
   11488 
   11489 	return status;
   11490 }
   11491 
   11492 /******************************************************************************
   11493  * Reads a word from the NVM using the ICH8 flash access registers.
   11494  *
   11495  * sc - pointer to wm_hw structure
   11496  * index - The starting byte index of the word to read.
   11497  * data - Pointer to a word to store the value read.
   11498  *****************************************************************************/
   11499 static int32_t
   11500 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11501 {
   11502 	int32_t status;
   11503 	uint32_t word = 0;
   11504 
   11505 	status = wm_read_ich8_data(sc, index, 2, &word);
   11506 	if (status == 0)
   11507 		*data = (uint16_t)word;
   11508 	else
   11509 		*data = 0;
   11510 
   11511 	return status;
   11512 }
   11513 
   11514 /******************************************************************************
   11515  * Reads a dword from the NVM using the ICH8 flash access registers.
   11516  *
   11517  * sc - pointer to wm_hw structure
    11518  * index - The starting byte index of the dword to read.
    11519  * data - Pointer to a dword to store the value read.
   11520  *****************************************************************************/
   11521 static int32_t
   11522 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11523 {
   11524 	int32_t status;
   11525 
   11526 	status = wm_read_ich8_data(sc, index, 4, data);
   11527 	return status;
   11528 }
   11529 
   11530 /******************************************************************************
   11531  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11532  * register.
   11533  *
   11534  * sc - Struct containing variables accessed by shared code
   11535  * offset - offset of word in the EEPROM to read
   11536  * data - word read from the EEPROM
   11537  * words - number of words to read
   11538  *****************************************************************************/
   11539 static int
   11540 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11541 {
   11542 	int32_t  error = 0;
   11543 	uint32_t flash_bank = 0;
   11544 	uint32_t act_offset = 0;
   11545 	uint32_t bank_offset = 0;
   11546 	uint16_t word = 0;
   11547 	uint16_t i = 0;
   11548 
   11549 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11550 		device_xname(sc->sc_dev), __func__));
   11551 
   11552 	/*
   11553 	 * We need to know which is the valid flash bank.  In the event
   11554 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11555 	 * managing flash_bank.  So it cannot be trusted and needs
   11556 	 * to be updated with each read.
   11557 	 */
   11558 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11559 	if (error) {
   11560 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11561 			device_xname(sc->sc_dev)));
   11562 		flash_bank = 0;
   11563 	}
   11564 
    11565 	/*
    11566 	 * Adjust the offset appropriately if we're on bank 1; the bank
    11567 	 * size is in words, so double it for a byte offset.
    11568 	 */
   11569 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11570 
   11571 	error = wm_get_swfwhw_semaphore(sc);
   11572 	if (error) {
   11573 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11574 		    __func__);
   11575 		return error;
   11576 	}
   11577 
   11578 	for (i = 0; i < words; i++) {
   11579 		/* The NVM part needs a byte offset, hence * 2 */
   11580 		act_offset = bank_offset + ((offset + i) * 2);
   11581 		error = wm_read_ich8_word(sc, act_offset, &word);
   11582 		if (error) {
   11583 			aprint_error_dev(sc->sc_dev,
   11584 			    "%s: failed to read NVM\n", __func__);
   11585 			break;
   11586 		}
   11587 		data[i] = word;
   11588 	}
   11589 
   11590 	wm_put_swfwhw_semaphore(sc);
   11591 	return error;
   11592 }
   11593 
   11594 /******************************************************************************
   11595  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11596  * register.
   11597  *
   11598  * sc - Struct containing variables accessed by shared code
   11599  * offset - offset of word in the EEPROM to read
   11600  * data - word read from the EEPROM
   11601  * words - number of words to read
   11602  *****************************************************************************/
   11603 static int
   11604 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11605 {
   11606 	int32_t  error = 0;
   11607 	uint32_t flash_bank = 0;
   11608 	uint32_t act_offset = 0;
   11609 	uint32_t bank_offset = 0;
   11610 	uint32_t dword = 0;
   11611 	uint16_t i = 0;
   11612 
   11613 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11614 		device_xname(sc->sc_dev), __func__));
   11615 
   11616 	/*
   11617 	 * We need to know which is the valid flash bank.  In the event
   11618 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11619 	 * managing flash_bank.  So it cannot be trusted and needs
   11620 	 * to be updated with each read.
   11621 	 */
   11622 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11623 	if (error) {
   11624 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11625 			device_xname(sc->sc_dev)));
   11626 		flash_bank = 0;
   11627 	}
   11628 
    11629 	/*
    11630 	 * Adjust the offset appropriately if we're on bank 1; the bank
    11631 	 * size is in words, so double it for a byte offset.
    11632 	 */
   11633 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11634 
   11635 	error = wm_get_swfwhw_semaphore(sc);
   11636 	if (error) {
   11637 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11638 		    __func__);
   11639 		return error;
   11640 	}
   11641 
   11642 	for (i = 0; i < words; i++) {
   11643 		/* The NVM part needs a byte offset, hence * 2 */
   11644 		act_offset = bank_offset + ((offset + i) * 2);
   11645 		/* but we must read dword aligned, so mask ... */
   11646 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11647 		if (error) {
   11648 			aprint_error_dev(sc->sc_dev,
   11649 			    "%s: failed to read NVM\n", __func__);
   11650 			break;
   11651 		}
   11652 		/* ... and pick out low or high word */
   11653 		if ((act_offset & 0x2) == 0)
   11654 			data[i] = (uint16_t)(dword & 0xFFFF);
   11655 		else
   11656 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11657 	}
   11658 
   11659 	wm_put_swfwhw_semaphore(sc);
   11660 	return error;
   11661 }
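
/*
 * Worked example of the alignment logic above (illustrative): word
 * offset 3 in bank 0 gives act_offset = 6; the dword is read from
 * 6 & ~0x3 = 4, and since (6 & 0x2) != 0 the result is taken from
 * the high 16 bits of that dword.
 */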
   11662 
   11663 /* iNVM */
   11664 
   11665 static int
   11666 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11667 {
    11668 	int32_t  rv = -1;	/* Not finding the word is an error */
   11669 	uint32_t invm_dword;
   11670 	uint16_t i;
   11671 	uint8_t record_type, word_address;
   11672 
   11673 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11674 		device_xname(sc->sc_dev), __func__));
   11675 
   11676 	for (i = 0; i < INVM_SIZE; i++) {
   11677 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11678 		/* Get record type */
   11679 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11680 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11681 			break;
   11682 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11683 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11684 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11685 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11686 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11687 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11688 			if (word_address == address) {
   11689 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11690 				rv = 0;
   11691 				break;
   11692 			}
   11693 		}
   11694 	}
   11695 
   11696 	return rv;
   11697 }
   11698 
   11699 static int
   11700 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11701 {
   11702 	int rv = 0;
   11703 	int i;
   11704 
   11705 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11706 		device_xname(sc->sc_dev), __func__));
   11707 
   11708 	for (i = 0; i < words; i++) {
   11709 		switch (offset + i) {
   11710 		case NVM_OFF_MACADDR:
   11711 		case NVM_OFF_MACADDR1:
   11712 		case NVM_OFF_MACADDR2:
   11713 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11714 			if (rv != 0) {
   11715 				data[i] = 0xffff;
   11716 				rv = -1;
   11717 			}
   11718 			break;
   11719 		case NVM_OFF_CFG2:
   11720 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11721 			if (rv != 0) {
   11722 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11723 				rv = 0;
   11724 			}
   11725 			break;
   11726 		case NVM_OFF_CFG4:
   11727 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11728 			if (rv != 0) {
   11729 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11730 				rv = 0;
   11731 			}
   11732 			break;
   11733 		case NVM_OFF_LED_1_CFG:
   11734 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11735 			if (rv != 0) {
   11736 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11737 				rv = 0;
   11738 			}
   11739 			break;
   11740 		case NVM_OFF_LED_0_2_CFG:
   11741 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11742 			if (rv != 0) {
   11743 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11744 				rv = 0;
   11745 			}
   11746 			break;
   11747 		case NVM_OFF_ID_LED_SETTINGS:
   11748 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11749 			if (rv != 0) {
   11750 				*data = ID_LED_RESERVED_FFFF;
   11751 				rv = 0;
   11752 			}
   11753 			break;
   11754 		default:
   11755 			DPRINTF(WM_DEBUG_NVM,
   11756 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11757 			*data = NVM_RESERVED_WORD;
   11758 			break;
   11759 		}
   11760 	}
   11761 
   11762 	return rv;
   11763 }
   11764 
   11765 /* Lock, detecting NVM type, validate checksum, version and read */
   11766 
   11767 /*
   11768  * wm_nvm_acquire:
   11769  *
   11770  *	Perform the EEPROM handshake required on some chips.
   11771  */
   11772 static int
   11773 wm_nvm_acquire(struct wm_softc *sc)
   11774 {
   11775 	uint32_t reg;
   11776 	int x;
   11777 	int ret = 0;
   11778 
   11779 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11780 		device_xname(sc->sc_dev), __func__));
   11781 
   11782 	if (sc->sc_type >= WM_T_ICH8) {
   11783 		ret = wm_get_nvm_ich8lan(sc);
   11784 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11785 		ret = wm_get_swfwhw_semaphore(sc);
   11786 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11787 		/* This will also do wm_get_swsm_semaphore() if needed */
   11788 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11789 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11790 		ret = wm_get_swsm_semaphore(sc);
   11791 	}
   11792 
   11793 	if (ret) {
   11794 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11795 			__func__);
   11796 		return 1;
   11797 	}
   11798 
   11799 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11800 		reg = CSR_READ(sc, WMREG_EECD);
   11801 
   11802 		/* Request EEPROM access. */
   11803 		reg |= EECD_EE_REQ;
   11804 		CSR_WRITE(sc, WMREG_EECD, reg);
   11805 
   11806 		/* ..and wait for it to be granted. */
   11807 		for (x = 0; x < 1000; x++) {
   11808 			reg = CSR_READ(sc, WMREG_EECD);
   11809 			if (reg & EECD_EE_GNT)
   11810 				break;
   11811 			delay(5);
   11812 		}
   11813 		if ((reg & EECD_EE_GNT) == 0) {
   11814 			aprint_error_dev(sc->sc_dev,
   11815 			    "could not acquire EEPROM GNT\n");
   11816 			reg &= ~EECD_EE_REQ;
   11817 			CSR_WRITE(sc, WMREG_EECD, reg);
   11818 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11819 				wm_put_swfwhw_semaphore(sc);
   11820 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11821 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11822 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11823 				wm_put_swsm_semaphore(sc);
   11824 			return 1;
   11825 		}
   11826 	}
   11827 
   11828 	return 0;
   11829 }
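
/*
 * Illustrative pairing (wm_nvm_read() below is the real caller): every
 * NVM access is bracketed by the acquire/release pair, e.g.
 *
 *	if (wm_nvm_acquire(sc))
 *		return 1;
 *	rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
 *	wm_nvm_release(sc);
 */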
   11830 
   11831 /*
   11832  * wm_nvm_release:
   11833  *
   11834  *	Release the EEPROM mutex.
   11835  */
   11836 static void
   11837 wm_nvm_release(struct wm_softc *sc)
   11838 {
   11839 	uint32_t reg;
   11840 
   11841 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11842 		device_xname(sc->sc_dev), __func__));
   11843 
   11844 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11845 		reg = CSR_READ(sc, WMREG_EECD);
   11846 		reg &= ~EECD_EE_REQ;
   11847 		CSR_WRITE(sc, WMREG_EECD, reg);
   11848 	}
   11849 
   11850 	if (sc->sc_type >= WM_T_ICH8) {
   11851 		wm_put_nvm_ich8lan(sc);
   11852 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11853 		wm_put_swfwhw_semaphore(sc);
   11854 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11855 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11856 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11857 		wm_put_swsm_semaphore(sc);
   11858 }
   11859 
   11860 static int
   11861 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11862 {
   11863 	uint32_t eecd = 0;
   11864 
   11865 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11866 	    || sc->sc_type == WM_T_82583) {
   11867 		eecd = CSR_READ(sc, WMREG_EECD);
   11868 
   11869 		/* Isolate bits 15 & 16 */
   11870 		eecd = ((eecd >> 15) & 0x03);
   11871 
   11872 		/* If both bits are set, device is Flash type */
   11873 		if (eecd == 0x03)
   11874 			return 0;
   11875 	}
   11876 	return 1;
   11877 }
   11878 
   11879 static int
   11880 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11881 {
   11882 	uint32_t eec;
   11883 
   11884 	eec = CSR_READ(sc, WMREG_EEC);
   11885 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11886 		return 1;
   11887 
   11888 	return 0;
   11889 }
   11890 
   11891 /*
   11892  * wm_nvm_validate_checksum
   11893  *
   11894  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11895  */
   11896 static int
   11897 wm_nvm_validate_checksum(struct wm_softc *sc)
   11898 {
   11899 	uint16_t checksum;
   11900 	uint16_t eeprom_data;
   11901 #ifdef WM_DEBUG
   11902 	uint16_t csum_wordaddr, valid_checksum;
   11903 #endif
   11904 	int i;
   11905 
   11906 	checksum = 0;
   11907 
   11908 	/* Don't check for I211 */
   11909 	if (sc->sc_type == WM_T_I211)
   11910 		return 0;
   11911 
   11912 #ifdef WM_DEBUG
   11913 	if (sc->sc_type == WM_T_PCH_LPT) {
   11914 		csum_wordaddr = NVM_OFF_COMPAT;
   11915 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11916 	} else {
   11917 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11918 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11919 	}
   11920 
   11921 	/* Dump EEPROM image for debug */
   11922 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11923 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11924 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11925 		/* XXX PCH_SPT? */
   11926 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11927 		if ((eeprom_data & valid_checksum) == 0) {
   11928 			DPRINTF(WM_DEBUG_NVM,
   11929 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11930 				device_xname(sc->sc_dev), eeprom_data,
   11931 				    valid_checksum));
   11932 		}
   11933 	}
   11934 
   11935 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11936 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11937 		for (i = 0; i < NVM_SIZE; i++) {
   11938 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11939 				printf("XXXX ");
   11940 			else
   11941 				printf("%04hx ", eeprom_data);
   11942 			if (i % 8 == 7)
   11943 				printf("\n");
   11944 		}
   11945 	}
   11946 
   11947 #endif /* WM_DEBUG */
   11948 
   11949 	for (i = 0; i < NVM_SIZE; i++) {
   11950 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11951 			return 1;
   11952 		checksum += eeprom_data;
   11953 	}
   11954 
   11955 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11956 #ifdef WM_DEBUG
   11957 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11958 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11959 #endif
   11960 	}
   11961 
   11962 	return 0;
   11963 }
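
/*
 * Sketch of the checksum rule above (illustrative, with word[], i and
 * valid as stand-ins): the low 16 bits of the sum of the first NVM_SIZE
 * words must equal NVM_CHECKSUM:
 *
 *	uint16_t sum = 0;
 *	for (i = 0; i < NVM_SIZE; i++)
 *		sum += word[i];
 *	valid = (sum == (uint16_t)NVM_CHECKSUM);
 */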
   11964 
   11965 static void
   11966 wm_nvm_version_invm(struct wm_softc *sc)
   11967 {
   11968 	uint32_t dword;
   11969 
   11970 	/*
    11971 	 * Linux's code to decode the version is very strange, so we don't
    11972 	 * obey that algorithm and just use word 61 as the document
    11973 	 * describes.  Perhaps it's not perfect though...
   11974 	 *
   11975 	 * Example:
   11976 	 *
   11977 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11978 	 */
   11979 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11980 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11981 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11982 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11983 }
   11984 
   11985 static void
   11986 wm_nvm_version(struct wm_softc *sc)
   11987 {
   11988 	uint16_t major, minor, build, patch;
   11989 	uint16_t uid0, uid1;
   11990 	uint16_t nvm_data;
   11991 	uint16_t off;
   11992 	bool check_version = false;
   11993 	bool check_optionrom = false;
   11994 	bool have_build = false;
   11995 
   11996 	/*
   11997 	 * Version format:
   11998 	 *
   11999 	 * XYYZ
   12000 	 * X0YZ
   12001 	 * X0YY
   12002 	 *
   12003 	 * Example:
   12004 	 *
   12005 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12006 	 *	82571	0x50a6	5.10.6?
   12007 	 *	82572	0x506a	5.6.10?
   12008 	 *	82572EI	0x5069	5.6.9?
   12009 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12010 	 *		0x2013	2.1.3?
    12011 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12012 	 */
   12013 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12014 	switch (sc->sc_type) {
   12015 	case WM_T_82571:
   12016 	case WM_T_82572:
   12017 	case WM_T_82574:
   12018 	case WM_T_82583:
   12019 		check_version = true;
   12020 		check_optionrom = true;
   12021 		have_build = true;
   12022 		break;
   12023 	case WM_T_82575:
   12024 	case WM_T_82576:
   12025 	case WM_T_82580:
   12026 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12027 			check_version = true;
   12028 		break;
   12029 	case WM_T_I211:
   12030 		wm_nvm_version_invm(sc);
   12031 		goto printver;
   12032 	case WM_T_I210:
   12033 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12034 			wm_nvm_version_invm(sc);
   12035 			goto printver;
   12036 		}
   12037 		/* FALLTHROUGH */
   12038 	case WM_T_I350:
   12039 	case WM_T_I354:
   12040 		check_version = true;
   12041 		check_optionrom = true;
   12042 		break;
   12043 	default:
   12044 		return;
   12045 	}
   12046 	if (check_version) {
   12047 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12048 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12049 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12050 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12051 			build = nvm_data & NVM_BUILD_MASK;
   12052 			have_build = true;
   12053 		} else
   12054 			minor = nvm_data & 0x00ff;
   12055 
    12056 		/* Convert minor from BCD-style hex to decimal */
   12057 		minor = (minor / 16) * 10 + (minor % 16);
   12058 		sc->sc_nvm_ver_major = major;
   12059 		sc->sc_nvm_ver_minor = minor;
   12060 
   12061 printver:
   12062 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12063 		    sc->sc_nvm_ver_minor);
   12064 		if (have_build) {
   12065 			sc->sc_nvm_ver_build = build;
   12066 			aprint_verbose(".%d", build);
   12067 		}
   12068 	}
   12069 	if (check_optionrom) {
   12070 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12071 		/* Option ROM Version */
   12072 		if ((off != 0x0000) && (off != 0xffff)) {
   12073 			off += NVM_COMBO_VER_OFF;
   12074 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12075 			wm_nvm_read(sc, off, 1, &uid0);
   12076 			if ((uid0 != 0) && (uid0 != 0xffff)
   12077 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12078 				/* 16bits */
   12079 				major = uid0 >> 8;
   12080 				build = (uid0 << 8) | (uid1 >> 8);
   12081 				patch = uid1 & 0x00ff;
   12082 				aprint_verbose(", option ROM Version %d.%d.%d",
   12083 				    major, build, patch);
   12084 			}
   12085 		}
   12086 	}
   12087 
   12088 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12089 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12090 }
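
/*
 * Worked example (illustrative, matching the table above): the 82571
 * value 0x50a2 decodes as major = 0x5, minor = 0x0a which the decimal
 * conversion maps to 10, and build = 0x2, printed as "5.10.2".
 */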
   12091 
   12092 /*
   12093  * wm_nvm_read:
   12094  *
   12095  *	Read data from the serial EEPROM.
   12096  */
   12097 static int
   12098 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12099 {
   12100 	int rv;
   12101 
   12102 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12103 		device_xname(sc->sc_dev), __func__));
   12104 
   12105 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12106 		return 1;
   12107 
   12108 	if (wm_nvm_acquire(sc))
   12109 		return 1;
   12110 
   12111 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12112 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12113 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12114 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12115 	else if (sc->sc_type == WM_T_PCH_SPT)
   12116 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12117 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12118 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12119 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12120 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12121 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12122 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12123 	else
   12124 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12125 
   12126 	wm_nvm_release(sc);
   12127 	return rv;
   12128 }
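
/*
 * Typical use (illustrative sketch, with myea[] as a hypothetical
 * destination buffer): reading the three station address words:
 *
 *	uint16_t myea[3];
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) != 0)
 *		(fall back or fail the attach)
 */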
   12129 
   12130 /*
   12131  * Hardware semaphores.
    12132  * Very complex...
   12133  */
   12134 
   12135 static int
   12136 wm_get_null(struct wm_softc *sc)
   12137 {
   12138 
   12139 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12140 		device_xname(sc->sc_dev), __func__));
   12141 	return 0;
   12142 }
   12143 
   12144 static void
   12145 wm_put_null(struct wm_softc *sc)
   12146 {
   12147 
   12148 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12149 		device_xname(sc->sc_dev), __func__));
   12150 	return;
   12151 }
   12152 
   12153 /*
   12154  * Get hardware semaphore.
   12155  * Same as e1000_get_hw_semaphore_generic()
   12156  */
   12157 static int
   12158 wm_get_swsm_semaphore(struct wm_softc *sc)
   12159 {
   12160 	int32_t timeout;
   12161 	uint32_t swsm;
   12162 
   12163 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12164 		device_xname(sc->sc_dev), __func__));
   12165 	KASSERT(sc->sc_nvm_wordsize > 0);
   12166 
   12167 	/* Get the SW semaphore. */
   12168 	timeout = sc->sc_nvm_wordsize + 1;
   12169 	while (timeout) {
   12170 		swsm = CSR_READ(sc, WMREG_SWSM);
   12171 
   12172 		if ((swsm & SWSM_SMBI) == 0)
   12173 			break;
   12174 
   12175 		delay(50);
   12176 		timeout--;
   12177 	}
   12178 
   12179 	if (timeout == 0) {
   12180 		aprint_error_dev(sc->sc_dev,
   12181 		    "could not acquire SWSM SMBI\n");
   12182 		return 1;
   12183 	}
   12184 
   12185 	/* Get the FW semaphore. */
   12186 	timeout = sc->sc_nvm_wordsize + 1;
   12187 	while (timeout) {
   12188 		swsm = CSR_READ(sc, WMREG_SWSM);
   12189 		swsm |= SWSM_SWESMBI;
   12190 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12191 		/* If we managed to set the bit we got the semaphore. */
   12192 		swsm = CSR_READ(sc, WMREG_SWSM);
   12193 		if (swsm & SWSM_SWESMBI)
   12194 			break;
   12195 
   12196 		delay(50);
   12197 		timeout--;
   12198 	}
   12199 
   12200 	if (timeout == 0) {
   12201 		aprint_error_dev(sc->sc_dev,
   12202 		    "could not acquire SWSM SWESMBI\n");
   12203 		/* Release semaphores */
   12204 		wm_put_swsm_semaphore(sc);
   12205 		return 1;
   12206 	}
   12207 	return 0;
   12208 }
   12209 
   12210 /*
   12211  * Put hardware semaphore.
   12212  * Same as e1000_put_hw_semaphore_generic()
   12213  */
   12214 static void
   12215 wm_put_swsm_semaphore(struct wm_softc *sc)
   12216 {
   12217 	uint32_t swsm;
   12218 
   12219 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12220 		device_xname(sc->sc_dev), __func__));
   12221 
   12222 	swsm = CSR_READ(sc, WMREG_SWSM);
   12223 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12224 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12225 }
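
/*
 * Sketch of the SWSM protocol (illustrative): SMBI arbitrates among
 * software agents and SWESMBI between software and firmware, so a
 * caller brackets its access with the get/put pair:
 *
 *	if (wm_get_swsm_semaphore(sc) == 0) {
 *		(touch the shared resource)
 *		wm_put_swsm_semaphore(sc);
 *	}
 */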
   12226 
   12227 /*
   12228  * Get SW/FW semaphore.
   12229  * Same as e1000_acquire_swfw_sync_82575().
   12230  */
   12231 static int
   12232 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12233 {
   12234 	uint32_t swfw_sync;
   12235 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12236 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    12237 	int timeout;
   12238 
   12239 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12240 		device_xname(sc->sc_dev), __func__));
   12241 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12242 
   12243 	for (timeout = 0; timeout < 200; timeout++) {
   12244 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12245 			if (wm_get_swsm_semaphore(sc)) {
   12246 				aprint_error_dev(sc->sc_dev,
   12247 				    "%s: failed to get semaphore\n",
   12248 				    __func__);
   12249 				return 1;
   12250 			}
   12251 		}
   12252 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12253 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12254 			swfw_sync |= swmask;
   12255 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12256 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12257 				wm_put_swsm_semaphore(sc);
   12258 			return 0;
   12259 		}
   12260 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12261 			wm_put_swsm_semaphore(sc);
   12262 		delay(5000);
   12263 	}
   12264 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12265 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12266 	return 1;
   12267 }
   12268 
   12269 static void
   12270 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12271 {
   12272 	uint32_t swfw_sync;
   12273 
   12274 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12275 		device_xname(sc->sc_dev), __func__));
   12276 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12277 
   12278 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12279 		while (wm_get_swsm_semaphore(sc) != 0)
   12280 			continue;
   12281 	}
   12282 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12283 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12284 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12285 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12286 		wm_put_swsm_semaphore(sc);
   12287 }
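
/*
 * Illustrative usage: NVM code takes the EEPROM bit of SW_FW_SYNC, as
 * wm_nvm_acquire() above does:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
 *		(access the EEPROM)
 *		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 *	}
 */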
   12288 
   12289 static int
   12290 wm_get_phy_82575(struct wm_softc *sc)
   12291 {
   12292 
   12293 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12294 		device_xname(sc->sc_dev), __func__));
   12295 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12296 }
   12297 
   12298 static void
   12299 wm_put_phy_82575(struct wm_softc *sc)
   12300 {
   12301 
   12302 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12303 		device_xname(sc->sc_dev), __func__));
   12304 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12305 }
   12306 
   12307 static int
   12308 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12309 {
   12310 	uint32_t ext_ctrl;
    12311 	int timeout;
   12312 
   12313 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12314 		device_xname(sc->sc_dev), __func__));
   12315 
   12316 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12317 	for (timeout = 0; timeout < 200; timeout++) {
   12318 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12319 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12320 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12321 
   12322 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12323 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12324 			return 0;
   12325 		delay(5000);
   12326 	}
   12327 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12328 	    device_xname(sc->sc_dev), ext_ctrl);
   12329 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12330 	return 1;
   12331 }
   12332 
   12333 static void
   12334 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12335 {
   12336 	uint32_t ext_ctrl;
   12337 
   12338 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12339 		device_xname(sc->sc_dev), __func__));
   12340 
   12341 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12342 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12343 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12344 
   12345 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12346 }
   12347 
   12348 static int
   12349 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12350 {
   12351 	uint32_t ext_ctrl;
   12352 	int timeout;
   12353 
   12354 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12355 		device_xname(sc->sc_dev), __func__));
   12356 	mutex_enter(sc->sc_ich_phymtx);
   12357 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12358 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12359 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12360 			break;
   12361 		delay(1000);
   12362 	}
   12363 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12364 		printf("%s: SW has already locked the resource\n",
   12365 		    device_xname(sc->sc_dev));
   12366 		goto out;
   12367 	}
   12368 
   12369 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12370 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12371 	for (timeout = 0; timeout < 1000; timeout++) {
   12372 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12373 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12374 			break;
   12375 		delay(1000);
   12376 	}
   12377 	if (timeout >= 1000) {
   12378 		printf("%s: failed to acquire semaphore\n",
   12379 		    device_xname(sc->sc_dev));
   12380 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12381 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12382 		goto out;
   12383 	}
   12384 	return 0;
   12385 
   12386 out:
   12387 	mutex_exit(sc->sc_ich_phymtx);
   12388 	return 1;
   12389 }
   12390 
   12391 static void
   12392 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12393 {
   12394 	uint32_t ext_ctrl;
   12395 
   12396 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12397 		device_xname(sc->sc_dev), __func__));
   12398 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12399 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12400 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12401 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12402 	} else {
   12403 		printf("%s: Semaphore unexpectedly released\n",
   12404 		    device_xname(sc->sc_dev));
   12405 	}
   12406 
   12407 	mutex_exit(sc->sc_ich_phymtx);
   12408 }
   12409 
   12410 static int
   12411 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12412 {
   12413 
   12414 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12415 		device_xname(sc->sc_dev), __func__));
   12416 	mutex_enter(sc->sc_ich_nvmmtx);
   12417 
   12418 	return 0;
   12419 }
   12420 
   12421 static void
   12422 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12423 {
   12424 
   12425 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12426 		device_xname(sc->sc_dev), __func__));
   12427 	mutex_exit(sc->sc_ich_nvmmtx);
   12428 }
   12429 
   12430 static int
   12431 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12432 {
   12433 	int i = 0;
   12434 	uint32_t reg;
   12435 
   12436 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12437 		device_xname(sc->sc_dev), __func__));
   12438 
   12439 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12440 	do {
   12441 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12442 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12443 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12444 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12445 			break;
   12446 		delay(2*1000);
   12447 		i++;
   12448 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12449 
   12450 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12451 		wm_put_hw_semaphore_82573(sc);
   12452 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12453 		    device_xname(sc->sc_dev));
   12454 		return -1;
   12455 	}
   12456 
   12457 	return 0;
   12458 }
   12459 
   12460 static void
   12461 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12462 {
   12463 	uint32_t reg;
   12464 
   12465 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12466 		device_xname(sc->sc_dev), __func__));
   12467 
   12468 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12469 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12470 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12471 }
   12472 
   12473 /*
   12474  * Management mode and power management related subroutines.
   12475  * BMC, AMT, suspend/resume and EEE.
   12476  */
   12477 
   12478 #ifdef WM_WOL
   12479 static int
   12480 wm_check_mng_mode(struct wm_softc *sc)
   12481 {
   12482 	int rv;
   12483 
   12484 	switch (sc->sc_type) {
   12485 	case WM_T_ICH8:
   12486 	case WM_T_ICH9:
   12487 	case WM_T_ICH10:
   12488 	case WM_T_PCH:
   12489 	case WM_T_PCH2:
   12490 	case WM_T_PCH_LPT:
   12491 	case WM_T_PCH_SPT:
   12492 		rv = wm_check_mng_mode_ich8lan(sc);
   12493 		break;
   12494 	case WM_T_82574:
   12495 	case WM_T_82583:
   12496 		rv = wm_check_mng_mode_82574(sc);
   12497 		break;
   12498 	case WM_T_82571:
   12499 	case WM_T_82572:
   12500 	case WM_T_82573:
   12501 	case WM_T_80003:
   12502 		rv = wm_check_mng_mode_generic(sc);
   12503 		break;
   12504 	default:
    12505 		/* nothing to do */
   12506 		rv = 0;
   12507 		break;
   12508 	}
   12509 
   12510 	return rv;
   12511 }
   12512 
   12513 static int
   12514 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12515 {
   12516 	uint32_t fwsm;
   12517 
   12518 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12519 
   12520 	if (((fwsm & FWSM_FW_VALID) != 0)
   12521 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12522 		return 1;
   12523 
   12524 	return 0;
   12525 }
   12526 
   12527 static int
   12528 wm_check_mng_mode_82574(struct wm_softc *sc)
   12529 {
   12530 	uint16_t data;
   12531 
   12532 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12533 
   12534 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12535 		return 1;
   12536 
   12537 	return 0;
   12538 }
   12539 
   12540 static int
   12541 wm_check_mng_mode_generic(struct wm_softc *sc)
   12542 {
   12543 	uint32_t fwsm;
   12544 
   12545 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12546 
   12547 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12548 		return 1;
   12549 
   12550 	return 0;
   12551 }
   12552 #endif /* WM_WOL */
   12553 
   12554 static int
   12555 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12556 {
   12557 	uint32_t manc, fwsm, factps;
   12558 
   12559 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12560 		return 0;
   12561 
   12562 	manc = CSR_READ(sc, WMREG_MANC);
   12563 
   12564 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12565 		device_xname(sc->sc_dev), manc));
   12566 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12567 		return 0;
   12568 
   12569 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12570 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12571 		factps = CSR_READ(sc, WMREG_FACTPS);
   12572 		if (((factps & FACTPS_MNGCG) == 0)
   12573 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12574 			return 1;
    12575 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12576 		uint16_t data;
   12577 
   12578 		factps = CSR_READ(sc, WMREG_FACTPS);
   12579 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12580 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12581 			device_xname(sc->sc_dev), factps, data));
   12582 		if (((factps & FACTPS_MNGCG) == 0)
   12583 		    && ((data & NVM_CFG2_MNGM_MASK)
   12584 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12585 			return 1;
   12586 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12587 	    && ((manc & MANC_ASF_EN) == 0))
   12588 		return 1;
   12589 
   12590 	return 0;
   12591 }
   12592 
   12593 static bool
   12594 wm_phy_resetisblocked(struct wm_softc *sc)
   12595 {
   12596 	bool blocked = false;
   12597 	uint32_t reg;
   12598 	int i = 0;
   12599 
   12600 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12601 		device_xname(sc->sc_dev), __func__));
   12602 
   12603 	switch (sc->sc_type) {
   12604 	case WM_T_ICH8:
   12605 	case WM_T_ICH9:
   12606 	case WM_T_ICH10:
   12607 	case WM_T_PCH:
   12608 	case WM_T_PCH2:
   12609 	case WM_T_PCH_LPT:
   12610 	case WM_T_PCH_SPT:
   12611 		do {
   12612 			reg = CSR_READ(sc, WMREG_FWSM);
   12613 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12614 				blocked = true;
   12615 				delay(10*1000);
   12616 				continue;
   12617 			}
   12618 			blocked = false;
   12619 		} while (blocked && (i++ < 30));
    12620 		return blocked;
   12622 	case WM_T_82571:
   12623 	case WM_T_82572:
   12624 	case WM_T_82573:
   12625 	case WM_T_82574:
   12626 	case WM_T_82583:
   12627 	case WM_T_80003:
   12628 		reg = CSR_READ(sc, WMREG_MANC);
    12629 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   12634 	default:
   12635 		/* no problem */
   12636 		break;
   12637 	}
   12638 
   12639 	return false;
   12640 }
   12641 
   12642 static void
   12643 wm_get_hw_control(struct wm_softc *sc)
   12644 {
   12645 	uint32_t reg;
   12646 
   12647 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12648 		device_xname(sc->sc_dev), __func__));
   12649 
   12650 	if (sc->sc_type == WM_T_82573) {
   12651 		reg = CSR_READ(sc, WMREG_SWSM);
   12652 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12653 	} else if (sc->sc_type >= WM_T_82571) {
   12654 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12655 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12656 	}
   12657 }
   12658 
   12659 static void
   12660 wm_release_hw_control(struct wm_softc *sc)
   12661 {
   12662 	uint32_t reg;
   12663 
   12664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12665 		device_xname(sc->sc_dev), __func__));
   12666 
   12667 	if (sc->sc_type == WM_T_82573) {
   12668 		reg = CSR_READ(sc, WMREG_SWSM);
   12669 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12670 	} else if (sc->sc_type >= WM_T_82571) {
   12671 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12672 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12673 	}
   12674 }
   12675 
   12676 static void
   12677 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12678 {
   12679 	uint32_t reg;
   12680 
   12681 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12682 		device_xname(sc->sc_dev), __func__));
   12683 
   12684 	if (sc->sc_type < WM_T_PCH2)
   12685 		return;
   12686 
   12687 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12688 
   12689 	if (gate)
   12690 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12691 	else
   12692 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12693 
   12694 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12695 }
   12696 
   12697 static void
   12698 wm_smbustopci(struct wm_softc *sc)
   12699 {
   12700 	uint32_t fwsm, reg;
   12701 	int rv = 0;
   12702 
   12703 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12704 		device_xname(sc->sc_dev), __func__));
   12705 
   12706 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12707 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12708 
   12709 	/* Disable ULP */
   12710 	wm_ulp_disable(sc);
   12711 
   12712 	/* Acquire PHY semaphore */
   12713 	sc->phy.acquire(sc);
   12714 
   12715 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12716 	switch (sc->sc_type) {
   12717 	case WM_T_PCH_LPT:
   12718 	case WM_T_PCH_SPT:
   12719 		if (wm_phy_is_accessible_pchlan(sc))
   12720 			break;
   12721 
   12722 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12723 		reg |= CTRL_EXT_FORCE_SMBUS;
   12724 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12725 #if 0
   12726 		/* XXX Isn't this required??? */
   12727 		CSR_WRITE_FLUSH(sc);
   12728 #endif
   12729 		delay(50 * 1000);
   12730 		/* FALLTHROUGH */
   12731 	case WM_T_PCH2:
   12732 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12733 			break;
   12734 		/* FALLTHROUGH */
   12735 	case WM_T_PCH:
   12736 		if (sc->sc_type == WM_T_PCH)
   12737 			if ((fwsm & FWSM_FW_VALID) != 0)
   12738 				break;
   12739 
   12740 		if (wm_phy_resetisblocked(sc) == true) {
   12741 			printf("XXX reset is blocked(3)\n");
   12742 			break;
   12743 		}
   12744 
   12745 		wm_toggle_lanphypc_pch_lpt(sc);
   12746 
   12747 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12748 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12749 				break;
   12750 
   12751 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12752 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12753 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12754 
   12755 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12756 				break;
   12757 			rv = -1;
   12758 		}
   12759 		break;
   12760 	default:
   12761 		break;
   12762 	}
   12763 
   12764 	/* Release semaphore */
   12765 	sc->phy.release(sc);
   12766 
   12767 	if (rv == 0) {
   12768 		if (wm_phy_resetisblocked(sc)) {
   12769 			printf("XXX reset is blocked(4)\n");
   12770 			goto out;
   12771 		}
   12772 		wm_reset_phy(sc);
   12773 		if (wm_phy_resetisblocked(sc))
    12774 			printf("XXX reset is blocked(5)\n");
   12775 	}
   12776 
   12777 out:
   12778 	/*
   12779 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12780 	 */
   12781 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12782 		delay(10*1000);
   12783 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12784 	}
   12785 }
   12786 
   12787 static void
   12788 wm_init_manageability(struct wm_softc *sc)
   12789 {
   12790 
   12791 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12792 		device_xname(sc->sc_dev), __func__));
   12793 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12794 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12795 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12796 
   12797 		/* Disable hardware interception of ARP */
   12798 		manc &= ~MANC_ARP_EN;
   12799 
   12800 		/* Enable receiving management packets to the host */
   12801 		if (sc->sc_type >= WM_T_82571) {
   12802 			manc |= MANC_EN_MNG2HOST;
    12803 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12804 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12805 		}
   12806 
   12807 		CSR_WRITE(sc, WMREG_MANC, manc);
   12808 	}
   12809 }
   12810 
   12811 static void
   12812 wm_release_manageability(struct wm_softc *sc)
   12813 {
   12814 
   12815 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12816 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12817 
   12818 		manc |= MANC_ARP_EN;
   12819 		if (sc->sc_type >= WM_T_82571)
   12820 			manc &= ~MANC_EN_MNG2HOST;
   12821 
   12822 		CSR_WRITE(sc, WMREG_MANC, manc);
   12823 	}
   12824 }
   12825 
   12826 static void
   12827 wm_get_wakeup(struct wm_softc *sc)
   12828 {
   12829 
   12830 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12831 	switch (sc->sc_type) {
   12832 	case WM_T_82573:
   12833 	case WM_T_82583:
   12834 		sc->sc_flags |= WM_F_HAS_AMT;
   12835 		/* FALLTHROUGH */
   12836 	case WM_T_80003:
   12837 	case WM_T_82575:
   12838 	case WM_T_82576:
   12839 	case WM_T_82580:
   12840 	case WM_T_I350:
   12841 	case WM_T_I354:
   12842 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12843 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12844 		/* FALLTHROUGH */
   12845 	case WM_T_82541:
   12846 	case WM_T_82541_2:
   12847 	case WM_T_82547:
   12848 	case WM_T_82547_2:
   12849 	case WM_T_82571:
   12850 	case WM_T_82572:
   12851 	case WM_T_82574:
   12852 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12853 		break;
   12854 	case WM_T_ICH8:
   12855 	case WM_T_ICH9:
   12856 	case WM_T_ICH10:
   12857 	case WM_T_PCH:
   12858 	case WM_T_PCH2:
   12859 	case WM_T_PCH_LPT:
   12860 	case WM_T_PCH_SPT:
   12861 		sc->sc_flags |= WM_F_HAS_AMT;
   12862 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12863 		break;
   12864 	default:
   12865 		break;
   12866 	}
   12867 
   12868 	/* 1: HAS_MANAGE */
   12869 	if (wm_enable_mng_pass_thru(sc) != 0)
   12870 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12871 
   12872 #ifdef WM_DEBUG
   12873 	printf("\n");
   12874 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12875 		printf("HAS_AMT,");
   12876 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12877 		printf("ARC_SUBSYS_VALID,");
   12878 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12879 		printf("ASF_FIRMWARE_PRES,");
   12880 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12881 		printf("HAS_MANAGE,");
   12882 	printf("\n");
   12883 #endif
   12884 	/*
    12885 	 * Note that the WOL flags are set after the EEPROM stuff has
    12886 	 * been reset.
   12887 	 */
   12888 }
   12889 
   12890 /*
   12891  * Unconfigure Ultra Low Power mode.
   12892  * Only for I217 and newer (see below).
   12893  */
   12894 static void
   12895 wm_ulp_disable(struct wm_softc *sc)
   12896 {
   12897 	uint32_t reg;
   12898 	int i = 0;
   12899 
   12900 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12901 		device_xname(sc->sc_dev), __func__));
   12902 	/* Exclude old devices */
   12903 	if ((sc->sc_type < WM_T_PCH_LPT)
   12904 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12905 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12906 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12907 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12908 		return;
   12909 
   12910 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12911 		/* Request ME un-configure ULP mode in the PHY */
   12912 		reg = CSR_READ(sc, WMREG_H2ME);
   12913 		reg &= ~H2ME_ULP;
   12914 		reg |= H2ME_ENFORCE_SETTINGS;
   12915 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12916 
   12917 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12918 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12919 			if (i++ == 30) {
   12920 				printf("%s timed out\n", __func__);
   12921 				return;
   12922 			}
   12923 			delay(10 * 1000);
   12924 		}
   12925 		reg = CSR_READ(sc, WMREG_H2ME);
   12926 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12927 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12928 
   12929 		return;
   12930 	}
   12931 
   12932 	/* Acquire semaphore */
   12933 	sc->phy.acquire(sc);
   12934 
   12935 	/* Toggle LANPHYPC */
   12936 	wm_toggle_lanphypc_pch_lpt(sc);
   12937 
   12938 	/* Unforce SMBus mode in PHY */
   12939 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12940 	if (reg == 0x0000 || reg == 0xffff) {
   12941 		uint32_t reg2;
   12942 
   12943 		printf("%s: Force SMBus first.\n", __func__);
   12944 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12945 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12946 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12947 		delay(50 * 1000);
   12948 
   12949 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12950 	}
   12951 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12952 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12953 
   12954 	/* Unforce SMBus mode in MAC */
   12955 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12956 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12957 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12958 
   12959 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12960 	reg |= HV_PM_CTRL_K1_ENA;
   12961 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12962 
   12963 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12964 	reg &= ~(I218_ULP_CONFIG1_IND
   12965 	    | I218_ULP_CONFIG1_STICKY_ULP
   12966 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12967 	    | I218_ULP_CONFIG1_WOL_HOST
   12968 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12969 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12970 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12971 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12972 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12973 	reg |= I218_ULP_CONFIG1_START;
   12974 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12975 
   12976 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12977 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12978 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12979 
   12980 	/* Release semaphore */
   12981 	sc->phy.release(sc);
   12982 	wm_gmii_reset(sc);
   12983 	delay(50 * 1000);
   12984 }
   12985 
   12986 /* WOL in the newer chipset interfaces (pchlan) */
   12987 static void
   12988 wm_enable_phy_wakeup(struct wm_softc *sc)
   12989 {
   12990 #if 0
   12991 	uint16_t preg;
   12992 
   12993 	/* Copy MAC RARs to PHY RARs */
   12994 
   12995 	/* Copy MAC MTA to PHY MTA */
   12996 
   12997 	/* Configure PHY Rx Control register */
   12998 
   12999 	/* Enable PHY wakeup in MAC register */
   13000 
   13001 	/* Configure and enable PHY wakeup in PHY registers */
   13002 
   13003 	/* Activate PHY wakeup */
   13004 
   13005 	/* XXX */
   13006 #endif
   13007 }
   13008 
   13009 /* Power down workaround on D3 */
   13010 static void
   13011 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13012 {
   13013 	uint32_t reg;
   13014 	int i;
   13015 
   13016 	for (i = 0; i < 2; i++) {
   13017 		/* Disable link */
   13018 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13019 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13020 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13021 
   13022 		/*
   13023 		 * Call gig speed drop workaround on Gig disable before
   13024 		 * accessing any PHY registers
   13025 		 */
   13026 		if (sc->sc_type == WM_T_ICH8)
   13027 			wm_gig_downshift_workaround_ich8lan(sc);
   13028 
   13029 		/* Write VR power-down enable */
   13030 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13031 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13032 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13033 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13034 
   13035 		/* Read it back and test */
   13036 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13037 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13038 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13039 			break;
   13040 
   13041 		/* Issue PHY reset and repeat at most one more time */
   13042 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13043 	}
   13044 }
   13045 
   13046 static void
   13047 wm_enable_wakeup(struct wm_softc *sc)
   13048 {
   13049 	uint32_t reg, pmreg;
   13050 	pcireg_t pmode;
   13051 
   13052 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13053 		device_xname(sc->sc_dev), __func__));
   13054 
   13055 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13056 		&pmreg, NULL) == 0)
   13057 		return;
   13058 
   13059 	/* Advertise the wakeup capability */
   13060 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13061 	    | CTRL_SWDPIN(3));
   13062 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13063 
   13064 	/* ICH workaround */
   13065 	switch (sc->sc_type) {
   13066 	case WM_T_ICH8:
   13067 	case WM_T_ICH9:
   13068 	case WM_T_ICH10:
   13069 	case WM_T_PCH:
   13070 	case WM_T_PCH2:
   13071 	case WM_T_PCH_LPT:
   13072 	case WM_T_PCH_SPT:
   13073 		/* Disable gig during WOL */
   13074 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13075 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13076 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13077 		if (sc->sc_type == WM_T_PCH)
   13078 			wm_gmii_reset(sc);
   13079 
   13080 		/* Power down workaround */
   13081 		if (sc->sc_phytype == WMPHY_82577) {
   13082 			struct mii_softc *child;
   13083 
   13084 			/* Assume that the PHY is copper */
   13085 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    13086 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13087 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13088 				    (768 << 5) | 25, 0x0444); /* magic num */
   13089 		}
   13090 		break;
   13091 	default:
   13092 		break;
   13093 	}
   13094 
   13095 	/* Keep the laser running on fiber adapters */
   13096 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13097 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13098 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13099 		reg |= CTRL_EXT_SWDPIN(3);
   13100 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13101 	}
   13102 
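          	/* Always enable wakeup on Magic Packet reception */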
   13103 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13104 #if 0	/* For multicast packets */
   13105 	reg |= WUFC_MC;
   13106 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13107 #endif
   13108 
   13109 	if (sc->sc_type >= WM_T_PCH)
   13110 		wm_enable_phy_wakeup(sc);
   13111 	else {
   13112 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13113 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13114 	}
   13115 
   13116 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13117 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13118 		|| (sc->sc_type == WM_T_PCH2))
   13119 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13120 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13121 
   13122 	/* Request PME */
   13123 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13124 #if 0
   13125 	/* Disable WOL */
   13126 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13127 #else
   13128 	/* For WOL */
   13129 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13130 #endif
   13131 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13132 }
   13133 
   13134 /* LPLU */
   13135 
   13136 static void
   13137 wm_lplu_d0_disable(struct wm_softc *sc)
   13138 {
   13139 	uint32_t reg;
   13140 
   13141 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13142 		device_xname(sc->sc_dev), __func__));
   13143 
   13144 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13145 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13146 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13147 }
   13148 
   13149 static void
   13150 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13151 {
   13152 	uint32_t reg;
   13153 
   13154 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13155 		device_xname(sc->sc_dev), __func__));
   13156 
   13157 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13158 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13159 	reg |= HV_OEM_BITS_ANEGNOW;
   13160 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13161 }
   13162 
   13163 /* EEE */
   13164 
   13165 static void
   13166 wm_set_eee_i350(struct wm_softc *sc)
   13167 {
   13168 	uint32_t ipcnfg, eeer;
   13169 
   13170 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13171 	eeer = CSR_READ(sc, WMREG_EEER);
   13172 
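          	/* Advertise EEE (100M/1G) and enable LPI only if EEE is on */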
   13173 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13174 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13175 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13176 		    | EEER_LPI_FC);
   13177 	} else {
   13178 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13179 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13180 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13181 		    | EEER_LPI_FC);
   13182 	}
   13183 
   13184 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13185 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13186 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13187 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13188 }
   13189 
   13190 /*
   13191  * Workarounds (mainly PHY related).
    13192  * PHY-specific workarounds normally belong in the PHY drivers.
   13193  */
   13194 
   13195 /* Work-around for 82566 Kumeran PCS lock loss */
   13196 static void
   13197 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13198 {
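          	/* XXX Workaround body below is compiled out; kept for reference. */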
   13199 #if 0
   13200 	int miistatus, active, i;
   13201 	int reg;
   13202 
   13203 	miistatus = sc->sc_mii.mii_media_status;
   13204 
   13205 	/* If the link is not up, do nothing */
   13206 	if ((miistatus & IFM_ACTIVE) == 0)
   13207 		return;
   13208 
   13209 	active = sc->sc_mii.mii_media_active;
   13210 
    13211 	/* Nothing to do if the link speed is other than 1Gbps */
   13212 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13213 		return;
   13214 
   13215 	for (i = 0; i < 10; i++) {
   13216 		/* read twice */
   13217 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13218 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13219 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13220 			goto out;	/* GOOD! */
   13221 
   13222 		/* Reset the PHY */
   13223 		wm_gmii_reset(sc);
    13224 		delay(5 * 1000);
   13225 	}
   13226 
   13227 	/* Disable GigE link negotiation */
   13228 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13229 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13230 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13231 
   13232 	/*
   13233 	 * Call gig speed drop workaround on Gig disable before accessing
   13234 	 * any PHY registers.
   13235 	 */
   13236 	wm_gig_downshift_workaround_ich8lan(sc);
   13237 
   13238 out:
   13239 	return;
   13240 #endif
   13241 }
   13242 
   13243 /* WOL from S5 stops working */
   13244 static void
   13245 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13246 {
   13247 	uint16_t kmrn_reg;
   13248 
   13249 	/* Only for igp3 */
   13250 	if (sc->sc_phytype == WMPHY_IGP_3) {
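          		/* Pulse the Kumeran near-end loopback bit: set, then clear */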
   13251 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13252 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13253 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13254 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13255 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13256 	}
   13257 }
   13258 
   13259 /*
   13260  * Workaround for pch's PHYs
   13261  * XXX should be moved to new PHY driver?
   13262  */
   13263 static void
   13264 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13265 {
   13266 
   13267 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13268 		device_xname(sc->sc_dev), __func__));
   13269 	KASSERT(sc->sc_type == WM_T_PCH);
   13270 
   13271 	if (sc->sc_phytype == WMPHY_82577)
   13272 		wm_set_mdio_slow_mode_hv(sc);
   13273 
    13274 	/* XXX not done: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
    13275 
    13276 	/* XXX not done: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13277 
   13278 	/* 82578 */
   13279 	if (sc->sc_phytype == WMPHY_82578) {
   13280 		struct mii_softc *child;
   13281 
   13282 		/*
   13283 		 * Return registers to default by doing a soft reset then
   13284 		 * writing 0x3140 to the control register
   13285 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13286 		 */
   13287 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13288 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13289 			PHY_RESET(child);
   13290 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13291 			    0x3140);
   13292 		}
   13293 	}
   13294 
   13295 	/* Select page 0 */
   13296 	sc->phy.acquire(sc);
   13297 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13298 	sc->phy.release(sc);
   13299 
    13300 	/*
    13301 	 * Configure the K1 Si workaround during PHY reset, assuming there
    13302 	 * is link, so that K1 is disabled when the link runs at 1Gbps.
    13303 	 */
   13304 	wm_k1_gig_workaround_hv(sc, 1);
   13305 }
   13306 
   13307 static void
   13308 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13309 {
   13310 
   13311 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13312 		device_xname(sc->sc_dev), __func__));
   13313 	KASSERT(sc->sc_type == WM_T_PCH2);
   13314 
   13315 	wm_set_mdio_slow_mode_hv(sc);
   13316 }
   13317 
   13318 static int
   13319 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13320 {
   13321 	int k1_enable = sc->sc_nvm_k1_enabled;
   13322 
   13323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13324 		device_xname(sc->sc_dev), __func__));
   13325 
   13326 	if (sc->phy.acquire(sc) != 0)
   13327 		return -1;
   13328 
   13329 	if (link) {
   13330 		k1_enable = 0;
   13331 
   13332 		/* Link stall fix for link up */
   13333 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13334 	} else {
   13335 		/* Link stall fix for link down */
   13336 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13337 	}
   13338 
   13339 	wm_configure_k1_ich8lan(sc, k1_enable);
   13340 	sc->phy.release(sc);
   13341 
   13342 	return 0;
   13343 }
   13344 
   13345 static void
   13346 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13347 {
   13348 	uint32_t reg;
   13349 
   13350 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13351 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13352 	    reg | HV_KMRN_MDIO_SLOW);
   13353 }
   13354 
   13355 static void
   13356 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13357 {
   13358 	uint32_t ctrl, ctrl_ext, tmp;
   13359 	uint16_t kmrn_reg;
   13360 
   13361 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13362 
   13363 	if (k1_enable)
   13364 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13365 	else
   13366 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13367 
   13368 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13369 
   13370 	delay(20);
   13371 
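          	/*
          	 * Briefly force the MAC speed with speed-bypass set so that the
          	 * new K1 setting takes effect, then restore the old settings.
          	 */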
   13372 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13373 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13374 
   13375 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13376 	tmp |= CTRL_FRCSPD;
   13377 
   13378 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13379 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13380 	CSR_WRITE_FLUSH(sc);
   13381 	delay(20);
   13382 
   13383 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13384 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13385 	CSR_WRITE_FLUSH(sc);
   13386 	delay(20);
   13387 }
   13388 
    13389 /* Special case: the 82575 needs manual initialization after reset ... */
   13390 static void
   13391 wm_reset_init_script_82575(struct wm_softc *sc)
   13392 {
    13393 	/*
    13394 	 * Remark: this is untested code - we have no board without EEPROM.
    13395 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13396 	 */
   13397 
   13398 	/* SerDes configuration via SERDESCTRL */
   13399 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13400 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13401 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13402 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13403 
   13404 	/* CCM configuration via CCMCTL register */
   13405 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13406 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13407 
   13408 	/* PCIe lanes configuration */
   13409 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13410 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13411 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13412 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13413 
   13414 	/* PCIe PLL Configuration */
   13415 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13416 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13417 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13418 }
   13419 
   13420 static void
   13421 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13422 {
   13423 	uint32_t reg;
   13424 	uint16_t nvmword;
   13425 	int rv;
   13426 
   13427 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13428 		return;
   13429 
   13430 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13431 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13432 	if (rv != 0) {
   13433 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13434 		    __func__);
   13435 		return;
   13436 	}
   13437 
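          	/* Select external PHY and/or common MDIO pins as the NVM dictates */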
   13438 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13439 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13440 		reg |= MDICNFG_DEST;
   13441 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13442 		reg |= MDICNFG_COM_MDIO;
   13443 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13444 }
   13445 
   13446 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13447 
   13448 static bool
   13449 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13450 {
   13451 	int i;
   13452 	uint32_t reg;
   13453 	uint16_t id1, id2;
   13454 
   13455 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13456 		device_xname(sc->sc_dev), __func__));
   13457 	id1 = id2 = 0xffff;
   13458 	for (i = 0; i < 2; i++) {
   13459 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13460 		if (MII_INVALIDID(id1))
   13461 			continue;
   13462 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13463 		if (MII_INVALIDID(id2))
   13464 			continue;
   13465 		break;
   13466 	}
   13467 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13468 		goto out;
   13469 	}
   13470 
   13471 	if (sc->sc_type < WM_T_PCH_LPT) {
   13472 		sc->phy.release(sc);
   13473 		wm_set_mdio_slow_mode_hv(sc);
   13474 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13475 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13476 		sc->phy.acquire(sc);
   13477 	}
   13478 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    13479 		printf("XXX %s: PHY not accessible\n", device_xname(sc->sc_dev));
   13480 		return false;
   13481 	}
   13482 out:
   13483 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13484 		/* Only unforce SMBus if ME is not active */
   13485 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13486 			/* Unforce SMBus mode in PHY */
   13487 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13488 			    CV_SMB_CTRL);
   13489 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13490 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13491 			    CV_SMB_CTRL, reg);
   13492 
   13493 			/* Unforce SMBus mode in MAC */
   13494 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13495 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13496 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13497 		}
   13498 	}
   13499 	return true;
   13500 }
   13501 
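          /* Toggle the LANPHYPC pin value to power-cycle the PHY (per Intel e1000e) */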
   13502 static void
   13503 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13504 {
   13505 	uint32_t reg;
   13506 	int i;
   13507 
   13508 	/* Set PHY Config Counter to 50msec */
   13509 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13510 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13511 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13512 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13513 
   13514 	/* Toggle LANPHYPC */
   13515 	reg = CSR_READ(sc, WMREG_CTRL);
   13516 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13517 	reg &= ~CTRL_LANPHYPC_VALUE;
   13518 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13519 	CSR_WRITE_FLUSH(sc);
   13520 	delay(1000);
   13521 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13522 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13523 	CSR_WRITE_FLUSH(sc);
   13524 
   13525 	if (sc->sc_type < WM_T_PCH_LPT)
   13526 		delay(50 * 1000);
   13527 	else {
   13528 		i = 20;
   13529 
   13530 		do {
   13531 			delay(5 * 1000);
   13532 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13533 		    && i--);
   13534 
   13535 		delay(30 * 1000);
   13536 	}
   13537 }
   13538 
   13539 static int
   13540 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13541 {
   13542 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13543 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13544 	uint32_t rxa;
   13545 	uint16_t scale = 0, lat_enc = 0;
   13546 	int64_t lat_ns, value;
   13547 
   13548 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13549 		device_xname(sc->sc_dev), __func__));
   13550 
   13551 	if (link) {
   13552 		pcireg_t preg;
   13553 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13554 
   13555 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13556 
   13557 		/*
   13558 		 * Determine the maximum latency tolerated by the device.
   13559 		 *
   13560 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13561 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13562 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13563 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13564 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13565 		 */
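          		/*
          		 * rxa is in KB, so multiply by 1024; subtract two maximum-
          		 * sized frames, convert bytes to bits (* 8), and scale by
          		 * 1000 so that dividing by the speed in Mb/s yields ns.
          		 */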
   13566 		lat_ns = ((int64_t)rxa * 1024 -
   13567 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13568 		if (lat_ns < 0)
   13569 			lat_ns = 0;
   13570 		else {
   13571 			uint32_t status;
   13572 			uint16_t speed;
   13573 
   13574 			status = CSR_READ(sc, WMREG_STATUS);
   13575 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13576 			case STATUS_SPEED_10:
   13577 				speed = 10;
   13578 				break;
   13579 			case STATUS_SPEED_100:
   13580 				speed = 100;
   13581 				break;
   13582 			case STATUS_SPEED_1000:
   13583 				speed = 1000;
   13584 				break;
   13585 			default:
   13586 				printf("%s: Unknown speed (status = %08x)\n",
   13587 				    device_xname(sc->sc_dev), status);
   13588 				return -1;
   13589 			}
   13590 			lat_ns /= speed;
   13591 		}
   13592 		value = lat_ns;
   13593 
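          		/* Divide by 2^5 per step until value fits the 10-bit field */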
   13594 		while (value > LTRV_VALUE) {
    13595 			scale++;
   13596 			value = howmany(value, __BIT(5));
   13597 		}
   13598 		if (scale > LTRV_SCALE_MAX) {
   13599 			printf("%s: Invalid LTR latency scale %d\n",
   13600 			    device_xname(sc->sc_dev), scale);
   13601 			return -1;
   13602 		}
   13603 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13604 
   13605 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13606 		    WM_PCI_LTR_CAP_LPT);
   13607 		max_snoop = preg & 0xffff;
   13608 		max_nosnoop = preg >> 16;
   13609 
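          		/* Clamp to the platform maximum from the LTR capability */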
   13610 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13611 
   13612 		if (lat_enc > max_ltr_enc) {
   13613 			lat_enc = max_ltr_enc;
   13614 		}
   13615 	}
   13616 	/* Snoop and No-Snoop latencies the same */
   13617 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13618 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13619 
   13620 	return 0;
   13621 }
   13622 
   13623 /*
   13624  * I210 Errata 25 and I211 Errata 10
   13625  * Slow System Clock.
   13626  */
   13627 static void
   13628 wm_pll_workaround_i210(struct wm_softc *sc)
   13629 {
   13630 	uint32_t mdicnfg, wuc;
   13631 	uint32_t reg;
   13632 	pcireg_t pcireg;
   13633 	uint32_t pmreg;
   13634 	uint16_t nvmword, tmp_nvmword;
   13635 	int phyval;
   13636 	bool wa_done = false;
   13637 	int i;
   13638 
   13639 	/* Save WUC and MDICNFG registers */
   13640 	wuc = CSR_READ(sc, WMREG_WUC);
   13641 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13642 
   13643 	reg = mdicnfg & ~MDICNFG_DEST;
   13644 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13645 
   13646 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13647 		nvmword = INVM_DEFAULT_AL;
   13648 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13649 
   13650 	/* Get Power Management cap offset */
   13651 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13652 		&pmreg, NULL) == 0)
   13653 		return;
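          	/*
          	 * While the PHY PLL reads back as unconfigured, reset the PHY
          	 * and bounce the function through D3 to reload the iNVM word.
          	 */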
   13654 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13655 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13656 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13657 
   13658 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13659 			break; /* OK */
   13660 		}
   13661 
   13662 		wa_done = true;
   13663 		/* Directly reset the internal PHY */
   13664 		reg = CSR_READ(sc, WMREG_CTRL);
   13665 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13666 
   13667 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13668 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13669 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13670 
   13671 		CSR_WRITE(sc, WMREG_WUC, 0);
   13672 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13673 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13674 
   13675 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13676 		    pmreg + PCI_PMCSR);
   13677 		pcireg |= PCI_PMCSR_STATE_D3;
   13678 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13679 		    pmreg + PCI_PMCSR, pcireg);
   13680 		delay(1000);
   13681 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13682 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13683 		    pmreg + PCI_PMCSR, pcireg);
   13684 
   13685 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13686 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13687 
   13688 		/* Restore WUC register */
   13689 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13690 	}
   13691 
   13692 	/* Restore MDICNFG setting */
   13693 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13694 	if (wa_done)
   13695 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13696 }
   13697