/*	$NetBSD: if_wm.c,v 1.496 2017/03/03 16:48:55 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.496 2017/03/03 16:48:55 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
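
/*
 * For illustration, a typical DPRINTF() call site looks like:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: status changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * The extra parentheses let the macro pass the whole printf-style
 * argument list through as a single parameter; with WM_DEBUG undefined
 * the call compiles away to nothing.
 */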

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts that this driver uses: up to one per
 * queue, plus one for the link interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
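
/*
 * Note that WM_NTXDESC() and WM_TXQUEUELEN() are powers of two, so the
 * "& (count - 1)" in the macros above is a cheap modulo.  E.g. with
 * WM_NTXDESC_82544 (4096) descriptors, WM_NEXTTX(txq, 4095) evaluates
 * to 4096 & 4095 == 0 and wraps cleanly to the start of the ring.
 */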

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
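
/*
 * A worked example of the sizing above: a 9018-byte jumbo frame split
 * into 2k (MCLBYTES) clusters occupies howmany(9018, 2048) == 5 Rx
 * buffers, so the 256 descriptors hold 256 / 5 (roughly 50) full-sized
 * jumbo packets at a time.
 */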

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
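
/*
 * A sketch of how the token pasting above expands:
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw)
 *
 * becomes
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * The "##" sequences inside the string literal are not expanded; that
 * literal merely reserves enough room for the "%s%02d%s" name (e.g.
 * "txq00txdw") built by WM_Q_EVCNT_ATTACH().
 */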

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
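
/*
 * Usage sketch: callers take the chip-appropriate PHY semaphore through
 * these hooks instead of switching on sc_type at every call site, e.g.:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		... access PHY registers ...
 *		sc->phy.release(sc);
 *	}
 */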

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
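
/*
 * Usage sketch: when sc_core_lock is NULL these macros degrade to
 * no-ops, so one code path serves both locked and unlocked builds:
 *
 *	WM_CORE_LOCK(sc);
 *	... modify softc state ...
 *	WM_CORE_UNLOCK(sc);
 *
 * with KASSERT(WM_CORE_LOCKED(sc)) in functions that require the lock.
 */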

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
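
/*
 * The rxq_tailp double pointer makes appends O(1).  For example, after
 * WM_RXCHAIN_RESET(rxq), WM_RXCHAIN_LINK(rxq, m1), WM_RXCHAIN_LINK(rxq, m2):
 *
 *	rxq_head == m1, m1->m_next == m2, rxq_tail == m2
 *
 * and rxq_tailp points at m2->m_next, so the next link needs no walk
 * of the chain.
 */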

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
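
/*
 * CSR_WRITE_FLUSH() relies on the fact that a read forces any posted
 * PCI writes ahead of it to complete.  A sketch of the usual pattern
 * before a delay that assumes the write already reached the device:
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */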

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
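
/*
 * The LO/HI pairs above split a descriptor's DMA address across two
 * 32-bit descriptor words.  For illustration, a 36-bit bus address
 * such as 0x923456000 yields LO 0x23456000 and HI 0x9; with a 32-bit
 * bus_addr_t the HI half is constant 0.
 */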

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1334 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1335 	  WM_T_82580,		WMP_F_SERDES },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1338 	  "DH89XXCC 1000BASE-KX Ethernet",
   1339 	  WM_T_82580,		WMP_F_SERDES },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1342 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1343 	  WM_T_82580,		WMP_F_SERDES },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1346 	  "I350 Gigabit Network Connection",
   1347 	  WM_T_I350,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1350 	  "I350 Gigabit Fiber Network Connection",
   1351 	  WM_T_I350,		WMP_F_FIBER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1354 	  "I350 Gigabit Backplane Connection",
   1355 	  WM_T_I350,		WMP_F_SERDES },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1358 	  "I350 Quad Port Gigabit Ethernet",
   1359 	  WM_T_I350,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1362 	  "I350 Gigabit Connection",
   1363 	  WM_T_I350,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1366 	  "I354 Gigabit Ethernet (KX)",
   1367 	  WM_T_I354,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1370 	  "I354 Gigabit Ethernet (SGMII)",
   1371 	  WM_T_I354,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1374 	  "I354 Gigabit Ethernet (2.5G)",
   1375 	  WM_T_I354,		WMP_F_COPPER },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1378 	  "I210-T1 Ethernet Server Adapter",
   1379 	  WM_T_I210,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1382 	  "I210 Ethernet (Copper OEM)",
   1383 	  WM_T_I210,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1386 	  "I210 Ethernet (Copper IT)",
   1387 	  WM_T_I210,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1390 	  "I210 Ethernet (FLASH less)",
   1391 	  WM_T_I210,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1394 	  "I210 Gigabit Ethernet (Fiber)",
   1395 	  WM_T_I210,		WMP_F_FIBER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1398 	  "I210 Gigabit Ethernet (SERDES)",
   1399 	  WM_T_I210,		WMP_F_SERDES },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1402 	  "I210 Gigabit Ethernet (FLASH less)",
   1403 	  WM_T_I210,		WMP_F_SERDES },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1406 	  "I210 Gigabit Ethernet (SGMII)",
   1407 	  WM_T_I210,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1410 	  "I211 Ethernet (COPPER)",
   1411 	  WM_T_I211,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1413 	  "I217 V Ethernet Connection",
   1414 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1416 	  "I217 LM Ethernet Connection",
   1417 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1419 	  "I218 V Ethernet Connection",
   1420 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1422 	  "I218 V Ethernet Connection",
   1423 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1425 	  "I218 V Ethernet Connection",
   1426 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1428 	  "I218 LM Ethernet Connection",
   1429 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1431 	  "I218 LM Ethernet Connection",
   1432 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1434 	  "I218 LM Ethernet Connection",
   1435 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1436 #if 0
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1438 	  "I219 V Ethernet Connection",
   1439 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1441 	  "I219 V Ethernet Connection",
   1442 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1444 	  "I219 V Ethernet Connection",
   1445 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1447 	  "I219 V Ethernet Connection",
   1448 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1450 	  "I219 LM Ethernet Connection",
   1451 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1453 	  "I219 LM Ethernet Connection",
   1454 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1456 	  "I219 LM Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1459 	  "I219 LM Ethernet Connection",
   1460 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1462 	  "I219 LM Ethernet Connection",
   1463 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1464 #endif
   1465 	{ 0,			0,
   1466 	  NULL,
   1467 	  0,			0 },
   1468 };
   1469 
   1470 /*
    1471  * Register read/write functions, other than CSR_{READ|WRITE}().
   1473  */
   1474 
   1475 #if 0 /* Not currently used */
   1476 static inline uint32_t
   1477 wm_io_read(struct wm_softc *sc, int reg)
   1478 {
   1479 
   1480 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1481 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1482 }
   1483 #endif
   1484 
   1485 static inline void
   1486 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1487 {
   1488 
   1489 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1490 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1491 }
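/*
 * A sketch of the indirect access convention these helpers rely on:
 * within the I/O BAR, offset 0 is an address window and offset 4 is
 * the data window, so each access latches the register offset first
 * and then moves the value.  For example, a hypothetical caller could
 * issue
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 *
 * to write CTRL through I/O space rather than the memory mapping.
 */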
   1492 
   1493 static inline void
   1494 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1495     uint32_t data)
   1496 {
   1497 	uint32_t regval;
   1498 	int i;
   1499 
   1500 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1501 
   1502 	CSR_WRITE(sc, reg, regval);
   1503 
   1504 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1505 		delay(5);
   1506 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1507 			break;
   1508 	}
   1509 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1510 		aprint_error("%s: WARNING:"
   1511 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1512 		    device_xname(sc->sc_dev), reg);
   1513 	}
   1514 }
   1515 
   1516 static inline void
   1517 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1518 {
   1519 	wa->wa_low = htole32(v & 0xffffffffU);
   1520 	if (sizeof(bus_addr_t) == 8)
   1521 		wa->wa_high = htole32((uint64_t) v >> 32);
   1522 	else
   1523 		wa->wa_high = 0;
   1524 }
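/*
 * For example, on a platform with a 64-bit bus_addr_t, passing
 * v = 0x123456789 stores wa_low = 0x23456789 and wa_high = 0x1
 * (each converted to little-endian); with a 32-bit bus_addr_t the
 * high word is simply zeroed.
 */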
   1525 
   1526 /*
   1527  * Descriptor sync/init functions.
   1528  */
   1529 static inline void
   1530 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1531 {
   1532 	struct wm_softc *sc = txq->txq_sc;
   1533 
   1534 	/* If it will wrap around, sync to the end of the ring. */
   1535 	if ((start + num) > WM_NTXDESC(txq)) {
   1536 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1537 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1538 		    (WM_NTXDESC(txq) - start), ops);
   1539 		num -= (WM_NTXDESC(txq) - start);
   1540 		start = 0;
   1541 	}
   1542 
   1543 	/* Now sync whatever is left. */
   1544 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1545 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1546 }
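/*
 * A worked example of the wrap-around case above: with a 256-entry
 * ring, start == 250 and num == 10, the first bus_dmamap_sync()
 * covers descriptors 250-255, after which num becomes 4 and start
 * becomes 0, so the second sync covers descriptors 0-3.
 */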
   1547 
   1548 static inline void
   1549 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1550 {
   1551 	struct wm_softc *sc = rxq->rxq_sc;
   1552 
   1553 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1554 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1555 }
   1556 
   1557 static inline void
   1558 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1559 {
   1560 	struct wm_softc *sc = rxq->rxq_sc;
   1561 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1562 	struct mbuf *m = rxs->rxs_mbuf;
   1563 
   1564 	/*
   1565 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1566 	 * so that the payload after the Ethernet header is aligned
   1567 	 * to a 4-byte boundary.
    1568 	 *
   1569 	 * XXX BRAINDAMAGE ALERT!
   1570 	 * The stupid chip uses the same size for every buffer, which
   1571 	 * is set in the Receive Control register.  We are using the 2K
   1572 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1573 	 * reason, we can't "scoot" packets longer than the standard
   1574 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1575 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1576 	 * the upper layer copy the headers.
   1577 	 */
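	/*
	 * For example, with an align_tweak of 2 the 14-byte Ethernet
	 * header occupies buffer offsets 2-15, so the IP header that
	 * follows begins at offset 16, a 4-byte boundary.
	 */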
   1578 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1579 
   1580 	if (sc->sc_type == WM_T_82574) {
   1581 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1582 		rxd->erx_data.erxd_addr =
   1583 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1584 		rxd->erx_data.erxd_dd = 0;
   1585 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1586 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1587 
   1588 		rxd->nqrx_data.nrxd_paddr =
   1589 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1590 		/* Currently, split header is not supported. */
   1591 		rxd->nqrx_data.nrxd_haddr = 0;
   1592 	} else {
   1593 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1594 
   1595 		wm_set_dma_addr(&rxd->wrx_addr,
   1596 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1597 		rxd->wrx_len = 0;
   1598 		rxd->wrx_cksum = 0;
   1599 		rxd->wrx_status = 0;
   1600 		rxd->wrx_errors = 0;
   1601 		rxd->wrx_special = 0;
   1602 	}
   1603 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1604 
   1605 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1606 }
   1607 
   1608 /*
   1609  * Device driver interface functions and commonly used functions.
   1610  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1611  */
   1612 
   1613 /* Lookup supported device table */
   1614 static const struct wm_product *
   1615 wm_lookup(const struct pci_attach_args *pa)
   1616 {
   1617 	const struct wm_product *wmp;
   1618 
   1619 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1620 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1621 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1622 			return wmp;
   1623 	}
   1624 	return NULL;
   1625 }
   1626 
   1627 /* The match function (ca_match) */
   1628 static int
   1629 wm_match(device_t parent, cfdata_t cf, void *aux)
   1630 {
   1631 	struct pci_attach_args *pa = aux;
   1632 
   1633 	if (wm_lookup(pa) != NULL)
   1634 		return 1;
   1635 
   1636 	return 0;
   1637 }
   1638 
   1639 /* The attach function (ca_attach) */
   1640 static void
   1641 wm_attach(device_t parent, device_t self, void *aux)
   1642 {
   1643 	struct wm_softc *sc = device_private(self);
   1644 	struct pci_attach_args *pa = aux;
   1645 	prop_dictionary_t dict;
   1646 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1647 	pci_chipset_tag_t pc = pa->pa_pc;
   1648 	int counts[PCI_INTR_TYPE_SIZE];
   1649 	pci_intr_type_t max_type;
   1650 	const char *eetype, *xname;
   1651 	bus_space_tag_t memt;
   1652 	bus_space_handle_t memh;
   1653 	bus_size_t memsize;
   1654 	int memh_valid;
   1655 	int i, error;
   1656 	const struct wm_product *wmp;
   1657 	prop_data_t ea;
   1658 	prop_number_t pn;
   1659 	uint8_t enaddr[ETHER_ADDR_LEN];
   1660 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1661 	pcireg_t preg, memtype;
   1662 	uint16_t eeprom_data, apme_mask;
   1663 	bool force_clear_smbi;
   1664 	uint32_t link_mode;
   1665 	uint32_t reg;
   1666 
   1667 	sc->sc_dev = self;
   1668 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1669 	sc->sc_core_stopping = false;
   1670 
   1671 	wmp = wm_lookup(pa);
   1672 #ifdef DIAGNOSTIC
   1673 	if (wmp == NULL) {
   1674 		printf("\n");
   1675 		panic("wm_attach: impossible");
   1676 	}
   1677 #endif
   1678 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1679 
   1680 	sc->sc_pc = pa->pa_pc;
   1681 	sc->sc_pcitag = pa->pa_tag;
   1682 
   1683 	if (pci_dma64_available(pa))
   1684 		sc->sc_dmat = pa->pa_dmat64;
   1685 	else
   1686 		sc->sc_dmat = pa->pa_dmat;
   1687 
   1688 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1689 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1690 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1691 
   1692 	sc->sc_type = wmp->wmp_type;
   1693 
   1694 	/* Set default function pointers */
   1695 	sc->phy.acquire = wm_get_null;
   1696 	sc->phy.release = wm_put_null;
   1697 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1698 
   1699 	if (sc->sc_type < WM_T_82543) {
   1700 		if (sc->sc_rev < 2) {
   1701 			aprint_error_dev(sc->sc_dev,
   1702 			    "i82542 must be at least rev. 2\n");
   1703 			return;
   1704 		}
   1705 		if (sc->sc_rev < 3)
   1706 			sc->sc_type = WM_T_82542_2_0;
   1707 	}
   1708 
   1709 	/*
   1710 	 * Disable MSI for Errata:
   1711 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1712 	 *
   1713 	 *  82544: Errata 25
   1714 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1715 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1716 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1717 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1718 	 *
   1719 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1720 	 *
   1721 	 *  82571 & 82572: Errata 63
   1722 	 */
   1723 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1724 	    || (sc->sc_type == WM_T_82572))
   1725 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1726 
   1727 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1728 	    || (sc->sc_type == WM_T_82580)
   1729 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1730 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1731 		sc->sc_flags |= WM_F_NEWQUEUE;
   1732 
   1733 	/* Set device properties (mactype) */
   1734 	dict = device_properties(sc->sc_dev);
   1735 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1736 
   1737 	/*
    1738 	 * Map the device.  All devices support memory-mapped access,
   1739 	 * and it is really required for normal operation.
   1740 	 */
   1741 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1742 	switch (memtype) {
   1743 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1744 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1745 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1746 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1747 		break;
   1748 	default:
   1749 		memh_valid = 0;
   1750 		break;
   1751 	}
   1752 
   1753 	if (memh_valid) {
   1754 		sc->sc_st = memt;
   1755 		sc->sc_sh = memh;
   1756 		sc->sc_ss = memsize;
   1757 	} else {
   1758 		aprint_error_dev(sc->sc_dev,
   1759 		    "unable to map device registers\n");
   1760 		return;
   1761 	}
   1762 
   1763 	/*
   1764 	 * In addition, i82544 and later support I/O mapped indirect
   1765 	 * register access.  It is not desirable (nor supported in
   1766 	 * this driver) to use it for normal operation, though it is
   1767 	 * required to work around bugs in some chip versions.
   1768 	 */
   1769 	if (sc->sc_type >= WM_T_82544) {
   1770 		/* First we have to find the I/O BAR. */
   1771 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1772 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1773 			if (memtype == PCI_MAPREG_TYPE_IO)
   1774 				break;
   1775 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1776 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1777 				i += 4;	/* skip high bits, too */
   1778 		}
   1779 		if (i < PCI_MAPREG_END) {
   1780 			/*
    1781 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1782 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1783 			 * that's not a problem, because those newer chips
    1784 			 * don't have this bug.
    1785 			 *
    1786 			 * The i8254x apparently doesn't respond when the
    1787 			 * I/O BAR is 0, which looks as if it hasn't
    1788 			 * been configured.
   1789 			 */
   1790 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1791 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1792 				aprint_error_dev(sc->sc_dev,
   1793 				    "WARNING: I/O BAR at zero.\n");
   1794 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1795 					0, &sc->sc_iot, &sc->sc_ioh,
   1796 					NULL, &sc->sc_ios) == 0) {
   1797 				sc->sc_flags |= WM_F_IOH_VALID;
   1798 			} else {
   1799 				aprint_error_dev(sc->sc_dev,
   1800 				    "WARNING: unable to map I/O space\n");
   1801 			}
   1802 		}
   1803 
   1804 	}
   1805 
   1806 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1807 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1808 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1809 	if (sc->sc_type < WM_T_82542_2_1)
   1810 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1811 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1812 
   1813 	/* power up chip */
   1814 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1815 	    NULL)) && error != EOPNOTSUPP) {
   1816 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1817 		return;
   1818 	}
   1819 
   1820 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1821 
   1822 	/* Allocation settings */
   1823 	max_type = PCI_INTR_TYPE_MSIX;
   1824 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1825 	counts[PCI_INTR_TYPE_MSI] = 1;
   1826 	counts[PCI_INTR_TYPE_INTX] = 1;
   1827 
   1828 alloc_retry:
   1829 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1830 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1831 		return;
   1832 	}
   1833 
   1834 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1835 		error = wm_setup_msix(sc);
   1836 		if (error) {
   1837 			pci_intr_release(pc, sc->sc_intrs,
   1838 			    counts[PCI_INTR_TYPE_MSIX]);
   1839 
   1840 			/* Setup for MSI: Disable MSI-X */
   1841 			max_type = PCI_INTR_TYPE_MSI;
   1842 			counts[PCI_INTR_TYPE_MSI] = 1;
   1843 			counts[PCI_INTR_TYPE_INTX] = 1;
   1844 			goto alloc_retry;
   1845 		}
    1846 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1847 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1848 		error = wm_setup_legacy(sc);
   1849 		if (error) {
   1850 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1851 			    counts[PCI_INTR_TYPE_MSI]);
   1852 
   1853 			/* The next try is for INTx: Disable MSI */
   1854 			max_type = PCI_INTR_TYPE_INTX;
   1855 			counts[PCI_INTR_TYPE_INTX] = 1;
   1856 			goto alloc_retry;
   1857 		}
   1858 	} else {
   1859 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1860 		error = wm_setup_legacy(sc);
   1861 		if (error) {
   1862 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1863 			    counts[PCI_INTR_TYPE_INTX]);
   1864 			return;
   1865 		}
   1866 	}
   1867 
   1868 	/*
   1869 	 * Check the function ID (unit number of the chip).
   1870 	 */
   1871 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1872 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1873 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1874 	    || (sc->sc_type == WM_T_82580)
   1875 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1876 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1877 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1878 	else
   1879 		sc->sc_funcid = 0;
   1880 
   1881 	/*
   1882 	 * Determine a few things about the bus we're connected to.
   1883 	 */
   1884 	if (sc->sc_type < WM_T_82543) {
   1885 		/* We don't really know the bus characteristics here. */
   1886 		sc->sc_bus_speed = 33;
   1887 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1888 		/*
    1889 		 * CSA (Communication Streaming Architecture) is about as fast
    1890 		 * as a 32-bit 66MHz PCI bus.
   1891 		 */
   1892 		sc->sc_flags |= WM_F_CSA;
   1893 		sc->sc_bus_speed = 66;
   1894 		aprint_verbose_dev(sc->sc_dev,
   1895 		    "Communication Streaming Architecture\n");
   1896 		if (sc->sc_type == WM_T_82547) {
   1897 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1898 			callout_setfunc(&sc->sc_txfifo_ch,
   1899 					wm_82547_txfifo_stall, sc);
   1900 			aprint_verbose_dev(sc->sc_dev,
   1901 			    "using 82547 Tx FIFO stall work-around\n");
   1902 		}
   1903 	} else if (sc->sc_type >= WM_T_82571) {
   1904 		sc->sc_flags |= WM_F_PCIE;
   1905 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1906 		    && (sc->sc_type != WM_T_ICH10)
   1907 		    && (sc->sc_type != WM_T_PCH)
   1908 		    && (sc->sc_type != WM_T_PCH2)
   1909 		    && (sc->sc_type != WM_T_PCH_LPT)
   1910 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1911 			/* ICH* and PCH* have no PCIe capability registers */
   1912 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1913 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1914 				NULL) == 0)
   1915 				aprint_error_dev(sc->sc_dev,
   1916 				    "unable to find PCIe capability\n");
   1917 		}
   1918 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1919 	} else {
   1920 		reg = CSR_READ(sc, WMREG_STATUS);
   1921 		if (reg & STATUS_BUS64)
   1922 			sc->sc_flags |= WM_F_BUS64;
   1923 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1924 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1925 
   1926 			sc->sc_flags |= WM_F_PCIX;
   1927 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1928 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1929 				aprint_error_dev(sc->sc_dev,
   1930 				    "unable to find PCIX capability\n");
   1931 			else if (sc->sc_type != WM_T_82545_3 &&
   1932 				 sc->sc_type != WM_T_82546_3) {
   1933 				/*
   1934 				 * Work around a problem caused by the BIOS
   1935 				 * setting the max memory read byte count
   1936 				 * incorrectly.
   1937 				 */
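				/*
				 * For example, if the BIOS programmed a
				 * 4096-byte MMRBC (bytecnt == 3) while the
				 * device reports a 2048-byte maximum
				 * (maxb == 2), the command register is
				 * rewritten below with the 2048-byte
				 * encoding.
				 */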
   1938 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1939 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1940 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1941 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1942 
   1943 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1944 				    PCIX_CMD_BYTECNT_SHIFT;
   1945 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1946 				    PCIX_STATUS_MAXB_SHIFT;
   1947 				if (bytecnt > maxb) {
   1948 					aprint_verbose_dev(sc->sc_dev,
   1949 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1950 					    512 << bytecnt, 512 << maxb);
   1951 					pcix_cmd = (pcix_cmd &
   1952 					    ~PCIX_CMD_BYTECNT_MASK) |
   1953 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1954 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1955 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1956 					    pcix_cmd);
   1957 				}
   1958 			}
   1959 		}
   1960 		/*
   1961 		 * The quad port adapter is special; it has a PCIX-PCIX
   1962 		 * bridge on the board, and can run the secondary bus at
   1963 		 * a higher speed.
   1964 		 */
   1965 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1966 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1967 								      : 66;
   1968 		} else if (sc->sc_flags & WM_F_PCIX) {
   1969 			switch (reg & STATUS_PCIXSPD_MASK) {
   1970 			case STATUS_PCIXSPD_50_66:
   1971 				sc->sc_bus_speed = 66;
   1972 				break;
   1973 			case STATUS_PCIXSPD_66_100:
   1974 				sc->sc_bus_speed = 100;
   1975 				break;
   1976 			case STATUS_PCIXSPD_100_133:
   1977 				sc->sc_bus_speed = 133;
   1978 				break;
   1979 			default:
   1980 				aprint_error_dev(sc->sc_dev,
   1981 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1982 				    reg & STATUS_PCIXSPD_MASK);
   1983 				sc->sc_bus_speed = 66;
   1984 				break;
   1985 			}
   1986 		} else
   1987 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1988 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1989 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1990 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1991 	}
   1992 
   1993 	/* clear interesting stat counters */
   1994 	CSR_READ(sc, WMREG_COLC);
   1995 	CSR_READ(sc, WMREG_RXERRC);
   1996 
   1997 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1998 	    || (sc->sc_type >= WM_T_ICH8))
   1999 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2000 	if (sc->sc_type >= WM_T_ICH8)
   2001 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2002 
   2003 	/* Set PHY, NVM mutex related stuff */
   2004 	switch (sc->sc_type) {
   2005 	case WM_T_82542_2_0:
   2006 	case WM_T_82542_2_1:
   2007 	case WM_T_82543:
   2008 	case WM_T_82544:
   2009 		/* Microwire */
   2010 		sc->sc_nvm_wordsize = 64;
   2011 		sc->sc_nvm_addrbits = 6;
   2012 		break;
   2013 	case WM_T_82540:
   2014 	case WM_T_82545:
   2015 	case WM_T_82545_3:
   2016 	case WM_T_82546:
   2017 	case WM_T_82546_3:
   2018 		/* Microwire */
   2019 		reg = CSR_READ(sc, WMREG_EECD);
   2020 		if (reg & EECD_EE_SIZE) {
   2021 			sc->sc_nvm_wordsize = 256;
   2022 			sc->sc_nvm_addrbits = 8;
   2023 		} else {
   2024 			sc->sc_nvm_wordsize = 64;
   2025 			sc->sc_nvm_addrbits = 6;
   2026 		}
   2027 		sc->sc_flags |= WM_F_LOCK_EECD;
   2028 		break;
   2029 	case WM_T_82541:
   2030 	case WM_T_82541_2:
   2031 	case WM_T_82547:
   2032 	case WM_T_82547_2:
   2033 		sc->sc_flags |= WM_F_LOCK_EECD;
   2034 		reg = CSR_READ(sc, WMREG_EECD);
   2035 		if (reg & EECD_EE_TYPE) {
   2036 			/* SPI */
   2037 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2038 			wm_nvm_set_addrbits_size_eecd(sc);
   2039 		} else {
   2040 			/* Microwire */
   2041 			if ((reg & EECD_EE_ABITS) != 0) {
   2042 				sc->sc_nvm_wordsize = 256;
   2043 				sc->sc_nvm_addrbits = 8;
   2044 			} else {
   2045 				sc->sc_nvm_wordsize = 64;
   2046 				sc->sc_nvm_addrbits = 6;
   2047 			}
   2048 		}
   2049 		break;
   2050 	case WM_T_82571:
   2051 	case WM_T_82572:
   2052 		/* SPI */
   2053 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2054 		wm_nvm_set_addrbits_size_eecd(sc);
   2055 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2056 		sc->phy.acquire = wm_get_swsm_semaphore;
   2057 		sc->phy.release = wm_put_swsm_semaphore;
   2058 		break;
   2059 	case WM_T_82573:
   2060 	case WM_T_82574:
   2061 	case WM_T_82583:
   2062 		if (sc->sc_type == WM_T_82573) {
   2063 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2064 			sc->phy.acquire = wm_get_swsm_semaphore;
   2065 			sc->phy.release = wm_put_swsm_semaphore;
   2066 		} else {
   2067 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2068 			/* Both PHY and NVM use the same semaphore. */
   2069 			sc->phy.acquire
   2070 			    = wm_get_swfwhw_semaphore;
   2071 			sc->phy.release
   2072 			    = wm_put_swfwhw_semaphore;
   2073 		}
   2074 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2075 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2076 			sc->sc_nvm_wordsize = 2048;
   2077 		} else {
   2078 			/* SPI */
   2079 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2080 			wm_nvm_set_addrbits_size_eecd(sc);
   2081 		}
   2082 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2083 		break;
   2084 	case WM_T_82575:
   2085 	case WM_T_82576:
   2086 	case WM_T_82580:
   2087 	case WM_T_I350:
   2088 	case WM_T_I354:
   2089 	case WM_T_80003:
   2090 		/* SPI */
   2091 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 		wm_nvm_set_addrbits_size_eecd(sc);
   2093 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2094 		    | WM_F_LOCK_SWSM;
   2095 		sc->phy.acquire = wm_get_phy_82575;
   2096 		sc->phy.release = wm_put_phy_82575;
   2097 		break;
   2098 	case WM_T_ICH8:
   2099 	case WM_T_ICH9:
   2100 	case WM_T_ICH10:
   2101 	case WM_T_PCH:
   2102 	case WM_T_PCH2:
   2103 	case WM_T_PCH_LPT:
   2104 		/* FLASH */
   2105 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2106 		sc->sc_nvm_wordsize = 2048;
   2107 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2108 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2109 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2110 			aprint_error_dev(sc->sc_dev,
   2111 			    "can't map FLASH registers\n");
   2112 			goto out;
   2113 		}
   2114 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2115 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2116 		    ICH_FLASH_SECTOR_SIZE;
   2117 		sc->sc_ich8_flash_bank_size =
   2118 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2119 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2120 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2121 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
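		/*
		 * A worked example with hypothetical GFPREG contents:
		 * a base field of 0x20 and a limit field of 0x3f mean
		 * the NVM region starts at sector 0x20 and spans 0x20
		 * sectors of ICH_FLASH_SECTOR_SIZE (4KB here) each,
		 * i.e. 128KB in two banks, which the arithmetic above
		 * reduces to 32768 16-bit words per bank.
		 */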
   2122 		sc->sc_flashreg_offset = 0;
   2123 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2124 		sc->phy.release = wm_put_swflag_ich8lan;
   2125 		break;
   2126 	case WM_T_PCH_SPT:
   2127 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2128 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2129 		sc->sc_flasht = sc->sc_st;
   2130 		sc->sc_flashh = sc->sc_sh;
   2131 		sc->sc_ich8_flash_base = 0;
   2132 		sc->sc_nvm_wordsize =
   2133 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2134 			* NVM_SIZE_MULTIPLIER;
    2135 		/* That is the size in bytes; we want words. */
   2136 		sc->sc_nvm_wordsize /= 2;
   2137 		/* assume 2 banks */
   2138 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
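		/*
		 * A worked example with a hypothetical strap value: if
		 * the 5-bit size field reads 7, the flash holds
		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes; with a 4KB
		 * multiplier that is 32KB, i.e. 16384 words in total
		 * and 8192 words per assumed bank.
		 */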
   2139 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2140 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2141 		sc->phy.release = wm_put_swflag_ich8lan;
   2142 		break;
   2143 	case WM_T_I210:
   2144 	case WM_T_I211:
   2145 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2148 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2149 		} else {
   2150 			sc->sc_nvm_wordsize = INVM_SIZE;
   2151 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2152 		}
   2153 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2154 		sc->phy.acquire = wm_get_phy_82575;
   2155 		sc->phy.release = wm_put_phy_82575;
   2156 		break;
   2157 	default:
   2158 		break;
   2159 	}
   2160 
   2161 	/* Reset the chip to a known state. */
   2162 	wm_reset(sc);
   2163 
   2164 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2165 	switch (sc->sc_type) {
   2166 	case WM_T_82571:
   2167 	case WM_T_82572:
   2168 		reg = CSR_READ(sc, WMREG_SWSM2);
   2169 		if ((reg & SWSM2_LOCK) == 0) {
   2170 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2171 			force_clear_smbi = true;
   2172 		} else
   2173 			force_clear_smbi = false;
   2174 		break;
   2175 	case WM_T_82573:
   2176 	case WM_T_82574:
   2177 	case WM_T_82583:
   2178 		force_clear_smbi = true;
   2179 		break;
   2180 	default:
   2181 		force_clear_smbi = false;
   2182 		break;
   2183 	}
   2184 	if (force_clear_smbi) {
   2185 		reg = CSR_READ(sc, WMREG_SWSM);
   2186 		if ((reg & SWSM_SMBI) != 0)
   2187 			aprint_error_dev(sc->sc_dev,
   2188 			    "Please update the Bootagent\n");
   2189 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2190 	}
   2191 
   2192 	/*
    2193 	 * Defer printing the EEPROM type until after verifying the checksum.
   2194 	 * This allows the EEPROM type to be printed correctly in the case
   2195 	 * that no EEPROM is attached.
   2196 	 */
   2197 	/*
   2198 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2199 	 * this for later, so we can fail future reads from the EEPROM.
   2200 	 */
   2201 	if (wm_nvm_validate_checksum(sc)) {
   2202 		/*
    2203 		 * Check again, because some PCI-e parts fail the
    2204 		 * first check due to the link being in a sleep state.
   2205 		 */
   2206 		if (wm_nvm_validate_checksum(sc))
   2207 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2208 	}
   2209 
   2210 	/* Set device properties (macflags) */
   2211 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2212 
   2213 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2214 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2215 	else {
   2216 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2217 		    sc->sc_nvm_wordsize);
   2218 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2219 			aprint_verbose("iNVM");
   2220 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2221 			aprint_verbose("FLASH(HW)");
   2222 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2223 			aprint_verbose("FLASH");
   2224 		else {
   2225 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2226 				eetype = "SPI";
   2227 			else
   2228 				eetype = "MicroWire";
   2229 			aprint_verbose("(%d address bits) %s EEPROM",
   2230 			    sc->sc_nvm_addrbits, eetype);
   2231 		}
   2232 	}
   2233 	wm_nvm_version(sc);
   2234 	aprint_verbose("\n");
   2235 
   2236 	/* Check for I21[01] PLL workaround */
   2237 	if (sc->sc_type == WM_T_I210)
   2238 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2239 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2240 		/* NVM image release 3.25 has a workaround */
   2241 		if ((sc->sc_nvm_ver_major < 3)
   2242 		    || ((sc->sc_nvm_ver_major == 3)
   2243 			&& (sc->sc_nvm_ver_minor < 25))) {
   2244 			aprint_verbose_dev(sc->sc_dev,
   2245 			    "ROM image version %d.%d is older than 3.25\n",
   2246 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2247 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2248 		}
   2249 	}
   2250 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2251 		wm_pll_workaround_i210(sc);
   2252 
   2253 	wm_get_wakeup(sc);
   2254 
   2255 	/* Non-AMT based hardware can now take control from firmware */
   2256 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2257 		wm_get_hw_control(sc);
   2258 
   2259 	/*
   2260 	 * Read the Ethernet address from the EEPROM, if not first found
   2261 	 * in device properties.
   2262 	 */
   2263 	ea = prop_dictionary_get(dict, "mac-address");
   2264 	if (ea != NULL) {
   2265 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2266 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2267 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2268 	} else {
   2269 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2270 			aprint_error_dev(sc->sc_dev,
   2271 			    "unable to read Ethernet address\n");
   2272 			goto out;
   2273 		}
   2274 	}
   2275 
   2276 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2277 	    ether_sprintf(enaddr));
   2278 
   2279 	/*
   2280 	 * Read the config info from the EEPROM, and set up various
   2281 	 * bits in the control registers based on their contents.
   2282 	 */
   2283 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2284 	if (pn != NULL) {
   2285 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2286 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2287 	} else {
   2288 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2289 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2290 			goto out;
   2291 		}
   2292 	}
   2293 
   2294 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2295 	if (pn != NULL) {
   2296 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2297 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2298 	} else {
   2299 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2300 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	/* check for WM_F_WOL */
   2306 	switch (sc->sc_type) {
   2307 	case WM_T_82542_2_0:
   2308 	case WM_T_82542_2_1:
   2309 	case WM_T_82543:
   2310 		/* dummy? */
   2311 		eeprom_data = 0;
   2312 		apme_mask = NVM_CFG3_APME;
   2313 		break;
   2314 	case WM_T_82544:
   2315 		apme_mask = NVM_CFG2_82544_APM_EN;
   2316 		eeprom_data = cfg2;
   2317 		break;
   2318 	case WM_T_82546:
   2319 	case WM_T_82546_3:
   2320 	case WM_T_82571:
   2321 	case WM_T_82572:
   2322 	case WM_T_82573:
   2323 	case WM_T_82574:
   2324 	case WM_T_82583:
   2325 	case WM_T_80003:
   2326 	default:
   2327 		apme_mask = NVM_CFG3_APME;
   2328 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2329 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2330 		break;
   2331 	case WM_T_82575:
   2332 	case WM_T_82576:
   2333 	case WM_T_82580:
   2334 	case WM_T_I350:
   2335 	case WM_T_I354: /* XXX ok? */
   2336 	case WM_T_ICH8:
   2337 	case WM_T_ICH9:
   2338 	case WM_T_ICH10:
   2339 	case WM_T_PCH:
   2340 	case WM_T_PCH2:
   2341 	case WM_T_PCH_LPT:
   2342 	case WM_T_PCH_SPT:
   2343 		/* XXX The funcid should be checked on some devices */
   2344 		apme_mask = WUC_APME;
   2345 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2346 		break;
   2347 	}
   2348 
   2349 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2350 	if ((eeprom_data & apme_mask) != 0)
   2351 		sc->sc_flags |= WM_F_WOL;
   2352 #ifdef WM_DEBUG
   2353 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2354 		printf("WOL\n");
   2355 #endif
   2356 
   2357 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2358 		/* Check NVM for autonegotiation */
   2359 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2360 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2361 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2362 		}
   2363 	}
   2364 
   2365 	/*
    2366 	 * XXX need special handling for some multiple-port cards
    2367 	 * to disable a particular port.
   2368 	 */
   2369 
   2370 	if (sc->sc_type >= WM_T_82544) {
   2371 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2372 		if (pn != NULL) {
   2373 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2374 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2375 		} else {
   2376 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2377 				aprint_error_dev(sc->sc_dev,
   2378 				    "unable to read SWDPIN\n");
   2379 				goto out;
   2380 			}
   2381 		}
   2382 	}
   2383 
   2384 	if (cfg1 & NVM_CFG1_ILOS)
   2385 		sc->sc_ctrl |= CTRL_ILOS;
   2386 
   2387 	/*
   2388 	 * XXX
    2389 	 * This code isn't correct because pins 2 and 3 are located
    2390 	 * in different positions on newer chips. Check all datasheets.
    2391 	 *
    2392 	 * Until this problem is resolved, apply it only to the 82580 and older.
   2393 	 */
   2394 	if (sc->sc_type <= WM_T_82580) {
   2395 		if (sc->sc_type >= WM_T_82544) {
   2396 			sc->sc_ctrl |=
   2397 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2398 			    CTRL_SWDPIO_SHIFT;
   2399 			sc->sc_ctrl |=
   2400 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2401 			    CTRL_SWDPINS_SHIFT;
   2402 		} else {
   2403 			sc->sc_ctrl |=
   2404 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2405 			    CTRL_SWDPIO_SHIFT;
   2406 		}
   2407 	}
   2408 
   2409 	/* XXX For other than 82580? */
   2410 	if (sc->sc_type == WM_T_82580) {
   2411 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2412 		if (nvmword & __BIT(13))
   2413 			sc->sc_ctrl |= CTRL_ILOS;
   2414 	}
   2415 
   2416 #if 0
   2417 	if (sc->sc_type >= WM_T_82544) {
   2418 		if (cfg1 & NVM_CFG1_IPS0)
   2419 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2420 		if (cfg1 & NVM_CFG1_IPS1)
   2421 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2422 		sc->sc_ctrl_ext |=
   2423 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2424 		    CTRL_EXT_SWDPIO_SHIFT;
   2425 		sc->sc_ctrl_ext |=
   2426 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2427 		    CTRL_EXT_SWDPINS_SHIFT;
   2428 	} else {
   2429 		sc->sc_ctrl_ext |=
   2430 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2431 		    CTRL_EXT_SWDPIO_SHIFT;
   2432 	}
   2433 #endif
   2434 
   2435 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2436 #if 0
   2437 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2438 #endif
   2439 
   2440 	if (sc->sc_type == WM_T_PCH) {
   2441 		uint16_t val;
   2442 
   2443 		/* Save the NVM K1 bit setting */
   2444 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2445 
   2446 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2447 			sc->sc_nvm_k1_enabled = 1;
   2448 		else
   2449 			sc->sc_nvm_k1_enabled = 0;
   2450 	}
   2451 
   2452 	/*
    2453 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
   2454 	 * media structures accordingly.
   2455 	 */
   2456 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2457 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2458 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2459 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2460 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2461 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2462 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2463 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2464 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2465 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2466 	    || (sc->sc_type == WM_T_I211)) {
   2467 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2468 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2469 		switch (link_mode) {
   2470 		case CTRL_EXT_LINK_MODE_1000KX:
   2471 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2472 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2473 			break;
   2474 		case CTRL_EXT_LINK_MODE_SGMII:
   2475 			if (wm_sgmii_uses_mdio(sc)) {
   2476 				aprint_verbose_dev(sc->sc_dev,
   2477 				    "SGMII(MDIO)\n");
   2478 				sc->sc_flags |= WM_F_SGMII;
   2479 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2480 				break;
   2481 			}
   2482 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2483 			/*FALLTHROUGH*/
   2484 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2485 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2486 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2487 				if (link_mode
   2488 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2489 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2490 					sc->sc_flags |= WM_F_SGMII;
   2491 				} else {
   2492 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2493 					aprint_verbose_dev(sc->sc_dev,
   2494 					    "SERDES\n");
   2495 				}
   2496 				break;
   2497 			}
   2498 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2499 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2500 
   2501 			/* Change current link mode setting */
   2502 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2503 			switch (sc->sc_mediatype) {
   2504 			case WM_MEDIATYPE_COPPER:
   2505 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2506 				break;
   2507 			case WM_MEDIATYPE_SERDES:
   2508 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2509 				break;
   2510 			default:
   2511 				break;
   2512 			}
   2513 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2514 			break;
   2515 		case CTRL_EXT_LINK_MODE_GMII:
   2516 		default:
   2517 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2518 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2519 			break;
   2520 		}
   2521 
    2522 		reg &= ~CTRL_EXT_I2C_ENA;
    2523 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2524 			reg |= CTRL_EXT_I2C_ENA;
   2527 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2528 
   2529 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2530 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2531 		else
   2532 			wm_tbi_mediainit(sc);
   2533 	} else if (sc->sc_type < WM_T_82543 ||
   2534 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2535 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2536 			aprint_error_dev(sc->sc_dev,
   2537 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2538 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2539 		}
   2540 		wm_tbi_mediainit(sc);
   2541 	} else {
   2542 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2543 			aprint_error_dev(sc->sc_dev,
   2544 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2546 		}
   2547 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2548 	}
   2549 
   2550 	ifp = &sc->sc_ethercom.ec_if;
   2551 	xname = device_xname(sc->sc_dev);
   2552 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2553 	ifp->if_softc = sc;
   2554 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2555 #ifdef WM_MPSAFE
   2556 	ifp->if_extflags = IFEF_START_MPSAFE;
   2557 #endif
   2558 	ifp->if_ioctl = wm_ioctl;
   2559 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2560 		ifp->if_start = wm_nq_start;
   2561 		if (sc->sc_nqueues > 1)
   2562 			ifp->if_transmit = wm_nq_transmit;
   2563 	} else {
   2564 		ifp->if_start = wm_start;
   2565 		if (sc->sc_nqueues > 1)
   2566 			ifp->if_transmit = wm_transmit;
   2567 	}
   2568 	ifp->if_watchdog = wm_watchdog;
   2569 	ifp->if_init = wm_init;
   2570 	ifp->if_stop = wm_stop;
   2571 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2572 	IFQ_SET_READY(&ifp->if_snd);
   2573 
    2574 	/* Check for jumbo frame support */
   2575 	switch (sc->sc_type) {
   2576 	case WM_T_82573:
   2577 		/* XXX limited to 9234 if ASPM is disabled */
   2578 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2579 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2580 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2581 		break;
   2582 	case WM_T_82571:
   2583 	case WM_T_82572:
   2584 	case WM_T_82574:
   2585 	case WM_T_82575:
   2586 	case WM_T_82576:
   2587 	case WM_T_82580:
   2588 	case WM_T_I350:
   2589 	case WM_T_I354: /* XXXX ok? */
   2590 	case WM_T_I210:
   2591 	case WM_T_I211:
   2592 	case WM_T_80003:
   2593 	case WM_T_ICH9:
   2594 	case WM_T_ICH10:
   2595 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2596 	case WM_T_PCH_LPT:
   2597 	case WM_T_PCH_SPT:
   2598 		/* XXX limited to 9234 */
   2599 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2600 		break;
   2601 	case WM_T_PCH:
   2602 		/* XXX limited to 4096 */
   2603 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2604 		break;
   2605 	case WM_T_82542_2_0:
   2606 	case WM_T_82542_2_1:
   2607 	case WM_T_82583:
   2608 	case WM_T_ICH8:
   2609 		/* No support for jumbo frame */
   2610 		break;
   2611 	default:
   2612 		/* ETHER_MAX_LEN_JUMBO */
   2613 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2614 		break;
   2615 	}
   2616 
    2617 	/* If we're an i82543 or greater, we can support VLANs. */
   2618 	if (sc->sc_type >= WM_T_82543)
   2619 		sc->sc_ethercom.ec_capabilities |=
   2620 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2621 
   2622 	/*
    2623 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2624 	 * on i82543 and later.
   2625 	 */
   2626 	if (sc->sc_type >= WM_T_82543) {
   2627 		ifp->if_capabilities |=
   2628 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2629 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2630 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2631 		    IFCAP_CSUM_TCPv6_Tx |
   2632 		    IFCAP_CSUM_UDPv6_Tx;
   2633 	}
   2634 
   2635 	/*
    2636 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2637 	 *
   2638 	 *	82541GI (8086:1076) ... no
   2639 	 *	82572EI (8086:10b9) ... yes
   2640 	 */
   2641 	if (sc->sc_type >= WM_T_82571) {
   2642 		ifp->if_capabilities |=
   2643 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2644 	}
   2645 
   2646 	/*
    2647 	 * If we're an i82544 or greater (except i82547), we can do
   2648 	 * TCP segmentation offload.
   2649 	 */
   2650 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2651 		ifp->if_capabilities |= IFCAP_TSOv4;
   2652 	}
   2653 
   2654 	if (sc->sc_type >= WM_T_82571) {
   2655 		ifp->if_capabilities |= IFCAP_TSOv6;
   2656 	}
   2657 
   2658 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2659 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2660 
   2661 #ifdef WM_MPSAFE
   2662 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2663 #else
   2664 	sc->sc_core_lock = NULL;
   2665 #endif
   2666 
   2667 	/* Attach the interface. */
   2668 	if_initialize(ifp);
   2669 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2670 	ether_ifattach(ifp, enaddr);
   2671 	if_register(ifp);
   2672 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2673 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2674 			  RND_FLAG_DEFAULT);
   2675 
   2676 #ifdef WM_EVENT_COUNTERS
   2677 	/* Attach event counters. */
   2678 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2679 	    NULL, xname, "linkintr");
   2680 
   2681 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2682 	    NULL, xname, "tx_xoff");
   2683 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2684 	    NULL, xname, "tx_xon");
   2685 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2686 	    NULL, xname, "rx_xoff");
   2687 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2688 	    NULL, xname, "rx_xon");
   2689 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2690 	    NULL, xname, "rx_macctl");
   2691 #endif /* WM_EVENT_COUNTERS */
   2692 
   2693 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2694 		pmf_class_network_register(self, ifp);
   2695 	else
   2696 		aprint_error_dev(self, "couldn't establish power handler\n");
   2697 
   2698 	sc->sc_flags |= WM_F_ATTACHED;
   2699  out:
   2700 	return;
   2701 }
   2702 
   2703 /* The detach function (ca_detach) */
   2704 static int
   2705 wm_detach(device_t self, int flags __unused)
   2706 {
   2707 	struct wm_softc *sc = device_private(self);
   2708 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2709 	int i;
   2710 
   2711 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2712 		return 0;
   2713 
   2714 	/* Stop the interface. Callouts are stopped in it. */
   2715 	wm_stop(ifp, 1);
   2716 
   2717 	pmf_device_deregister(self);
   2718 
   2719 #ifdef WM_EVENT_COUNTERS
   2720 	evcnt_detach(&sc->sc_ev_linkintr);
   2721 
   2722 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2723 	evcnt_detach(&sc->sc_ev_tx_xon);
   2724 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2725 	evcnt_detach(&sc->sc_ev_rx_xon);
   2726 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2727 #endif /* WM_EVENT_COUNTERS */
   2728 
   2729 	/* Tell the firmware about the release */
   2730 	WM_CORE_LOCK(sc);
   2731 	wm_release_manageability(sc);
   2732 	wm_release_hw_control(sc);
   2733 	wm_enable_wakeup(sc);
   2734 	WM_CORE_UNLOCK(sc);
   2735 
   2736 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2737 
   2738 	/* Delete all remaining media. */
   2739 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2740 
   2741 	ether_ifdetach(ifp);
   2742 	if_detach(ifp);
   2743 	if_percpuq_destroy(sc->sc_ipq);
   2744 
   2745 	/* Unload RX dmamaps and free mbufs */
   2746 	for (i = 0; i < sc->sc_nqueues; i++) {
   2747 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2748 		mutex_enter(rxq->rxq_lock);
   2749 		wm_rxdrain(rxq);
   2750 		mutex_exit(rxq->rxq_lock);
   2751 	}
   2752 	/* Must unlock here */
   2753 
   2754 	/* Disestablish the interrupt handler */
   2755 	for (i = 0; i < sc->sc_nintrs; i++) {
   2756 		if (sc->sc_ihs[i] != NULL) {
   2757 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2758 			sc->sc_ihs[i] = NULL;
   2759 		}
   2760 	}
   2761 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2762 
   2763 	wm_free_txrx_queues(sc);
   2764 
   2765 	/* Unmap the registers */
   2766 	if (sc->sc_ss) {
   2767 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2768 		sc->sc_ss = 0;
   2769 	}
   2770 	if (sc->sc_ios) {
   2771 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2772 		sc->sc_ios = 0;
   2773 	}
   2774 	if (sc->sc_flashs) {
   2775 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2776 		sc->sc_flashs = 0;
   2777 	}
   2778 
   2779 	if (sc->sc_core_lock)
   2780 		mutex_obj_free(sc->sc_core_lock);
   2781 	if (sc->sc_ich_phymtx)
   2782 		mutex_obj_free(sc->sc_ich_phymtx);
   2783 	if (sc->sc_ich_nvmmtx)
   2784 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2785 
   2786 	return 0;
   2787 }
   2788 
   2789 static bool
   2790 wm_suspend(device_t self, const pmf_qual_t *qual)
   2791 {
   2792 	struct wm_softc *sc = device_private(self);
   2793 
   2794 	wm_release_manageability(sc);
   2795 	wm_release_hw_control(sc);
   2796 	wm_enable_wakeup(sc);
   2797 
   2798 	return true;
   2799 }
   2800 
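/*
 * wm_resume:		[pmf resume handler]
 *
 *	Re-initialize manageability when the system resumes.
 */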
   2801 static bool
   2802 wm_resume(device_t self, const pmf_qual_t *qual)
   2803 {
   2804 	struct wm_softc *sc = device_private(self);
   2805 
   2806 	wm_init_manageability(sc);
   2807 
   2808 	return true;
   2809 }
   2810 
   2811 /*
   2812  * wm_watchdog:		[ifnet interface function]
   2813  *
   2814  *	Watchdog timer handler.
   2815  */
   2816 static void
   2817 wm_watchdog(struct ifnet *ifp)
   2818 {
   2819 	int qid;
   2820 	struct wm_softc *sc = ifp->if_softc;
   2821 
   2822 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2823 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2824 
   2825 		wm_watchdog_txq(ifp, txq);
   2826 	}
   2827 
   2828 	/* Reset the interface. */
   2829 	(void) wm_init(ifp);
   2830 
   2831 	/*
    2832 	 * There is still some upper-layer processing which calls
    2833 	 * ifp->if_start(), e.g. ALTQ.
   2834 	 */
   2835 	/* Try to get more packets going. */
   2836 	ifp->if_start(ifp);
   2837 }
   2838 
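/*
 * wm_watchdog_txq:
 *
 *	Per-queue watchdog handler.  Sweep up completed transmit
 *	descriptors and log a device timeout if any are still
 *	outstanding.
 */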
   2839 static void
   2840 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2841 {
   2842 	struct wm_softc *sc = ifp->if_softc;
   2843 
   2844 	/*
   2845 	 * Since we're using delayed interrupts, sweep up
   2846 	 * before we report an error.
   2847 	 */
   2848 	mutex_enter(txq->txq_lock);
   2849 	wm_txeof(sc, txq);
   2850 	mutex_exit(txq->txq_lock);
   2851 
   2852 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2853 #ifdef WM_DEBUG
   2854 		int i, j;
   2855 		struct wm_txsoft *txs;
   2856 #endif
   2857 		log(LOG_ERR,
   2858 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2859 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2860 		    txq->txq_next);
   2861 		ifp->if_oerrors++;
   2862 #ifdef WM_DEBUG
    2863 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2864 		    i = WM_NEXTTXS(txq, i)) {
    2865 			txs = &txq->txq_soft[i];
    2866 			printf("txs %d tx %d -> %d\n",
    2867 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2868 			for (j = txs->txs_firstdesc; ;
    2869 			    j = WM_NEXTTX(txq, j)) {
    2870 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2871 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2872 				printf("\t %#08x%08x\n",
    2873 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2874 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2875 				if (j == txs->txs_lastdesc)
    2876 					break;
    2877 			}
    2878 		}
   2879 #endif
   2880 	}
   2881 }
   2882 
   2883 /*
   2884  * wm_tick:
   2885  *
   2886  *	One second timer, used to check link status, sweep up
   2887  *	completed transmit jobs, etc.
   2888  */
   2889 static void
   2890 wm_tick(void *arg)
   2891 {
   2892 	struct wm_softc *sc = arg;
   2893 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2894 #ifndef WM_MPSAFE
   2895 	int s = splnet();
   2896 #endif
   2897 
   2898 	WM_CORE_LOCK(sc);
   2899 
   2900 	if (sc->sc_core_stopping)
   2901 		goto out;
   2902 
   2903 	if (sc->sc_type >= WM_T_82542_2_1) {
   2904 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2905 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2906 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2907 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2908 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2909 	}
   2910 
   2911 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2912 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    2913 	    + CSR_READ(sc, WMREG_CRCERRS)
   2914 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2915 	    + CSR_READ(sc, WMREG_SYMERRC)
   2916 	    + CSR_READ(sc, WMREG_RXERRC)
   2917 	    + CSR_READ(sc, WMREG_SEC)
   2918 	    + CSR_READ(sc, WMREG_CEXTERR)
   2919 	    + CSR_READ(sc, WMREG_RLEC);
   2920 	/*
    2921 	 * WMREG_RNBC is incremented when there are no available buffers
    2922 	 * in host memory. It does not count dropped packets, because the
    2923 	 * ethernet controller can still receive packets in that case if
    2924 	 * there is space in the PHY's FIFO.
    2925 	 *
    2926 	 * If you want to count WMREG_RNBC events, use a dedicated evcnt
    2927 	 * instead of if_iqdrops.
   2928 	 */
   2929 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2930 
   2931 	if (sc->sc_flags & WM_F_HAS_MII)
   2932 		mii_tick(&sc->sc_mii);
   2933 	else if ((sc->sc_type >= WM_T_82575)
   2934 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2935 		wm_serdes_tick(sc);
   2936 	else
   2937 		wm_tbi_tick(sc);
   2938 
   2939 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2940 out:
   2941 	WM_CORE_UNLOCK(sc);
   2942 #ifndef WM_MPSAFE
   2943 	splx(s);
   2944 #endif
   2945 }
   2946 
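/*
 * wm_ifflags_cb:
 *
 *	Handle changes to the interface flags; return ENETRESET when
 *	a full re-initialization is needed.
 */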
   2947 static int
   2948 wm_ifflags_cb(struct ethercom *ec)
   2949 {
   2950 	struct ifnet *ifp = &ec->ec_if;
   2951 	struct wm_softc *sc = ifp->if_softc;
   2952 	int rc = 0;
   2953 
   2954 	WM_CORE_LOCK(sc);
   2955 
   2956 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2957 	sc->sc_if_flags = ifp->if_flags;
   2958 
   2959 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2960 		rc = ENETRESET;
   2961 		goto out;
   2962 	}
   2963 
   2964 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2965 		wm_set_filter(sc);
   2966 
   2967 	wm_set_vlan(sc);
   2968 
   2969 out:
   2970 	WM_CORE_UNLOCK(sc);
   2971 
   2972 	return rc;
   2973 }
   2974 
   2975 /*
   2976  * wm_ioctl:		[ifnet interface function]
   2977  *
   2978  *	Handle control requests from the operator.
   2979  */
   2980 static int
   2981 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2982 {
   2983 	struct wm_softc *sc = ifp->if_softc;
   2984 	struct ifreq *ifr = (struct ifreq *) data;
   2985 	struct ifaddr *ifa = (struct ifaddr *)data;
   2986 	struct sockaddr_dl *sdl;
   2987 	int s, error;
   2988 
   2989 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2990 		device_xname(sc->sc_dev), __func__));
   2991 
   2992 #ifndef WM_MPSAFE
   2993 	s = splnet();
   2994 #endif
   2995 	switch (cmd) {
   2996 	case SIOCSIFMEDIA:
   2997 	case SIOCGIFMEDIA:
   2998 		WM_CORE_LOCK(sc);
   2999 		/* Flow control requires full-duplex mode. */
   3000 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3001 		    (ifr->ifr_media & IFM_FDX) == 0)
   3002 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3003 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3004 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3005 				/* We can do both TXPAUSE and RXPAUSE. */
   3006 				ifr->ifr_media |=
   3007 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3008 			}
   3009 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3010 		}
   3011 		WM_CORE_UNLOCK(sc);
   3012 #ifdef WM_MPSAFE
   3013 		s = splnet();
   3014 #endif
   3015 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3016 #ifdef WM_MPSAFE
   3017 		splx(s);
   3018 #endif
   3019 		break;
   3020 	case SIOCINITIFADDR:
   3021 		WM_CORE_LOCK(sc);
   3022 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3023 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3024 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3025 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3026 			/* unicast address is first multicast entry */
   3027 			wm_set_filter(sc);
   3028 			error = 0;
   3029 			WM_CORE_UNLOCK(sc);
   3030 			break;
   3031 		}
   3032 		WM_CORE_UNLOCK(sc);
   3033 		/*FALLTHROUGH*/
   3034 	default:
   3035 #ifdef WM_MPSAFE
   3036 		s = splnet();
   3037 #endif
   3038 		/* It may call wm_start, so unlock here */
   3039 		error = ether_ioctl(ifp, cmd, data);
   3040 #ifdef WM_MPSAFE
   3041 		splx(s);
   3042 #endif
   3043 		if (error != ENETRESET)
   3044 			break;
   3045 
   3046 		error = 0;
   3047 
   3048 		if (cmd == SIOCSIFCAP) {
   3049 			error = (*ifp->if_init)(ifp);
   3050 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3051 			;
   3052 		else if (ifp->if_flags & IFF_RUNNING) {
   3053 			/*
   3054 			 * Multicast list has changed; set the hardware filter
   3055 			 * accordingly.
   3056 			 */
   3057 			WM_CORE_LOCK(sc);
   3058 			wm_set_filter(sc);
   3059 			WM_CORE_UNLOCK(sc);
   3060 		}
   3061 		break;
   3062 	}
   3063 
   3064 #ifndef WM_MPSAFE
   3065 	splx(s);
   3066 #endif
   3067 	return error;
   3068 }
   3069 
   3070 /* MAC address related */
   3071 
   3072 /*
    3073  * Get the offset of the MAC address and return it.
    3074  * If an error occurs, use offset 0.
   3075  */
   3076 static uint16_t
   3077 wm_check_alt_mac_addr(struct wm_softc *sc)
   3078 {
   3079 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3080 	uint16_t offset = NVM_OFF_MACADDR;
   3081 
   3082 	/* Try to read alternative MAC address pointer */
   3083 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3084 		return 0;
   3085 
    3086 	/* Check whether the pointer is valid. */
   3087 	if ((offset == 0x0000) || (offset == 0xffff))
   3088 		return 0;
   3089 
   3090 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3091 	/*
    3092 	 * Check whether the alternative MAC address is valid or not.
    3093 	 * Some cards have a non-0xffff pointer but don't actually use
    3094 	 * an alternative MAC address.
    3095 	 *
    3096 	 * To do so, check that the multicast (group) bit is not set.
   3097 	 */
   3098 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3099 		if (((myea[0] & 0xff) & 0x01) == 0)
   3100 			return offset; /* Found */
   3101 
   3102 	/* Not found */
   3103 	return 0;
   3104 }
   3105 
   3106 static int
   3107 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3108 {
   3109 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3110 	uint16_t offset = NVM_OFF_MACADDR;
   3111 	int do_invert = 0;
   3112 
   3113 	switch (sc->sc_type) {
   3114 	case WM_T_82580:
   3115 	case WM_T_I350:
   3116 	case WM_T_I354:
   3117 		/* EEPROM Top Level Partitioning */
   3118 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3119 		break;
   3120 	case WM_T_82571:
   3121 	case WM_T_82575:
   3122 	case WM_T_82576:
   3123 	case WM_T_80003:
   3124 	case WM_T_I210:
   3125 	case WM_T_I211:
   3126 		offset = wm_check_alt_mac_addr(sc);
   3127 		if (offset == 0)
   3128 			if ((sc->sc_funcid & 0x01) == 1)
   3129 				do_invert = 1;
   3130 		break;
   3131 	default:
   3132 		if ((sc->sc_funcid & 0x01) == 1)
   3133 			do_invert = 1;
   3134 		break;
   3135 	}
   3136 
   3137 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3138 		goto bad;
   3139 
   3140 	enaddr[0] = myea[0] & 0xff;
   3141 	enaddr[1] = myea[0] >> 8;
   3142 	enaddr[2] = myea[1] & 0xff;
   3143 	enaddr[3] = myea[1] >> 8;
   3144 	enaddr[4] = myea[2] & 0xff;
   3145 	enaddr[5] = myea[2] >> 8;
   3146 
   3147 	/*
   3148 	 * Toggle the LSB of the MAC address on the second port
   3149 	 * of some dual port cards.
   3150 	 */
   3151 	if (do_invert != 0)
   3152 		enaddr[5] ^= 1;
   3153 
   3154 	return 0;
   3155 
   3156  bad:
   3157 	return -1;
   3158 }
   3159 
   3160 /*
   3161  * wm_set_ral:
   3162  *
    3163  *	Set an entry in the receive address list.
   3164  */
   3165 static void
   3166 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3167 {
   3168 	uint32_t ral_lo, ral_hi;
   3169 
   3170 	if (enaddr != NULL) {
   3171 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3172 		    (enaddr[3] << 24);
   3173 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3174 		ral_hi |= RAL_AV;
   3175 	} else {
   3176 		ral_lo = 0;
   3177 		ral_hi = 0;
   3178 	}
   3179 
   3180 	if (sc->sc_type >= WM_T_82544) {
   3181 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3182 		    ral_lo);
   3183 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3184 		    ral_hi);
   3185 	} else {
   3186 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3187 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3188 	}
   3189 }
   3190 
   3191 /*
   3192  * wm_mchash:
   3193  *
   3194  *	Compute the hash of the multicast address for the 4096-bit
   3195  *	multicast filter.
   3196  */
   3197 static uint32_t
   3198 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3199 {
   3200 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3201 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3202 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3203 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3204 	uint32_t hash;
   3205 
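	/*
	 * The hash is formed from bits of the last two octets of the
	 * address; sc_mchash_type selects the bit offsets via the
	 * tables above.  ICH/PCH variants use a smaller multicast
	 * table, hence the 10-bit hash below instead of 12 bits.
	 */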
   3206 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3207 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3208 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3209 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3210 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3211 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3212 		return (hash & 0x3ff);
   3213 	}
   3214 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3215 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3216 
   3217 	return (hash & 0xfff);
   3218 }
   3219 
   3220 /*
   3221  * wm_set_filter:
   3222  *
   3223  *	Set up the receive filter.
   3224  */
   3225 static void
   3226 wm_set_filter(struct wm_softc *sc)
   3227 {
   3228 	struct ethercom *ec = &sc->sc_ethercom;
   3229 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3230 	struct ether_multi *enm;
   3231 	struct ether_multistep step;
   3232 	bus_addr_t mta_reg;
   3233 	uint32_t hash, reg, bit;
   3234 	int i, size, ralmax;
   3235 
   3236 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3237 		device_xname(sc->sc_dev), __func__));
   3238 
   3239 	if (sc->sc_type >= WM_T_82544)
   3240 		mta_reg = WMREG_CORDOVA_MTA;
   3241 	else
   3242 		mta_reg = WMREG_MTA;
   3243 
   3244 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3245 
   3246 	if (ifp->if_flags & IFF_BROADCAST)
   3247 		sc->sc_rctl |= RCTL_BAM;
   3248 	if (ifp->if_flags & IFF_PROMISC) {
   3249 		sc->sc_rctl |= RCTL_UPE;
   3250 		goto allmulti;
   3251 	}
   3252 
   3253 	/*
   3254 	 * Set the station address in the first RAL slot, and
   3255 	 * clear the remaining slots.
   3256 	 */
   3257 	if (sc->sc_type == WM_T_ICH8)
    3258 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3259 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3260 	    || (sc->sc_type == WM_T_PCH))
   3261 		size = WM_RAL_TABSIZE_ICH8;
   3262 	else if (sc->sc_type == WM_T_PCH2)
   3263 		size = WM_RAL_TABSIZE_PCH2;
    3264 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3265 		size = WM_RAL_TABSIZE_PCH_LPT;
   3266 	else if (sc->sc_type == WM_T_82575)
   3267 		size = WM_RAL_TABSIZE_82575;
   3268 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3269 		size = WM_RAL_TABSIZE_82576;
   3270 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3271 		size = WM_RAL_TABSIZE_I350;
   3272 	else
   3273 		size = WM_RAL_TABSIZE;
   3274 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3275 
   3276 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3277 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3278 		switch (i) {
   3279 		case 0:
   3280 			/* We can use all entries */
   3281 			ralmax = size;
   3282 			break;
   3283 		case 1:
   3284 			/* Only RAR[0] */
   3285 			ralmax = 1;
   3286 			break;
   3287 		default:
   3288 			/* available SHRA + RAR[0] */
   3289 			ralmax = i + 1;
   3290 		}
   3291 	} else
   3292 		ralmax = size;
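	/*
	 * Clear the remaining entries we are allowed to touch.  On
	 * LPT/SPT, entries at or above ralmax appear to be owned by
	 * firmware (FWSM_WLOCK_MAC) and are left alone.
	 */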
   3293 	for (i = 1; i < size; i++) {
   3294 		if (i < ralmax)
   3295 			wm_set_ral(sc, NULL, i);
   3296 	}
   3297 
   3298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3299 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3300 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3301 	    || (sc->sc_type == WM_T_PCH_SPT))
   3302 		size = WM_ICH8_MC_TABSIZE;
   3303 	else
   3304 		size = WM_MC_TABSIZE;
   3305 	/* Clear out the multicast table. */
   3306 	for (i = 0; i < size; i++)
   3307 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3308 
   3309 	ETHER_LOCK(ec);
   3310 	ETHER_FIRST_MULTI(step, ec, enm);
   3311 	while (enm != NULL) {
   3312 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3313 			ETHER_UNLOCK(ec);
   3314 			/*
   3315 			 * We must listen to a range of multicast addresses.
   3316 			 * For now, just accept all multicasts, rather than
   3317 			 * trying to set only those filter bits needed to match
   3318 			 * the range.  (At this time, the only use of address
   3319 			 * ranges is for IP multicast routing, for which the
   3320 			 * range is big enough to require all bits set.)
   3321 			 */
   3322 			goto allmulti;
   3323 		}
   3324 
   3325 		hash = wm_mchash(sc, enm->enm_addrlo);
   3326 
   3327 		reg = (hash >> 5);
   3328 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3329 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3330 		    || (sc->sc_type == WM_T_PCH2)
   3331 		    || (sc->sc_type == WM_T_PCH_LPT)
   3332 		    || (sc->sc_type == WM_T_PCH_SPT))
   3333 			reg &= 0x1f;
   3334 		else
   3335 			reg &= 0x7f;
   3336 		bit = hash & 0x1f;
   3337 
   3338 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3339 		hash |= 1U << bit;
   3340 
   3341 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3342 			/*
   3343 			 * 82544 Errata 9: Certain register cannot be written
   3344 			 * with particular alignments in PCI-X bus operation
   3345 			 * (FCAH, MTA and VFTA).
   3346 			 */
   3347 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3348 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3349 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3350 		} else
   3351 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3352 
   3353 		ETHER_NEXT_MULTI(step, enm);
   3354 	}
   3355 	ETHER_UNLOCK(ec);
   3356 
   3357 	ifp->if_flags &= ~IFF_ALLMULTI;
   3358 	goto setit;
   3359 
   3360  allmulti:
   3361 	ifp->if_flags |= IFF_ALLMULTI;
   3362 	sc->sc_rctl |= RCTL_MPE;
   3363 
   3364  setit:
   3365 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3366 }
   3367 
   3368 /* Reset and init related */
   3369 
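/*
 * wm_set_vlan:
 *
 *	Set or clear the VLAN mode enable (CTRL_VME) bit according to
 *	whether any VLANs are attached to the interface.
 */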
   3370 static void
   3371 wm_set_vlan(struct wm_softc *sc)
   3372 {
   3373 
   3374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3375 		device_xname(sc->sc_dev), __func__));
   3376 
   3377 	/* Deal with VLAN enables. */
   3378 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3379 		sc->sc_ctrl |= CTRL_VME;
   3380 	else
   3381 		sc->sc_ctrl &= ~CTRL_VME;
   3382 
   3383 	/* Write the control registers. */
   3384 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3385 }
   3386 
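/*
 * wm_set_pcie_completion_timeout:
 *
 *	Set the PCIe completion timeout if it is still at its default
 *	of zero, and disable completion timeout resend.
 */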
   3387 static void
   3388 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3389 {
   3390 	uint32_t gcr;
   3391 	pcireg_t ctrl2;
   3392 
   3393 	gcr = CSR_READ(sc, WMREG_GCR);
   3394 
   3395 	/* Only take action if timeout value is defaulted to 0 */
   3396 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3397 		goto out;
   3398 
   3399 	if ((gcr & GCR_CAP_VER2) == 0) {
   3400 		gcr |= GCR_CMPL_TMOUT_10MS;
   3401 		goto out;
   3402 	}
   3403 
   3404 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3405 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3406 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3407 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3408 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3409 
   3410 out:
   3411 	/* Disable completion timeout resend */
   3412 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3413 
   3414 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3415 }
   3416 
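/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the EEPROM auto-read to complete after a reset.
 */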
   3417 void
   3418 wm_get_auto_rd_done(struct wm_softc *sc)
   3419 {
   3420 	int i;
   3421 
   3422 	/* wait for eeprom to reload */
   3423 	switch (sc->sc_type) {
   3424 	case WM_T_82571:
   3425 	case WM_T_82572:
   3426 	case WM_T_82573:
   3427 	case WM_T_82574:
   3428 	case WM_T_82583:
   3429 	case WM_T_82575:
   3430 	case WM_T_82576:
   3431 	case WM_T_82580:
   3432 	case WM_T_I350:
   3433 	case WM_T_I354:
   3434 	case WM_T_I210:
   3435 	case WM_T_I211:
   3436 	case WM_T_80003:
   3437 	case WM_T_ICH8:
   3438 	case WM_T_ICH9:
   3439 		for (i = 0; i < 10; i++) {
   3440 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3441 				break;
   3442 			delay(1000);
   3443 		}
   3444 		if (i == 10) {
   3445 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3446 			    "complete\n", device_xname(sc->sc_dev));
   3447 		}
   3448 		break;
   3449 	default:
   3450 		break;
   3451 	}
   3452 }
   3453 
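/*
 * wm_lan_init_done:
 *
 *	Wait for the LAN_INIT_DONE status bit to be set after a reset
 *	on ICH10 and newer devices, then clear it.
 */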
   3454 void
   3455 wm_lan_init_done(struct wm_softc *sc)
   3456 {
   3457 	uint32_t reg = 0;
   3458 	int i;
   3459 
   3460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3461 		device_xname(sc->sc_dev), __func__));
   3462 
   3463 	/* Wait for eeprom to reload */
   3464 	switch (sc->sc_type) {
   3465 	case WM_T_ICH10:
   3466 	case WM_T_PCH:
   3467 	case WM_T_PCH2:
   3468 	case WM_T_PCH_LPT:
   3469 	case WM_T_PCH_SPT:
   3470 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3471 			reg = CSR_READ(sc, WMREG_STATUS);
   3472 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3473 				break;
   3474 			delay(100);
   3475 		}
   3476 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3477 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3478 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3479 		}
   3480 		break;
   3481 	default:
   3482 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3483 		    __func__);
   3484 		break;
   3485 	}
   3486 
   3487 	reg &= ~STATUS_LAN_INIT_DONE;
   3488 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3489 }
   3490 
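/*
 * wm_get_cfg_done:
 *
 *	Wait until the post-reset hardware configuration cycle has
 *	completed, using the mechanism appropriate to the chip type.
 */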
   3491 void
   3492 wm_get_cfg_done(struct wm_softc *sc)
   3493 {
   3494 	int mask;
   3495 	uint32_t reg;
   3496 	int i;
   3497 
   3498 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3499 		device_xname(sc->sc_dev), __func__));
   3500 
   3501 	/* Wait for eeprom to reload */
   3502 	switch (sc->sc_type) {
   3503 	case WM_T_82542_2_0:
   3504 	case WM_T_82542_2_1:
   3505 		/* null */
   3506 		break;
   3507 	case WM_T_82543:
   3508 	case WM_T_82544:
   3509 	case WM_T_82540:
   3510 	case WM_T_82545:
   3511 	case WM_T_82545_3:
   3512 	case WM_T_82546:
   3513 	case WM_T_82546_3:
   3514 	case WM_T_82541:
   3515 	case WM_T_82541_2:
   3516 	case WM_T_82547:
   3517 	case WM_T_82547_2:
   3518 	case WM_T_82573:
   3519 	case WM_T_82574:
   3520 	case WM_T_82583:
   3521 		/* generic */
   3522 		delay(10*1000);
   3523 		break;
   3524 	case WM_T_80003:
   3525 	case WM_T_82571:
   3526 	case WM_T_82572:
   3527 	case WM_T_82575:
   3528 	case WM_T_82576:
   3529 	case WM_T_82580:
   3530 	case WM_T_I350:
   3531 	case WM_T_I354:
   3532 	case WM_T_I210:
   3533 	case WM_T_I211:
   3534 		if (sc->sc_type == WM_T_82571) {
   3535 			/* Only 82571 shares port 0 */
   3536 			mask = EEMNGCTL_CFGDONE_0;
   3537 		} else
   3538 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3539 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3540 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3541 				break;
   3542 			delay(1000);
   3543 		}
   3544 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3545 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3546 				device_xname(sc->sc_dev), __func__));
   3547 		}
   3548 		break;
   3549 	case WM_T_ICH8:
   3550 	case WM_T_ICH9:
   3551 	case WM_T_ICH10:
   3552 	case WM_T_PCH:
   3553 	case WM_T_PCH2:
   3554 	case WM_T_PCH_LPT:
   3555 	case WM_T_PCH_SPT:
   3556 		delay(10*1000);
   3557 		if (sc->sc_type >= WM_T_ICH10)
   3558 			wm_lan_init_done(sc);
   3559 		else
   3560 			wm_get_auto_rd_done(sc);
   3561 
   3562 		reg = CSR_READ(sc, WMREG_STATUS);
   3563 		if ((reg & STATUS_PHYRA) != 0)
   3564 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3565 		break;
   3566 	default:
   3567 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3568 		    __func__);
   3569 		break;
   3570 	}
   3571 }
   3572 
   3573 /* Init hardware bits */
   3574 void
   3575 wm_initialize_hardware_bits(struct wm_softc *sc)
   3576 {
   3577 	uint32_t tarc0, tarc1, reg;
   3578 
   3579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3580 		device_xname(sc->sc_dev), __func__));
   3581 
   3582 	/* For 82571 variant, 80003 and ICHs */
   3583 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3584 	    || (sc->sc_type >= WM_T_80003)) {
   3585 
   3586 		/* Transmit Descriptor Control 0 */
   3587 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3588 		reg |= TXDCTL_COUNT_DESC;
   3589 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3590 
   3591 		/* Transmit Descriptor Control 1 */
   3592 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3593 		reg |= TXDCTL_COUNT_DESC;
   3594 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3595 
   3596 		/* TARC0 */
   3597 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3598 		switch (sc->sc_type) {
   3599 		case WM_T_82571:
   3600 		case WM_T_82572:
   3601 		case WM_T_82573:
   3602 		case WM_T_82574:
   3603 		case WM_T_82583:
   3604 		case WM_T_80003:
   3605 			/* Clear bits 30..27 */
   3606 			tarc0 &= ~__BITS(30, 27);
   3607 			break;
   3608 		default:
   3609 			break;
   3610 		}
   3611 
   3612 		switch (sc->sc_type) {
   3613 		case WM_T_82571:
   3614 		case WM_T_82572:
   3615 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3616 
   3617 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3618 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3619 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3620 			/* 8257[12] Errata No.7 */
    3621 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3622 
   3623 			/* TARC1 bit 28 */
   3624 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3625 				tarc1 &= ~__BIT(28);
   3626 			else
   3627 				tarc1 |= __BIT(28);
   3628 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3629 
   3630 			/*
   3631 			 * 8257[12] Errata No.13
    3632 			 * Disable Dynamic Clock Gating.
   3633 			 */
   3634 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3635 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3636 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3637 			break;
   3638 		case WM_T_82573:
   3639 		case WM_T_82574:
   3640 		case WM_T_82583:
   3641 			if ((sc->sc_type == WM_T_82574)
   3642 			    || (sc->sc_type == WM_T_82583))
   3643 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3644 
   3645 			/* Extended Device Control */
   3646 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3647 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3648 			reg |= __BIT(22);	/* Set bit 22 */
   3649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3650 
   3651 			/* Device Control */
   3652 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3653 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3654 
   3655 			/* PCIe Control Register */
   3656 			/*
   3657 			 * 82573 Errata (unknown).
   3658 			 *
   3659 			 * 82574 Errata 25 and 82583 Errata 12
   3660 			 * "Dropped Rx Packets":
    3661 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3662 			 */
   3663 			reg = CSR_READ(sc, WMREG_GCR);
   3664 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3665 			CSR_WRITE(sc, WMREG_GCR, reg);
   3666 
   3667 			if ((sc->sc_type == WM_T_82574)
   3668 			    || (sc->sc_type == WM_T_82583)) {
   3669 				/*
   3670 				 * Document says this bit must be set for
   3671 				 * proper operation.
   3672 				 */
   3673 				reg = CSR_READ(sc, WMREG_GCR);
   3674 				reg |= __BIT(22);
   3675 				CSR_WRITE(sc, WMREG_GCR, reg);
   3676 
   3677 				/*
    3678 				 * Apply a workaround for the hardware
    3679 				 * errata documented in the errata docs.
    3680 				 * It fixes an issue where some error-prone
    3681 				 * or unreliable PCIe completions occur,
    3682 				 * particularly with ASPM enabled.  Without
    3683 				 * the fix, the issue can cause Tx timeouts.
   3684 				 */
   3685 				reg = CSR_READ(sc, WMREG_GCR2);
   3686 				reg |= __BIT(0);
   3687 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3688 			}
   3689 			break;
   3690 		case WM_T_80003:
   3691 			/* TARC0 */
   3692 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3693 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3694 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3695 
   3696 			/* TARC1 bit 28 */
   3697 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3698 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3699 				tarc1 &= ~__BIT(28);
   3700 			else
   3701 				tarc1 |= __BIT(28);
   3702 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3703 			break;
   3704 		case WM_T_ICH8:
   3705 		case WM_T_ICH9:
   3706 		case WM_T_ICH10:
   3707 		case WM_T_PCH:
   3708 		case WM_T_PCH2:
   3709 		case WM_T_PCH_LPT:
   3710 		case WM_T_PCH_SPT:
   3711 			/* TARC0 */
   3712 			if ((sc->sc_type == WM_T_ICH8)
   3713 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3714 				/* Set TARC0 bits 29 and 28 */
   3715 				tarc0 |= __BITS(29, 28);
   3716 			}
   3717 			/* Set TARC0 bits 23,24,26,27 */
   3718 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3719 
   3720 			/* CTRL_EXT */
   3721 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3722 			reg |= __BIT(22);	/* Set bit 22 */
   3723 			/*
   3724 			 * Enable PHY low-power state when MAC is at D3
   3725 			 * w/o WoL
   3726 			 */
   3727 			if (sc->sc_type >= WM_T_PCH)
   3728 				reg |= CTRL_EXT_PHYPDEN;
   3729 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3730 
   3731 			/* TARC1 */
   3732 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3733 			/* bit 28 */
   3734 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3735 				tarc1 &= ~__BIT(28);
   3736 			else
   3737 				tarc1 |= __BIT(28);
   3738 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3739 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3740 
   3741 			/* Device Status */
   3742 			if (sc->sc_type == WM_T_ICH8) {
   3743 				reg = CSR_READ(sc, WMREG_STATUS);
   3744 				reg &= ~__BIT(31);
   3745 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3746 
   3747 			}
   3748 
   3749 			/* IOSFPC */
   3750 			if (sc->sc_type == WM_T_PCH_SPT) {
   3751 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3752 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3753 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3754 			}
   3755 			/*
   3756 			 * Work-around descriptor data corruption issue during
   3757 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3758 			 * capability.
   3759 			 */
   3760 			reg = CSR_READ(sc, WMREG_RFCTL);
   3761 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3762 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3763 			break;
   3764 		default:
   3765 			break;
   3766 		}
   3767 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3768 
   3769 		switch (sc->sc_type) {
   3770 		/*
   3771 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3772 		 * Avoid RSS Hash Value bug.
   3773 		 */
   3774 		case WM_T_82571:
   3775 		case WM_T_82572:
   3776 		case WM_T_82573:
   3777 		case WM_T_80003:
   3778 		case WM_T_ICH8:
   3779 			reg = CSR_READ(sc, WMREG_RFCTL);
    3780 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3781 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3782 			break;
   3783 		case WM_T_82574:
    3784 			/* Use extended Rx descriptors. */
   3785 			reg = CSR_READ(sc, WMREG_RFCTL);
   3786 			reg |= WMREG_RFCTL_EXSTEN;
   3787 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3788 			break;
   3789 		default:
   3790 			break;
   3791 		}
   3792 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3793 		/*
   3794 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3795 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3796 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3797 		 * Correctly by the Device"
   3798 		 *
   3799 		 * I354(C2000) Errata AVR53:
   3800 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3801 		 * Hang"
   3802 		 */
   3803 		reg = CSR_READ(sc, WMREG_RFCTL);
   3804 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3805 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3806 	}
   3807 }
   3808 
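/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the encoded RXPBS value into a packet buffer size
 *	using the 82580 lookup table.
 */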
   3809 static uint32_t
   3810 wm_rxpbs_adjust_82580(uint32_t val)
   3811 {
   3812 	uint32_t rv = 0;
   3813 
   3814 	if (val < __arraycount(wm_82580_rxpbs_table))
   3815 		rv = wm_82580_rxpbs_table[val];
   3816 
   3817 	return rv;
   3818 }
   3819 
   3820 /*
   3821  * wm_reset_phy:
   3822  *
   3823  *	generic PHY reset function.
   3824  *	Same as e1000_phy_hw_reset_generic()
   3825  */
   3826 static void
   3827 wm_reset_phy(struct wm_softc *sc)
   3828 {
   3829 	uint32_t reg;
   3830 
   3831 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3832 		device_xname(sc->sc_dev), __func__));
   3833 	if (wm_phy_resetisblocked(sc))
   3834 		return;
   3835 
   3836 	sc->phy.acquire(sc);
   3837 
   3838 	reg = CSR_READ(sc, WMREG_CTRL);
   3839 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3840 	CSR_WRITE_FLUSH(sc);
   3841 
   3842 	delay(sc->phy.reset_delay_us);
   3843 
   3844 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3845 	CSR_WRITE_FLUSH(sc);
   3846 
   3847 	delay(150);
   3848 
   3849 	sc->phy.release(sc);
   3850 
   3851 	wm_get_cfg_done(sc);
   3852 }
   3853 
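/*
 * wm_flush_desc_rings:
 *
 *	Flush the TX and RX descriptor rings before a reset when the
 *	hardware reports a pending flush request.  This appears to be
 *	a PCH_SPT (I219) workaround.
 */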
   3854 static void
   3855 wm_flush_desc_rings(struct wm_softc *sc)
   3856 {
   3857 	pcireg_t preg;
   3858 	uint32_t reg;
   3859 	int nexttx;
   3860 
   3861 	/* First, disable MULR fix in FEXTNVM11 */
   3862 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3863 	reg |= FEXTNVM11_DIS_MULRFIX;
   3864 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3865 
   3866 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3867 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3868 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3869 		struct wm_txqueue *txq;
   3870 		wiseman_txdesc_t *txd;
   3871 
   3872 		/* TX */
   3873 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3874 		    device_xname(sc->sc_dev), preg, reg);
   3875 		reg = CSR_READ(sc, WMREG_TCTL);
   3876 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3877 
   3878 		txq = &sc->sc_queue[0].wmq_txq;
   3879 		nexttx = txq->txq_next;
   3880 		txd = &txq->txq_descs[nexttx];
   3881 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3882 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3883 		txd->wtx_fields.wtxu_status = 0;
   3884 		txd->wtx_fields.wtxu_options = 0;
   3885 		txd->wtx_fields.wtxu_vlan = 0;
   3886 
   3887 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3888 			BUS_SPACE_BARRIER_WRITE);
   3889 
   3890 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3891 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3892 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3893 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3894 		delay(250);
   3895 	}
   3896 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3897 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3898 		uint32_t rctl;
   3899 
   3900 		/* RX */
   3901 		printf("%s: Need RX flush (reg = %08x)\n",
   3902 		    device_xname(sc->sc_dev), preg);
   3903 		rctl = CSR_READ(sc, WMREG_RCTL);
   3904 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3905 		CSR_WRITE_FLUSH(sc);
   3906 		delay(150);
   3907 
   3908 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3909 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3910 		reg &= 0xffffc000;
   3911 		/*
   3912 		 * update thresholds: prefetch threshold to 31, host threshold
   3913 		 * to 1 and make sure the granularity is "descriptors" and not
   3914 		 * "cache lines"
   3915 		 */
   3916 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3917 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3918 
   3919 		/*
   3920 		 * momentarily enable the RX ring for the changes to take
   3921 		 * effect
   3922 		 */
   3923 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3924 		CSR_WRITE_FLUSH(sc);
   3925 		delay(150);
   3926 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3927 	}
   3928 }
   3929 
   3930 /*
   3931  * wm_reset:
   3932  *
    3933  *	Reset the chip, using the method appropriate for the chip type.
   3934  */
   3935 static void
   3936 wm_reset(struct wm_softc *sc)
   3937 {
   3938 	int phy_reset = 0;
   3939 	int i, error = 0;
   3940 	uint32_t reg;
   3941 
   3942 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3943 		device_xname(sc->sc_dev), __func__));
   3944 	KASSERT(sc->sc_type != 0);
   3945 
   3946 	/*
   3947 	 * Allocate on-chip memory according to the MTU size.
   3948 	 * The Packet Buffer Allocation register must be written
   3949 	 * before the chip is reset.
   3950 	 */
   3951 	switch (sc->sc_type) {
   3952 	case WM_T_82547:
   3953 	case WM_T_82547_2:
   3954 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3955 		    PBA_22K : PBA_30K;
   3956 		for (i = 0; i < sc->sc_nqueues; i++) {
   3957 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3958 			txq->txq_fifo_head = 0;
   3959 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3960 			txq->txq_fifo_size =
   3961 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3962 			txq->txq_fifo_stall = 0;
   3963 		}
   3964 		break;
   3965 	case WM_T_82571:
   3966 	case WM_T_82572:
   3967 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3968 	case WM_T_80003:
   3969 		sc->sc_pba = PBA_32K;
   3970 		break;
   3971 	case WM_T_82573:
   3972 		sc->sc_pba = PBA_12K;
   3973 		break;
   3974 	case WM_T_82574:
   3975 	case WM_T_82583:
   3976 		sc->sc_pba = PBA_20K;
   3977 		break;
   3978 	case WM_T_82576:
   3979 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3980 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3981 		break;
   3982 	case WM_T_82580:
   3983 	case WM_T_I350:
   3984 	case WM_T_I354:
   3985 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3986 		break;
   3987 	case WM_T_I210:
   3988 	case WM_T_I211:
   3989 		sc->sc_pba = PBA_34K;
   3990 		break;
   3991 	case WM_T_ICH8:
   3992 		/* Workaround for a bit corruption issue in FIFO memory */
   3993 		sc->sc_pba = PBA_8K;
   3994 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3995 		break;
   3996 	case WM_T_ICH9:
   3997 	case WM_T_ICH10:
   3998 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3999 		    PBA_14K : PBA_10K;
   4000 		break;
   4001 	case WM_T_PCH:
   4002 	case WM_T_PCH2:
   4003 	case WM_T_PCH_LPT:
   4004 	case WM_T_PCH_SPT:
   4005 		sc->sc_pba = PBA_26K;
   4006 		break;
   4007 	default:
   4008 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4009 		    PBA_40K : PBA_48K;
   4010 		break;
   4011 	}
   4012 	/*
    4013 	 * Only old or non-multiqueue devices have the PBA register.
   4014 	 * XXX Need special handling for 82575.
   4015 	 */
   4016 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4017 	    || (sc->sc_type == WM_T_82575))
   4018 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4019 
   4020 	/* Prevent the PCI-E bus from sticking */
   4021 	if (sc->sc_flags & WM_F_PCIE) {
   4022 		int timeout = 800;
   4023 
   4024 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4025 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4026 
   4027 		while (timeout--) {
   4028 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4029 			    == 0)
   4030 				break;
   4031 			delay(100);
   4032 		}
   4033 	}
   4034 
   4035 	/* Set the completion timeout for interface */
   4036 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4037 	    || (sc->sc_type == WM_T_82580)
   4038 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4039 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4040 		wm_set_pcie_completion_timeout(sc);
   4041 
   4042 	/* Clear interrupt */
   4043 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4044 	if (sc->sc_nintrs > 1) {
   4045 		if (sc->sc_type != WM_T_82574) {
   4046 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4047 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4048 		} else {
   4049 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4050 		}
   4051 	}
   4052 
   4053 	/* Stop the transmit and receive processes. */
   4054 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4055 	sc->sc_rctl &= ~RCTL_EN;
   4056 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4057 	CSR_WRITE_FLUSH(sc);
   4058 
   4059 	/* XXX set_tbi_sbp_82543() */
   4060 
   4061 	delay(10*1000);
   4062 
   4063 	/* Must acquire the MDIO ownership before MAC reset */
   4064 	switch (sc->sc_type) {
   4065 	case WM_T_82573:
   4066 	case WM_T_82574:
   4067 	case WM_T_82583:
   4068 		error = wm_get_hw_semaphore_82573(sc);
   4069 		break;
   4070 	default:
   4071 		break;
   4072 	}
   4073 
   4074 	/*
   4075 	 * 82541 Errata 29? & 82547 Errata 28?
   4076 	 * See also the description about PHY_RST bit in CTRL register
   4077 	 * in 8254x_GBe_SDM.pdf.
   4078 	 */
   4079 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4080 		CSR_WRITE(sc, WMREG_CTRL,
   4081 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4082 		CSR_WRITE_FLUSH(sc);
   4083 		delay(5000);
   4084 	}
   4085 
   4086 	switch (sc->sc_type) {
   4087 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4088 	case WM_T_82541:
   4089 	case WM_T_82541_2:
   4090 	case WM_T_82547:
   4091 	case WM_T_82547_2:
   4092 		/*
   4093 		 * On some chipsets, a reset through a memory-mapped write
   4094 		 * cycle can cause the chip to reset before completing the
   4095 		 * write cycle.  This causes major headache that can be
   4096 		 * avoided by issuing the reset via indirect register writes
   4097 		 * through I/O space.
   4098 		 *
   4099 		 * So, if we successfully mapped the I/O BAR at attach time,
   4100 		 * use that.  Otherwise, try our luck with a memory-mapped
   4101 		 * reset.
   4102 		 */
   4103 		if (sc->sc_flags & WM_F_IOH_VALID)
   4104 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4105 		else
   4106 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4107 		break;
   4108 	case WM_T_82545_3:
   4109 	case WM_T_82546_3:
   4110 		/* Use the shadow control register on these chips. */
   4111 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4112 		break;
   4113 	case WM_T_80003:
   4114 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4115 		sc->phy.acquire(sc);
   4116 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4117 		sc->phy.release(sc);
   4118 		break;
   4119 	case WM_T_ICH8:
   4120 	case WM_T_ICH9:
   4121 	case WM_T_ICH10:
   4122 	case WM_T_PCH:
   4123 	case WM_T_PCH2:
   4124 	case WM_T_PCH_LPT:
   4125 	case WM_T_PCH_SPT:
   4126 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4127 		if (wm_phy_resetisblocked(sc) == false) {
   4128 			/*
   4129 			 * Gate automatic PHY configuration by hardware on
   4130 			 * non-managed 82579
   4131 			 */
   4132 			if ((sc->sc_type == WM_T_PCH2)
   4133 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4134 				== 0))
   4135 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4136 
   4137 			reg |= CTRL_PHY_RESET;
   4138 			phy_reset = 1;
   4139 		} else
   4140 			printf("XXX reset is blocked!!!\n");
   4141 		sc->phy.acquire(sc);
   4142 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4143 		/* Don't insert a completion barrier during reset */
   4144 		delay(20*1000);
   4145 		mutex_exit(sc->sc_ich_phymtx);
   4146 		break;
   4147 	case WM_T_82580:
   4148 	case WM_T_I350:
   4149 	case WM_T_I354:
   4150 	case WM_T_I210:
   4151 	case WM_T_I211:
   4152 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4153 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4154 			CSR_WRITE_FLUSH(sc);
   4155 		delay(5000);
   4156 		break;
   4157 	case WM_T_82542_2_0:
   4158 	case WM_T_82542_2_1:
   4159 	case WM_T_82543:
   4160 	case WM_T_82540:
   4161 	case WM_T_82545:
   4162 	case WM_T_82546:
   4163 	case WM_T_82571:
   4164 	case WM_T_82572:
   4165 	case WM_T_82573:
   4166 	case WM_T_82574:
   4167 	case WM_T_82575:
   4168 	case WM_T_82576:
   4169 	case WM_T_82583:
   4170 	default:
   4171 		/* Everything else can safely use the documented method. */
   4172 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4173 		break;
   4174 	}
   4175 
   4176 	/* Must release the MDIO ownership after MAC reset */
   4177 	switch (sc->sc_type) {
   4178 	case WM_T_82573:
   4179 	case WM_T_82574:
   4180 	case WM_T_82583:
   4181 		if (error == 0)
   4182 			wm_put_hw_semaphore_82573(sc);
   4183 		break;
   4184 	default:
   4185 		break;
   4186 	}
   4187 
   4188 	if (phy_reset != 0)
   4189 		wm_get_cfg_done(sc);
   4190 
   4191 	/* reload EEPROM */
   4192 	switch (sc->sc_type) {
   4193 	case WM_T_82542_2_0:
   4194 	case WM_T_82542_2_1:
   4195 	case WM_T_82543:
   4196 	case WM_T_82544:
   4197 		delay(10);
   4198 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4199 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4200 		CSR_WRITE_FLUSH(sc);
   4201 		delay(2000);
   4202 		break;
   4203 	case WM_T_82540:
   4204 	case WM_T_82545:
   4205 	case WM_T_82545_3:
   4206 	case WM_T_82546:
   4207 	case WM_T_82546_3:
   4208 		delay(5*1000);
   4209 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4210 		break;
   4211 	case WM_T_82541:
   4212 	case WM_T_82541_2:
   4213 	case WM_T_82547:
   4214 	case WM_T_82547_2:
   4215 		delay(20000);
   4216 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4217 		break;
   4218 	case WM_T_82571:
   4219 	case WM_T_82572:
   4220 	case WM_T_82573:
   4221 	case WM_T_82574:
   4222 	case WM_T_82583:
   4223 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4224 			delay(10);
   4225 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4226 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4227 			CSR_WRITE_FLUSH(sc);
   4228 		}
   4229 		/* check EECD_EE_AUTORD */
   4230 		wm_get_auto_rd_done(sc);
   4231 		/*
    4232 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4233 		 * is set.
   4234 		 */
   4235 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4236 		    || (sc->sc_type == WM_T_82583))
   4237 			delay(25*1000);
   4238 		break;
   4239 	case WM_T_82575:
   4240 	case WM_T_82576:
   4241 	case WM_T_82580:
   4242 	case WM_T_I350:
   4243 	case WM_T_I354:
   4244 	case WM_T_I210:
   4245 	case WM_T_I211:
   4246 	case WM_T_80003:
   4247 		/* check EECD_EE_AUTORD */
   4248 		wm_get_auto_rd_done(sc);
   4249 		break;
   4250 	case WM_T_ICH8:
   4251 	case WM_T_ICH9:
   4252 	case WM_T_ICH10:
   4253 	case WM_T_PCH:
   4254 	case WM_T_PCH2:
   4255 	case WM_T_PCH_LPT:
   4256 	case WM_T_PCH_SPT:
   4257 		break;
   4258 	default:
   4259 		panic("%s: unknown type\n", __func__);
   4260 	}
   4261 
   4262 	/* Check whether EEPROM is present or not */
   4263 	switch (sc->sc_type) {
   4264 	case WM_T_82575:
   4265 	case WM_T_82576:
   4266 	case WM_T_82580:
   4267 	case WM_T_I350:
   4268 	case WM_T_I354:
   4269 	case WM_T_ICH8:
   4270 	case WM_T_ICH9:
   4271 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4272 			/* Not found */
   4273 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4274 			if (sc->sc_type == WM_T_82575)
   4275 				wm_reset_init_script_82575(sc);
   4276 		}
   4277 		break;
   4278 	default:
   4279 		break;
   4280 	}
   4281 
   4282 	if ((sc->sc_type == WM_T_82580)
   4283 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4284 		/* clear global device reset status bit */
   4285 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4286 	}
   4287 
   4288 	/* Clear any pending interrupt events. */
   4289 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4290 	reg = CSR_READ(sc, WMREG_ICR);
   4291 	if (sc->sc_nintrs > 1) {
   4292 		if (sc->sc_type != WM_T_82574) {
   4293 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4294 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4295 		} else
   4296 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4297 	}
   4298 
   4299 	/* reload sc_ctrl */
   4300 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4301 
   4302 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4303 		wm_set_eee_i350(sc);
   4304 
    4305 	/* Clear the host wakeup bit after LCD reset */
   4306 	if (sc->sc_type >= WM_T_PCH) {
   4307 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4308 		    BM_PORT_GEN_CFG);
   4309 		reg &= ~BM_WUC_HOST_WU_BIT;
   4310 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4311 		    BM_PORT_GEN_CFG, reg);
   4312 	}
   4313 
   4314 	/*
   4315 	 * For PCH, this write will make sure that any noise will be detected
   4316 	 * as a CRC error and be dropped rather than show up as a bad packet
   4317 	 * to the DMA engine
   4318 	 */
   4319 	if (sc->sc_type == WM_T_PCH)
   4320 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4321 
   4322 	if (sc->sc_type >= WM_T_82544)
   4323 		CSR_WRITE(sc, WMREG_WUC, 0);
   4324 
   4325 	wm_reset_mdicnfg_82580(sc);
   4326 
   4327 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4328 		wm_pll_workaround_i210(sc);
   4329 }
   4330 
   4331 /*
   4332  * wm_add_rxbuf:
   4333  *
    4334  *	Add a receive buffer to the indicated descriptor.
   4335  */
   4336 static int
   4337 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4338 {
   4339 	struct wm_softc *sc = rxq->rxq_sc;
   4340 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4341 	struct mbuf *m;
   4342 	int error;
   4343 
   4344 	KASSERT(mutex_owned(rxq->rxq_lock));
   4345 
   4346 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4347 	if (m == NULL)
   4348 		return ENOBUFS;
   4349 
   4350 	MCLGET(m, M_DONTWAIT);
   4351 	if ((m->m_flags & M_EXT) == 0) {
   4352 		m_freem(m);
   4353 		return ENOBUFS;
   4354 	}
   4355 
   4356 	if (rxs->rxs_mbuf != NULL)
   4357 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4358 
   4359 	rxs->rxs_mbuf = m;
   4360 
   4361 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4362 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4363 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4364 	if (error) {
   4365 		/* XXX XXX XXX */
   4366 		aprint_error_dev(sc->sc_dev,
   4367 		    "unable to load rx DMA map %d, error = %d\n",
   4368 		    idx, error);
   4369 		panic("wm_add_rxbuf");
   4370 	}
   4371 
   4372 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4373 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4374 
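	/*
	 * On newer-queue devices, write the RX descriptor only while
	 * the receiver is enabled; otherwise the descriptor is
	 * presumably initialized later, when the interface is brought
	 * up.
	 */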
   4375 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4376 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4377 			wm_init_rxdesc(rxq, idx);
   4378 	} else
   4379 		wm_init_rxdesc(rxq, idx);
   4380 
   4381 	return 0;
   4382 }
   4383 
   4384 /*
   4385  * wm_rxdrain:
   4386  *
   4387  *	Drain the receive queue.
   4388  */
   4389 static void
   4390 wm_rxdrain(struct wm_rxqueue *rxq)
   4391 {
   4392 	struct wm_softc *sc = rxq->rxq_sc;
   4393 	struct wm_rxsoft *rxs;
   4394 	int i;
   4395 
   4396 	KASSERT(mutex_owned(rxq->rxq_lock));
   4397 
   4398 	for (i = 0; i < WM_NRXDESC; i++) {
   4399 		rxs = &rxq->rxq_soft[i];
   4400 		if (rxs->rxs_mbuf != NULL) {
   4401 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4402 			m_freem(rxs->rxs_mbuf);
   4403 			rxs->rxs_mbuf = NULL;
   4404 		}
   4405 	}
   4406 }
   4407 
   4408 
   4409 /*
    4410  * XXX copied from FreeBSD's sys/net/rss_config.c
   4411  */
   4412 /*
   4413  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4414  * effectiveness may be limited by algorithm choice and available entropy
   4415  * during the boot.
   4416  *
   4417  * XXXRW: And that we don't randomize it yet!
   4418  *
   4419  * This is the default Microsoft RSS specification key which is also
   4420  * the Chelsio T5 firmware default key.
   4421  */
   4422 #define RSS_KEYSIZE 40
   4423 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4424 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4425 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4426 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4427 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4428 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4429 };
   4430 
   4431 /*
    4432  * Caller must pass an array of size sizeof(wm_rss_key).
   4433  *
   4434  * XXX
    4435  * As if_ixgbe may also use this function, it should not be an
    4436  * if_wm-specific function.
   4437  */
   4438 static void
   4439 wm_rss_getkey(uint8_t *key)
   4440 {
   4441 
   4442 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4443 }
   4444 
   4445 /*
    4446  * Set up registers for RSS.
    4447  *
    4448  * XXX no VMDq support yet.
   4449  */
   4450 static void
   4451 wm_init_rss(struct wm_softc *sc)
   4452 {
   4453 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4454 	int i;
   4455 
   4456 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4457 
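	/*
	 * Fill the RSS redirection table, spreading the queue indices
	 * over its entries in round-robin order.
	 */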
   4458 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4459 		int qid, reta_ent;
   4460 
    4461 		qid = i % sc->sc_nqueues;
    4462 		switch (sc->sc_type) {
   4463 		case WM_T_82574:
   4464 			reta_ent = __SHIFTIN(qid,
   4465 			    RETA_ENT_QINDEX_MASK_82574);
   4466 			break;
   4467 		case WM_T_82575:
   4468 			reta_ent = __SHIFTIN(qid,
   4469 			    RETA_ENT_QINDEX1_MASK_82575);
   4470 			break;
   4471 		default:
   4472 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4473 			break;
   4474 		}
   4475 
   4476 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4477 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4478 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4479 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4480 	}
   4481 
   4482 	wm_rss_getkey((uint8_t *)rss_key);
   4483 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4484 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4485 
   4486 	if (sc->sc_type == WM_T_82574)
   4487 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4488 	else
   4489 		mrqc = MRQC_ENABLE_RSS_MQ;
   4490 
   4491 	/*
    4492 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4493 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4494 	 */
   4495 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4496 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4497 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4498 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4499 
   4500 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4501 }
   4502 
   4503 /*
    4504  * Adjust the TX and RX queue numbers which the system actually uses.
    4505  *
    4506  * The numbers are affected by the parameters below.
    4507  *     - The number of hardware queues
   4508  *     - The number of MSI-X vectors (= "nvectors" argument)
   4509  *     - ncpu
   4510  */
   4511 static void
   4512 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4513 {
   4514 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4515 
   4516 	if (nvectors < 2) {
   4517 		sc->sc_nqueues = 1;
   4518 		return;
   4519 	}
   4520 
    4521 	switch (sc->sc_type) {
   4522 	case WM_T_82572:
   4523 		hw_ntxqueues = 2;
   4524 		hw_nrxqueues = 2;
   4525 		break;
   4526 	case WM_T_82574:
   4527 		hw_ntxqueues = 2;
   4528 		hw_nrxqueues = 2;
   4529 		break;
   4530 	case WM_T_82575:
   4531 		hw_ntxqueues = 4;
   4532 		hw_nrxqueues = 4;
   4533 		break;
   4534 	case WM_T_82576:
   4535 		hw_ntxqueues = 16;
   4536 		hw_nrxqueues = 16;
   4537 		break;
   4538 	case WM_T_82580:
   4539 	case WM_T_I350:
   4540 	case WM_T_I354:
   4541 		hw_ntxqueues = 8;
   4542 		hw_nrxqueues = 8;
   4543 		break;
   4544 	case WM_T_I210:
   4545 		hw_ntxqueues = 4;
   4546 		hw_nrxqueues = 4;
   4547 		break;
   4548 	case WM_T_I211:
   4549 		hw_ntxqueues = 2;
   4550 		hw_nrxqueues = 2;
   4551 		break;
   4552 		/*
    4553 		 * As the ethernet controllers below do not support MSI-X,
    4554 		 * this driver does not use multiqueue on them.
   4555 		 *     - WM_T_80003
   4556 		 *     - WM_T_ICH8
   4557 		 *     - WM_T_ICH9
   4558 		 *     - WM_T_ICH10
   4559 		 *     - WM_T_PCH
   4560 		 *     - WM_T_PCH2
   4561 		 *     - WM_T_PCH_LPT
   4562 		 */
   4563 	default:
   4564 		hw_ntxqueues = 1;
   4565 		hw_nrxqueues = 1;
   4566 		break;
   4567 	}
   4568 
   4569 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4570 
   4571 	/*
    4572 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    4573 	 * the number of queues actually used.
   4574 	 */
   4575 	if (nvectors < hw_nqueues + 1) {
   4576 		sc->sc_nqueues = nvectors - 1;
   4577 	} else {
   4578 		sc->sc_nqueues = hw_nqueues;
   4579 	}
   4580 
   4581 	/*
    4582 	 * As more queues than CPUs cannot improve scaling, we limit
    4583 	 * the number of queues actually used.
   4584 	 */
   4585 	if (ncpu < sc->sc_nqueues)
   4586 		sc->sc_nqueues = ncpu;
   4587 }
   4588 
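/*
 * wm_softint_establish:
 *
 *	Establish the per-queue software interrupt used to defer
 *	TX/RX processing from the hardware interrupt handler.
 */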
   4589 static int
   4590 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4591 {
   4592 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4593 	wmq->wmq_id = qidx;
   4594 	wmq->wmq_intr_idx = intr_idx;
   4595 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4596 #ifdef WM_MPSAFE
   4597 	    | SOFTINT_MPSAFE
   4598 #endif
   4599 	    , wm_handle_queue, wmq);
   4600 	if (wmq->wmq_si != NULL)
   4601 		return 0;
   4602 
   4603 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4604 	    wmq->wmq_id);
   4605 
   4606 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4607 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4608 	return ENOMEM;
   4609 }
   4610 
   4611 /*
    4612  * Both single-interrupt MSI and INTx can use this function.
   4613  */
   4614 static int
   4615 wm_setup_legacy(struct wm_softc *sc)
   4616 {
   4617 	pci_chipset_tag_t pc = sc->sc_pc;
   4618 	const char *intrstr = NULL;
   4619 	char intrbuf[PCI_INTRSTR_LEN];
   4620 	int error;
   4621 
   4622 	error = wm_alloc_txrx_queues(sc);
   4623 	if (error) {
   4624 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4625 		    error);
   4626 		return ENOMEM;
   4627 	}
   4628 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4629 	    sizeof(intrbuf));
   4630 #ifdef WM_MPSAFE
   4631 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4632 #endif
   4633 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4634 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4635 	if (sc->sc_ihs[0] == NULL) {
   4636 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4637 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4638 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4639 		return ENOMEM;
   4640 	}
   4641 
   4642 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4643 	sc->sc_nintrs = 1;
   4644 
   4645 	return wm_softint_establish(sc, 0, 0);
   4646 }
   4647 
   4648 static int
   4649 wm_setup_msix(struct wm_softc *sc)
   4650 {
   4651 	void *vih;
   4652 	kcpuset_t *affinity;
   4653 	int qidx, error, intr_idx, txrx_established;
   4654 	pci_chipset_tag_t pc = sc->sc_pc;
   4655 	const char *intrstr = NULL;
   4656 	char intrbuf[PCI_INTRSTR_LEN];
   4657 	char intr_xname[INTRDEVNAMEBUF];
   4658 
   4659 	if (sc->sc_nqueues < ncpu) {
   4660 		/*
    4661 		 * To avoid conflicts with other devices' interrupts, the
    4662 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   4663 		 */
   4664 		sc->sc_affinity_offset = 1;
   4665 	} else {
   4666 		/*
    4667 		 * In this case, this device uses all CPUs, so for readability
    4668 		 * we match the affinitized cpu_index to the MSI-X vector number.
   4669 		 */
   4670 		sc->sc_affinity_offset = 0;
   4671 	}
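	/*
	 * For example, with sc_nqueues = 4 and ncpu = 8, the Tx/Rx vectors
	 * established below are bound round-robin to CPU#1..CPU#4 while the
	 * LINK vector keeps the default affinity.
	 */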
   4672 
   4673 	error = wm_alloc_txrx_queues(sc);
   4674 	if (error) {
   4675 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4676 		    error);
   4677 		return ENOMEM;
   4678 	}
   4679 
   4680 	kcpuset_create(&affinity, false);
   4681 	intr_idx = 0;
   4682 
   4683 	/*
   4684 	 * TX and RX
   4685 	 */
   4686 	txrx_established = 0;
   4687 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4688 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4689 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4690 
   4691 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4692 		    sizeof(intrbuf));
   4693 #ifdef WM_MPSAFE
   4694 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4695 		    PCI_INTR_MPSAFE, true);
   4696 #endif
   4697 		memset(intr_xname, 0, sizeof(intr_xname));
   4698 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4699 		    device_xname(sc->sc_dev), qidx);
   4700 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4701 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4702 		if (vih == NULL) {
   4703 			aprint_error_dev(sc->sc_dev,
   4704 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4705 			    intrstr ? " at " : "",
   4706 			    intrstr ? intrstr : "");
   4707 
   4708 			goto fail;
   4709 		}
   4710 		kcpuset_zero(affinity);
   4711 		/* Round-robin affinity */
   4712 		kcpuset_set(affinity, affinity_to);
   4713 		error = interrupt_distribute(vih, affinity, NULL);
   4714 		if (error == 0) {
   4715 			aprint_normal_dev(sc->sc_dev,
   4716 			    "for TX and RX interrupting at %s affinity to %u\n",
   4717 			    intrstr, affinity_to);
   4718 		} else {
   4719 			aprint_normal_dev(sc->sc_dev,
   4720 			    "for TX and RX interrupting at %s\n", intrstr);
   4721 		}
   4722 		sc->sc_ihs[intr_idx] = vih;
   4723 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4724 			goto fail;
   4725 		txrx_established++;
   4726 		intr_idx++;
   4727 	}
   4728 
   4729 	/*
   4730 	 * LINK
   4731 	 */
   4732 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4733 	    sizeof(intrbuf));
   4734 #ifdef WM_MPSAFE
   4735 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4736 #endif
   4737 	memset(intr_xname, 0, sizeof(intr_xname));
   4738 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4739 	    device_xname(sc->sc_dev));
   4740 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4741 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4742 	if (vih == NULL) {
   4743 		aprint_error_dev(sc->sc_dev,
   4744 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4745 		    intrstr ? " at " : "",
   4746 		    intrstr ? intrstr : "");
   4747 
   4748 		goto fail;
   4749 	}
    4750 	/* Keep the default affinity for the LINK interrupt. */
   4751 	aprint_normal_dev(sc->sc_dev,
   4752 	    "for LINK interrupting at %s\n", intrstr);
   4753 	sc->sc_ihs[intr_idx] = vih;
   4754 	sc->sc_link_intr_idx = intr_idx;
   4755 
   4756 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4757 	kcpuset_destroy(affinity);
   4758 	return 0;
   4759 
   4760  fail:
   4761 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4762 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4763 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4764 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4765 	}
   4766 
   4767 	kcpuset_destroy(affinity);
   4768 	return ENOMEM;
   4769 }
   4770 
   4771 static void
   4772 wm_turnon(struct wm_softc *sc)
   4773 {
   4774 	int i;
   4775 
   4776 	KASSERT(WM_CORE_LOCKED(sc));
   4777 
   4778 	/*
    4779 	 * Must unset the stopping flags in ascending order.
   4780 	 */
    4781 	for (i = 0; i < sc->sc_nqueues; i++) {
   4782 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4783 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4784 
   4785 		mutex_enter(txq->txq_lock);
   4786 		txq->txq_stopping = false;
   4787 		mutex_exit(txq->txq_lock);
   4788 
   4789 		mutex_enter(rxq->rxq_lock);
   4790 		rxq->rxq_stopping = false;
   4791 		mutex_exit(rxq->rxq_lock);
   4792 	}
   4793 
   4794 	sc->sc_core_stopping = false;
   4795 }
   4796 
   4797 static void
   4798 wm_turnoff(struct wm_softc *sc)
   4799 {
   4800 	int i;
   4801 
   4802 	KASSERT(WM_CORE_LOCKED(sc));
   4803 
   4804 	sc->sc_core_stopping = true;
   4805 
   4806 	/*
    4807 	 * Must set the stopping flags in ascending order.
   4808 	 */
    4809 	for (i = 0; i < sc->sc_nqueues; i++) {
   4810 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4811 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4812 
   4813 		mutex_enter(rxq->rxq_lock);
   4814 		rxq->rxq_stopping = true;
   4815 		mutex_exit(rxq->rxq_lock);
   4816 
   4817 		mutex_enter(txq->txq_lock);
   4818 		txq->txq_stopping = true;
   4819 		mutex_exit(txq->txq_lock);
   4820 	}
   4821 }
   4822 
   4823 /*
    4824  * Write the interrupt interval value to ITR or EITR.
   4825  */
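/*
 * Register selection, as implemented below: NEWQUEUE devices use EITR
 * (with a software-maintained counter field on the 82575), the 82574
 * uses its own EITR variant when running multiqueue MSI-X, and all
 * other devices use the single ITR register.
 */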
   4826 static void
   4827 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4828 {
   4829 
   4830 	if (!wmq->wmq_set_itr)
   4831 		return;
   4832 
   4833 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4834 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4835 
   4836 		/*
    4837 		 * The 82575 doesn't have the CNT_INGR field, so
    4838 		 * overwrite the counter field in software.
   4839 		 */
   4840 		if (sc->sc_type == WM_T_82575)
   4841 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4842 		else
   4843 			eitr |= EITR_CNT_INGR;
   4844 
   4845 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4846 	} else if (sc->sc_type == WM_T_82574 && sc->sc_nintrs > 1) {
   4847 		/*
    4848 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4849 		 * the multiqueue function with MSI-X.
   4850 		 */
   4851 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4852 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4853 	} else {
   4854 		KASSERT(wmq->wmq_id == 0);
   4855 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4856 	}
   4857 
   4858 	wmq->wmq_set_itr = false;
   4859 }
   4860 
   4861 /*
   4862  * TODO
    4863  * The dynamic ITR calculation below is almost the same as Linux igb's;
    4864  * however, it does not fit wm(4) well, so AIM remains disabled until
    4865  * we find an appropriate ITR calculation.
   4866  */
   4867 /*
    4868  * Calculate the interrupt interval value that wm_itrs_writereg() will
    4869  * write later. This function does not write the ITR/EITR register itself.
   4870  */
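/*
 * For example, following the (currently disabled) heuristic below: an
 * average frame of 600 bytes becomes 624 after the 24-byte adjustment,
 * falls in the 300..1200 mid-size band, and yields new_itr = 624 / 3
 * = 208, which is then multiplied by 4 (except on the 82575) to 832.
 */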
   4871 static void
   4872 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4873 {
   4874 #ifdef NOTYET
   4875 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4876 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4877 	uint32_t avg_size = 0;
   4878 	uint32_t new_itr;
   4879 
   4880 	if (rxq->rxq_packets)
   4881 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4882 	if (txq->txq_packets)
   4883 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4884 
   4885 	if (avg_size == 0) {
   4886 		new_itr = 450; /* restore default value */
   4887 		goto out;
   4888 	}
   4889 
   4890 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4891 	avg_size += 24;
   4892 
   4893 	/* Don't starve jumbo frames */
   4894 	avg_size = min(avg_size, 3000);
   4895 
   4896 	/* Give a little boost to mid-size frames */
   4897 	if ((avg_size > 300) && (avg_size < 1200))
   4898 		new_itr = avg_size / 3;
   4899 	else
   4900 		new_itr = avg_size / 2;
   4901 
   4902 out:
   4903 	/*
    4904 	 * The 82574's and 82575's EITR usage differs from other NEWQUEUE
    4905 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
   4906 	 */
   4907 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4908 		new_itr *= 4;
   4909 
   4910 	if (new_itr != wmq->wmq_itr) {
   4911 		wmq->wmq_itr = new_itr;
   4912 		wmq->wmq_set_itr = true;
   4913 	} else
   4914 		wmq->wmq_set_itr = false;
   4915 
   4916 	rxq->rxq_packets = 0;
   4917 	rxq->rxq_bytes = 0;
   4918 	txq->txq_packets = 0;
   4919 	txq->txq_bytes = 0;
   4920 #endif
   4921 }
   4922 
   4923 /*
   4924  * wm_init:		[ifnet interface function]
   4925  *
   4926  *	Initialize the interface.
   4927  */
   4928 static int
   4929 wm_init(struct ifnet *ifp)
   4930 {
   4931 	struct wm_softc *sc = ifp->if_softc;
   4932 	int ret;
   4933 
   4934 	WM_CORE_LOCK(sc);
   4935 	ret = wm_init_locked(ifp);
   4936 	WM_CORE_UNLOCK(sc);
   4937 
   4938 	return ret;
   4939 }
   4940 
   4941 static int
   4942 wm_init_locked(struct ifnet *ifp)
   4943 {
   4944 	struct wm_softc *sc = ifp->if_softc;
   4945 	int i, j, trynum, error = 0;
   4946 	uint32_t reg;
   4947 
   4948 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4949 		device_xname(sc->sc_dev), __func__));
   4950 	KASSERT(WM_CORE_LOCKED(sc));
   4951 
   4952 	/*
    4953 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4954 	 * There is a small but measurable benefit to avoiding the adjustment
   4955 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4956 	 * on such platforms.  One possibility is that the DMA itself is
   4957 	 * slightly more efficient if the front of the entire packet (instead
   4958 	 * of the front of the headers) is aligned.
   4959 	 *
   4960 	 * Note we must always set align_tweak to 0 if we are using
   4961 	 * jumbo frames.
   4962 	 */
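	/*
	 * Concretely, align_tweak = 2 offsets the receive buffer so that the
	 * 14-byte Ethernet header ends on a 4-byte boundary, leaving the IP
	 * header naturally aligned on strict-alignment platforms.
	 */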
   4963 #ifdef __NO_STRICT_ALIGNMENT
   4964 	sc->sc_align_tweak = 0;
   4965 #else
   4966 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4967 		sc->sc_align_tweak = 0;
   4968 	else
   4969 		sc->sc_align_tweak = 2;
   4970 #endif /* __NO_STRICT_ALIGNMENT */
   4971 
   4972 	/* Cancel any pending I/O. */
   4973 	wm_stop_locked(ifp, 0);
   4974 
   4975 	/* update statistics before reset */
   4976 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4977 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4978 
   4979 	/* PCH_SPT hardware workaround */
   4980 	if (sc->sc_type == WM_T_PCH_SPT)
   4981 		wm_flush_desc_rings(sc);
   4982 
   4983 	/* Reset the chip to a known state. */
   4984 	wm_reset(sc);
   4985 
   4986 	/* AMT based hardware can now take control from firmware */
   4987 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4988 		wm_get_hw_control(sc);
   4989 
   4990 	/* Init hardware bits */
   4991 	wm_initialize_hardware_bits(sc);
   4992 
   4993 	/* Reset the PHY. */
   4994 	if (sc->sc_flags & WM_F_HAS_MII)
   4995 		wm_gmii_reset(sc);
   4996 
   4997 	/* Calculate (E)ITR value */
   4998 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   4999 		/*
    5000 		 * For NEWQUEUE devices' EITR (except for the 82575).
    5001 		 * The 82575's EITR should be set to the same throttling value
    5002 		 * as other old controllers' ITR because the interrupts/sec
    5003 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5004 		 *
    5005 		 * The 82574's EITR should be set to the same throttling value
    5006 		 * as its ITR.
    5007 		 *
    5008 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5009 		 * contrast to the ITR throttling value.
   5009 		 */
   5010 		sc->sc_itr_init = 450;
   5011 	} else if (sc->sc_type >= WM_T_82543) {
   5012 		/*
   5013 		 * Set up the interrupt throttling register (units of 256ns)
   5014 		 * Note that a footnote in Intel's documentation says this
   5015 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5016 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5017 		 * that that is also true for the 1024ns units of the other
   5018 		 * interrupt-related timer registers -- so, really, we ought
   5019 		 * to divide this value by 4 when the link speed is low.
   5020 		 *
   5021 		 * XXX implement this division at link speed change!
   5022 		 */
   5023 
   5024 		/*
   5025 		 * For N interrupts/sec, set this value to:
   5026 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5027 		 * absolute and packet timer values to this value
   5028 		 * divided by 4 to get "simple timer" behavior.
   5029 		 */
   5030 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5031 	}
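	/*
	 * Cross-check of the constants above: 1,000,000,000 / (1500 * 256)
	 * = 2604 interrupts/sec on the ITR path, and 1,000,000 / 450 = 2222
	 * interrupts/sec on the EITR path.
	 */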
   5032 
   5033 	error = wm_init_txrx_queues(sc);
   5034 	if (error)
   5035 		goto out;
   5036 
   5037 	/*
   5038 	 * Clear out the VLAN table -- we don't use it (yet).
   5039 	 */
   5040 	CSR_WRITE(sc, WMREG_VET, 0);
   5041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5042 		trynum = 10; /* Due to hw errata */
   5043 	else
   5044 		trynum = 1;
   5045 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5046 		for (j = 0; j < trynum; j++)
   5047 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5048 
   5049 	/*
   5050 	 * Set up flow-control parameters.
   5051 	 *
   5052 	 * XXX Values could probably stand some tuning.
   5053 	 */
   5054 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5055 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5056 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5057 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5058 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5059 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5060 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5061 	}
   5062 
   5063 	sc->sc_fcrtl = FCRTL_DFLT;
   5064 	if (sc->sc_type < WM_T_82543) {
   5065 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5066 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5067 	} else {
   5068 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5069 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5070 	}
   5071 
   5072 	if (sc->sc_type == WM_T_80003)
   5073 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5074 	else
   5075 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5076 
   5077 	/* Writes the control register. */
   5078 	wm_set_vlan(sc);
   5079 
   5080 	if (sc->sc_flags & WM_F_HAS_MII) {
   5081 		int val;
   5082 
   5083 		switch (sc->sc_type) {
   5084 		case WM_T_80003:
   5085 		case WM_T_ICH8:
   5086 		case WM_T_ICH9:
   5087 		case WM_T_ICH10:
   5088 		case WM_T_PCH:
   5089 		case WM_T_PCH2:
   5090 		case WM_T_PCH_LPT:
   5091 		case WM_T_PCH_SPT:
   5092 			/*
   5093 			 * Set the mac to wait the maximum time between each
   5094 			 * iteration and increase the max iterations when
   5095 			 * polling the phy; this fixes erroneous timeouts at
   5096 			 * 10Mbps.
   5097 			 */
   5098 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5099 			    0xFFFF);
   5100 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5101 			val |= 0x3F;
   5102 			wm_kmrn_writereg(sc,
   5103 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5104 			break;
   5105 		default:
   5106 			break;
   5107 		}
   5108 
   5109 		if (sc->sc_type == WM_T_80003) {
   5110 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5111 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5112 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5113 
   5114 			/* Bypass RX and TX FIFO's */
   5115 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5116 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5117 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5118 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5119 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5120 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5121 		}
   5122 	}
   5123 #if 0
   5124 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5125 #endif
   5126 
   5127 	/* Set up checksum offload parameters. */
   5128 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5129 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5130 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5131 		reg |= RXCSUM_IPOFL;
   5132 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5133 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5134 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5135 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5136 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5137 
   5138 	/* Set up MSI-X */
   5139 	if (sc->sc_nintrs > 1) {
   5140 		uint32_t ivar;
   5141 		struct wm_queue *wmq;
   5142 		int qid, qintr_idx;
   5143 
   5144 		if (sc->sc_type == WM_T_82575) {
   5145 			/* Interrupt control */
   5146 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5147 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5149 
   5150 			/* TX and RX */
   5151 			for (i = 0; i < sc->sc_nqueues; i++) {
   5152 				wmq = &sc->sc_queue[i];
   5153 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5154 				    EITR_TX_QUEUE(wmq->wmq_id)
   5155 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5156 			}
   5157 			/* Link status */
   5158 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5159 			    EITR_OTHER);
   5160 		} else if (sc->sc_type == WM_T_82574) {
   5161 			/* Interrupt control */
   5162 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5163 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5164 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5165 
   5166 			/*
    5167 			 * Work around an issue with spurious interrupts
    5168 			 * in MSI-X mode.
    5169 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5170 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5171 			 */
   5172 			reg = CSR_READ(sc, WMREG_RFCTL);
   5173 			reg |= WMREG_RFCTL_ACKDIS;
   5174 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5175 
   5176 			ivar = 0;
   5177 			/* TX and RX */
   5178 			for (i = 0; i < sc->sc_nqueues; i++) {
   5179 				wmq = &sc->sc_queue[i];
   5180 				qid = wmq->wmq_id;
   5181 				qintr_idx = wmq->wmq_intr_idx;
   5182 
   5183 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5184 				    IVAR_TX_MASK_Q_82574(qid));
   5185 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5186 				    IVAR_RX_MASK_Q_82574(qid));
   5187 			}
   5188 			/* Link status */
   5189 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5190 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5191 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5192 		} else {
   5193 			/* Interrupt control */
   5194 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5195 			    | GPIE_EIAME | GPIE_PBA);
   5196 
   5197 			switch (sc->sc_type) {
   5198 			case WM_T_82580:
   5199 			case WM_T_I350:
   5200 			case WM_T_I354:
   5201 			case WM_T_I210:
   5202 			case WM_T_I211:
   5203 				/* TX and RX */
   5204 				for (i = 0; i < sc->sc_nqueues; i++) {
   5205 					wmq = &sc->sc_queue[i];
   5206 					qid = wmq->wmq_id;
   5207 					qintr_idx = wmq->wmq_intr_idx;
   5208 
   5209 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5210 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5211 					ivar |= __SHIFTIN((qintr_idx
   5212 						| IVAR_VALID),
   5213 					    IVAR_TX_MASK_Q(qid));
   5214 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5215 					ivar |= __SHIFTIN((qintr_idx
   5216 						| IVAR_VALID),
   5217 					    IVAR_RX_MASK_Q(qid));
   5218 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5219 				}
   5220 				break;
   5221 			case WM_T_82576:
   5222 				/* TX and RX */
   5223 				for (i = 0; i < sc->sc_nqueues; i++) {
   5224 					wmq = &sc->sc_queue[i];
   5225 					qid = wmq->wmq_id;
   5226 					qintr_idx = wmq->wmq_intr_idx;
   5227 
   5228 					ivar = CSR_READ(sc,
   5229 					    WMREG_IVAR_Q_82576(qid));
   5230 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5231 					ivar |= __SHIFTIN((qintr_idx
   5232 						| IVAR_VALID),
   5233 					    IVAR_TX_MASK_Q_82576(qid));
   5234 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5235 					ivar |= __SHIFTIN((qintr_idx
   5236 						| IVAR_VALID),
   5237 					    IVAR_RX_MASK_Q_82576(qid));
   5238 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5239 					    ivar);
   5240 				}
   5241 				break;
   5242 			default:
   5243 				break;
   5244 			}
   5245 
   5246 			/* Link status */
   5247 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5248 			    IVAR_MISC_OTHER);
   5249 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5250 		}
   5251 
   5252 		if (sc->sc_nqueues > 1) {
   5253 			wm_init_rss(sc);
   5254 
    5255 			/*
    5256 			 * NOTE: Receive Full-Packet Checksum Offload is
    5257 			 * mutually exclusive with multiqueue. However, this
    5258 			 * is not the same as TCP/IP checksum offload, which
    5259 			 * still works.
    5260 			 */
   5261 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5262 			reg |= RXCSUM_PCSD;
   5263 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5264 		}
   5265 	}
   5266 
   5267 	/* Set up the interrupt registers. */
   5268 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5269 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5270 	    ICR_RXO | ICR_RXT0;
   5271 	if (sc->sc_nintrs > 1) {
   5272 		uint32_t mask;
   5273 		struct wm_queue *wmq;
   5274 
   5275 		switch (sc->sc_type) {
   5276 		case WM_T_82574:
   5277 			mask = 0;
   5278 			for (i = 0; i < sc->sc_nqueues; i++) {
   5279 				wmq = &sc->sc_queue[i];
   5280 				mask |= ICR_TXQ(wmq->wmq_id);
   5281 				mask |= ICR_RXQ(wmq->wmq_id);
   5282 			}
   5283 			mask |= ICR_OTHER;
   5284 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5285 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5286 			break;
   5287 		default:
   5288 			if (sc->sc_type == WM_T_82575) {
   5289 				mask = 0;
   5290 				for (i = 0; i < sc->sc_nqueues; i++) {
   5291 					wmq = &sc->sc_queue[i];
   5292 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5293 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5294 				}
   5295 				mask |= EITR_OTHER;
   5296 			} else {
   5297 				mask = 0;
   5298 				for (i = 0; i < sc->sc_nqueues; i++) {
   5299 					wmq = &sc->sc_queue[i];
   5300 					mask |= 1 << wmq->wmq_intr_idx;
   5301 				}
   5302 				mask |= 1 << sc->sc_link_intr_idx;
   5303 			}
   5304 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5305 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5306 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5307 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5308 			break;
   5309 		}
   5310 	} else
   5311 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5312 
   5313 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5314 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5315 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5316 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5317 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5318 		reg |= KABGTXD_BGSQLBIAS;
   5319 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5320 	}
   5321 
   5322 	/* Set up the inter-packet gap. */
   5323 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5324 
   5325 	if (sc->sc_type >= WM_T_82543) {
   5326 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5327 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5328 			wm_itrs_writereg(sc, wmq);
   5329 		}
   5330 		/*
    5331 		 * Link interrupts occur much less frequently than TX
    5332 		 * interrupts and RX interrupts, so we don't tune the
    5333 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's if_igb
    5334 		 * does.
   5335 		 */
   5336 	}
   5337 
   5338 	/* Set the VLAN ethernetype. */
   5339 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5340 
   5341 	/*
   5342 	 * Set up the transmit control register; we start out with
    5343 	 * a collision distance suitable for FDX, but update it when
   5344 	 * we resolve the media type.
   5345 	 */
   5346 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5347 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5348 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5349 	if (sc->sc_type >= WM_T_82571)
   5350 		sc->sc_tctl |= TCTL_MULR;
   5351 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5352 
   5353 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5354 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5355 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5356 	}
   5357 
   5358 	if (sc->sc_type == WM_T_80003) {
   5359 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5360 		reg &= ~TCTL_EXT_GCEX_MASK;
   5361 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5362 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5363 	}
   5364 
   5365 	/* Set the media. */
   5366 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5367 		goto out;
   5368 
   5369 	/* Configure for OS presence */
   5370 	wm_init_manageability(sc);
   5371 
   5372 	/*
   5373 	 * Set up the receive control register; we actually program
   5374 	 * the register when we set the receive filter.  Use multicast
   5375 	 * address offset type 0.
   5376 	 *
   5377 	 * Only the i82544 has the ability to strip the incoming
   5378 	 * CRC, so we don't enable that feature.
   5379 	 */
   5380 	sc->sc_mchash_type = 0;
   5381 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5382 	    | RCTL_MO(sc->sc_mchash_type);
   5383 
   5384 	/*
    5385 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5386 	 */
   5387 	if (sc->sc_type == WM_T_82574)
   5388 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5389 
   5390 	/*
   5391 	 * The I350 has a bug where it always strips the CRC whether
    5392 	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
   5393 	 */
   5394 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5395 	    || (sc->sc_type == WM_T_I210))
   5396 		sc->sc_rctl |= RCTL_SECRC;
   5397 
   5398 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5399 	    && (ifp->if_mtu > ETHERMTU)) {
   5400 		sc->sc_rctl |= RCTL_LPE;
   5401 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5402 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5403 	}
   5404 
   5405 	if (MCLBYTES == 2048) {
   5406 		sc->sc_rctl |= RCTL_2k;
   5407 	} else {
   5408 		if (sc->sc_type >= WM_T_82543) {
   5409 			switch (MCLBYTES) {
   5410 			case 4096:
   5411 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5412 				break;
   5413 			case 8192:
   5414 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5415 				break;
   5416 			case 16384:
   5417 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5418 				break;
   5419 			default:
   5420 				panic("wm_init: MCLBYTES %d unsupported",
   5421 				    MCLBYTES);
   5422 				break;
   5423 			}
   5424 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5425 	}
   5426 
   5427 	/* Set the receive filter. */
   5428 	wm_set_filter(sc);
   5429 
   5430 	/* Enable ECC */
   5431 	switch (sc->sc_type) {
   5432 	case WM_T_82571:
   5433 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5434 		reg |= PBA_ECC_CORR_EN;
   5435 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5436 		break;
   5437 	case WM_T_PCH_LPT:
   5438 	case WM_T_PCH_SPT:
   5439 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5440 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5441 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5442 
   5443 		sc->sc_ctrl |= CTRL_MEHE;
   5444 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5445 		break;
   5446 	default:
   5447 		break;
   5448 	}
   5449 
    5450 	/* On 82575 and later, set RDT only if RX is enabled. */
   5451 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5452 		int qidx;
   5453 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5454 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5455 			for (i = 0; i < WM_NRXDESC; i++) {
   5456 				mutex_enter(rxq->rxq_lock);
   5457 				wm_init_rxdesc(rxq, i);
   5458 				mutex_exit(rxq->rxq_lock);
   5460 			}
   5461 		}
   5462 	}
   5463 
   5464 	wm_turnon(sc);
   5465 
   5466 	/* Start the one second link check clock. */
   5467 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5468 
   5469 	/* ...all done! */
   5470 	ifp->if_flags |= IFF_RUNNING;
   5471 	ifp->if_flags &= ~IFF_OACTIVE;
   5472 
   5473  out:
   5474 	sc->sc_if_flags = ifp->if_flags;
   5475 	if (error)
   5476 		log(LOG_ERR, "%s: interface not running\n",
   5477 		    device_xname(sc->sc_dev));
   5478 	return error;
   5479 }
   5480 
   5481 /*
   5482  * wm_stop:		[ifnet interface function]
   5483  *
   5484  *	Stop transmission on the interface.
   5485  */
   5486 static void
   5487 wm_stop(struct ifnet *ifp, int disable)
   5488 {
   5489 	struct wm_softc *sc = ifp->if_softc;
   5490 
   5491 	WM_CORE_LOCK(sc);
   5492 	wm_stop_locked(ifp, disable);
   5493 	WM_CORE_UNLOCK(sc);
   5494 }
   5495 
   5496 static void
   5497 wm_stop_locked(struct ifnet *ifp, int disable)
   5498 {
   5499 	struct wm_softc *sc = ifp->if_softc;
   5500 	struct wm_txsoft *txs;
   5501 	int i, qidx;
   5502 
   5503 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5504 		device_xname(sc->sc_dev), __func__));
   5505 	KASSERT(WM_CORE_LOCKED(sc));
   5506 
   5507 	wm_turnoff(sc);
   5508 
   5509 	/* Stop the one second clock. */
   5510 	callout_stop(&sc->sc_tick_ch);
   5511 
   5512 	/* Stop the 82547 Tx FIFO stall check timer. */
   5513 	if (sc->sc_type == WM_T_82547)
   5514 		callout_stop(&sc->sc_txfifo_ch);
   5515 
   5516 	if (sc->sc_flags & WM_F_HAS_MII) {
   5517 		/* Down the MII. */
   5518 		mii_down(&sc->sc_mii);
   5519 	} else {
   5520 #if 0
   5521 		/* Should we clear PHY's status properly? */
   5522 		wm_reset(sc);
   5523 #endif
   5524 	}
   5525 
   5526 	/* Stop the transmit and receive processes. */
   5527 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5528 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5529 	sc->sc_rctl &= ~RCTL_EN;
   5530 
   5531 	/*
   5532 	 * Clear the interrupt mask to ensure the device cannot assert its
   5533 	 * interrupt line.
   5534 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5535 	 * service any currently pending or shared interrupt.
   5536 	 */
   5537 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5538 	sc->sc_icr = 0;
   5539 	if (sc->sc_nintrs > 1) {
   5540 		if (sc->sc_type != WM_T_82574) {
   5541 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5542 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5543 		} else
   5544 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5545 	}
   5546 
   5547 	/* Release any queued transmit buffers. */
   5548 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5549 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5550 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5551 		mutex_enter(txq->txq_lock);
   5552 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5553 			txs = &txq->txq_soft[i];
   5554 			if (txs->txs_mbuf != NULL) {
   5555 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5556 				m_freem(txs->txs_mbuf);
   5557 				txs->txs_mbuf = NULL;
   5558 			}
   5559 		}
   5560 		mutex_exit(txq->txq_lock);
   5561 	}
   5562 
   5563 	/* Mark the interface as down and cancel the watchdog timer. */
   5564 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5565 	ifp->if_timer = 0;
   5566 
   5567 	if (disable) {
   5568 		for (i = 0; i < sc->sc_nqueues; i++) {
   5569 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5570 			mutex_enter(rxq->rxq_lock);
   5571 			wm_rxdrain(rxq);
   5572 			mutex_exit(rxq->rxq_lock);
   5573 		}
   5574 	}
   5575 
   5576 #if 0 /* notyet */
   5577 	if (sc->sc_type >= WM_T_82544)
   5578 		CSR_WRITE(sc, WMREG_WUC, 0);
   5579 #endif
   5580 }
   5581 
   5582 static void
   5583 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5584 {
   5585 	struct mbuf *m;
   5586 	int i;
   5587 
   5588 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5589 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5590 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5591 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5592 		    m->m_data, m->m_len, m->m_flags);
   5593 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5594 	    i, i == 1 ? "" : "s");
   5595 }
   5596 
   5597 /*
   5598  * wm_82547_txfifo_stall:
   5599  *
   5600  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5601  *	reset the FIFO pointers, and restart packet transmission.
   5602  */
   5603 static void
   5604 wm_82547_txfifo_stall(void *arg)
   5605 {
   5606 	struct wm_softc *sc = arg;
   5607 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5608 
   5609 	mutex_enter(txq->txq_lock);
   5610 
   5611 	if (txq->txq_stopping)
   5612 		goto out;
   5613 
   5614 	if (txq->txq_fifo_stall) {
   5615 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5616 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5617 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5618 			/*
   5619 			 * Packets have drained.  Stop transmitter, reset
   5620 			 * FIFO pointers, restart transmitter, and kick
   5621 			 * the packet queue.
   5622 			 */
   5623 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5624 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5625 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5626 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5627 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5628 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5629 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5630 			CSR_WRITE_FLUSH(sc);
   5631 
   5632 			txq->txq_fifo_head = 0;
   5633 			txq->txq_fifo_stall = 0;
   5634 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5635 		} else {
   5636 			/*
   5637 			 * Still waiting for packets to drain; try again in
   5638 			 * another tick.
   5639 			 */
   5640 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5641 		}
   5642 	}
   5643 
   5644 out:
   5645 	mutex_exit(txq->txq_lock);
   5646 }
   5647 
   5648 /*
   5649  * wm_82547_txfifo_bugchk:
   5650  *
   5651  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5652  *	prevent enqueueing a packet that would wrap around the end
    5653  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5654  *
   5655  *	We do this by checking the amount of space before the end
   5656  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5657  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5658  *	the internal FIFO pointers to the beginning, and restart
   5659  *	transmission on the interface.
   5660  */
   5661 #define	WM_FIFO_HDR		0x10
   5662 #define	WM_82547_PAD_LEN	0x3e0
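/*
 * Worked example of the check below: a 1040-byte packet consumes
 * len = roundup(1040 + 0x10, 0x10) = 1056 bytes of FIFO space; with
 * only 64 bytes left before the end of the FIFO, 1056 >= 0x3e0 + 64
 * holds, so transmission stalls until the FIFO drains.
 */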
   5663 static int
   5664 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5665 {
   5666 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5667 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5668 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5669 
   5670 	/* Just return if already stalled. */
   5671 	if (txq->txq_fifo_stall)
   5672 		return 1;
   5673 
   5674 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5675 		/* Stall only occurs in half-duplex mode. */
   5676 		goto send_packet;
   5677 	}
   5678 
   5679 	if (len >= WM_82547_PAD_LEN + space) {
   5680 		txq->txq_fifo_stall = 1;
   5681 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5682 		return 1;
   5683 	}
   5684 
   5685  send_packet:
   5686 	txq->txq_fifo_head += len;
   5687 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5688 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5689 
   5690 	return 0;
   5691 }
   5692 
   5693 static int
   5694 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5695 {
   5696 	int error;
   5697 
   5698 	/*
   5699 	 * Allocate the control data structures, and create and load the
   5700 	 * DMA map for it.
   5701 	 *
   5702 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5703 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5704 	 * both sets within the same 4G segment.
   5705 	 */
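	/*
	 * The 4G constraint is enforced by the 0x100000000 boundary
	 * argument passed to bus_dmamem_alloc() below.
	 */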
   5706 	if (sc->sc_type < WM_T_82544)
   5707 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5708 	else
   5709 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5710 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5711 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5712 	else
   5713 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5714 
   5715 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5716 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5717 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5718 		aprint_error_dev(sc->sc_dev,
   5719 		    "unable to allocate TX control data, error = %d\n",
   5720 		    error);
   5721 		goto fail_0;
   5722 	}
   5723 
   5724 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5725 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5726 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5727 		aprint_error_dev(sc->sc_dev,
   5728 		    "unable to map TX control data, error = %d\n", error);
   5729 		goto fail_1;
   5730 	}
   5731 
   5732 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5733 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5734 		aprint_error_dev(sc->sc_dev,
   5735 		    "unable to create TX control data DMA map, error = %d\n",
   5736 		    error);
   5737 		goto fail_2;
   5738 	}
   5739 
   5740 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5741 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5742 		aprint_error_dev(sc->sc_dev,
   5743 		    "unable to load TX control data DMA map, error = %d\n",
   5744 		    error);
   5745 		goto fail_3;
   5746 	}
   5747 
   5748 	return 0;
   5749 
   5750  fail_3:
   5751 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5752  fail_2:
   5753 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5754 	    WM_TXDESCS_SIZE(txq));
   5755  fail_1:
   5756 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5757  fail_0:
   5758 	return error;
   5759 }
   5760 
   5761 static void
   5762 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5763 {
   5764 
   5765 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5766 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5767 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5768 	    WM_TXDESCS_SIZE(txq));
   5769 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5770 }
   5771 
   5772 static int
   5773 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5774 {
   5775 	int error;
   5776 	size_t rxq_descs_size;
   5777 
   5778 	/*
   5779 	 * Allocate the control data structures, and create and load the
   5780 	 * DMA map for it.
   5781 	 *
   5782 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5783 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5784 	 * both sets within the same 4G segment.
   5785 	 */
   5786 	rxq->rxq_ndesc = WM_NRXDESC;
   5787 	if (sc->sc_type == WM_T_82574)
   5788 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5789 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5790 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5791 	else
   5792 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5793 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5794 
   5795 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5796 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5797 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5798 		aprint_error_dev(sc->sc_dev,
   5799 		    "unable to allocate RX control data, error = %d\n",
   5800 		    error);
   5801 		goto fail_0;
   5802 	}
   5803 
   5804 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5805 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5806 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5807 		aprint_error_dev(sc->sc_dev,
   5808 		    "unable to map RX control data, error = %d\n", error);
   5809 		goto fail_1;
   5810 	}
   5811 
   5812 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5813 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5814 		aprint_error_dev(sc->sc_dev,
   5815 		    "unable to create RX control data DMA map, error = %d\n",
   5816 		    error);
   5817 		goto fail_2;
   5818 	}
   5819 
   5820 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5821 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5822 		aprint_error_dev(sc->sc_dev,
   5823 		    "unable to load RX control data DMA map, error = %d\n",
   5824 		    error);
   5825 		goto fail_3;
   5826 	}
   5827 
   5828 	return 0;
   5829 
   5830  fail_3:
   5831 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5832  fail_2:
   5833 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5834 	    rxq_descs_size);
   5835  fail_1:
   5836 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5837  fail_0:
   5838 	return error;
   5839 }
   5840 
   5841 static void
   5842 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5843 {
   5844 
   5845 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5846 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5847 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5848 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5849 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5850 }
   5851 
   5852 
   5853 static int
   5854 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5855 {
   5856 	int i, error;
   5857 
   5858 	/* Create the transmit buffer DMA maps. */
   5859 	WM_TXQUEUELEN(txq) =
   5860 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5861 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5862 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5863 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5864 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5865 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5866 			aprint_error_dev(sc->sc_dev,
   5867 			    "unable to create Tx DMA map %d, error = %d\n",
   5868 			    i, error);
   5869 			goto fail;
   5870 		}
   5871 	}
   5872 
   5873 	return 0;
   5874 
   5875  fail:
   5876 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5877 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5878 			bus_dmamap_destroy(sc->sc_dmat,
   5879 			    txq->txq_soft[i].txs_dmamap);
   5880 	}
   5881 	return error;
   5882 }
   5883 
   5884 static void
   5885 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5886 {
   5887 	int i;
   5888 
   5889 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5890 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5891 			bus_dmamap_destroy(sc->sc_dmat,
   5892 			    txq->txq_soft[i].txs_dmamap);
   5893 	}
   5894 }
   5895 
   5896 static int
   5897 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5898 {
   5899 	int i, error;
   5900 
   5901 	/* Create the receive buffer DMA maps. */
   5902 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5903 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5904 			    MCLBYTES, 0, 0,
   5905 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5906 			aprint_error_dev(sc->sc_dev,
   5907 			    "unable to create Rx DMA map %d error = %d\n",
   5908 			    i, error);
   5909 			goto fail;
   5910 		}
   5911 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5912 	}
   5913 
   5914 	return 0;
   5915 
   5916  fail:
   5917 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5918 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5919 			bus_dmamap_destroy(sc->sc_dmat,
   5920 			    rxq->rxq_soft[i].rxs_dmamap);
   5921 	}
   5922 	return error;
   5923 }
   5924 
   5925 static void
   5926 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5927 {
   5928 	int i;
   5929 
   5930 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5931 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5932 			bus_dmamap_destroy(sc->sc_dmat,
   5933 			    rxq->rxq_soft[i].rxs_dmamap);
   5934 	}
   5935 }
   5936 
   5937 /*
    5938  * wm_alloc_txrx_queues:
   5939  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5940  */
   5941 static int
   5942 wm_alloc_txrx_queues(struct wm_softc *sc)
   5943 {
   5944 	int i, error, tx_done, rx_done;
   5945 
   5946 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5947 	    KM_SLEEP);
   5948 	if (sc->sc_queue == NULL) {
   5949 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5950 		error = ENOMEM;
   5951 		goto fail_0;
   5952 	}
   5953 
   5954 	/*
   5955 	 * For transmission
   5956 	 */
   5957 	error = 0;
   5958 	tx_done = 0;
   5959 	for (i = 0; i < sc->sc_nqueues; i++) {
   5960 #ifdef WM_EVENT_COUNTERS
   5961 		int j;
   5962 		const char *xname;
   5963 #endif
   5964 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5965 		txq->txq_sc = sc;
   5966 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5967 
   5968 		error = wm_alloc_tx_descs(sc, txq);
   5969 		if (error)
   5970 			break;
   5971 		error = wm_alloc_tx_buffer(sc, txq);
   5972 		if (error) {
   5973 			wm_free_tx_descs(sc, txq);
   5974 			break;
   5975 		}
   5976 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5977 		if (txq->txq_interq == NULL) {
   5978 			wm_free_tx_descs(sc, txq);
   5979 			wm_free_tx_buffer(sc, txq);
   5980 			error = ENOMEM;
   5981 			break;
   5982 		}
   5983 
   5984 #ifdef WM_EVENT_COUNTERS
   5985 		xname = device_xname(sc->sc_dev);
   5986 
   5987 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5988 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5989 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5990 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5991 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5992 
   5993 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5994 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5995 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5996 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5997 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5998 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5999 
   6000 		for (j = 0; j < WM_NTXSEGS; j++) {
   6001 			snprintf(txq->txq_txseg_evcnt_names[j],
   6002 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6003 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6004 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6005 		}
   6006 
   6007 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6008 
   6009 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6010 #endif /* WM_EVENT_COUNTERS */
   6011 
   6012 		tx_done++;
   6013 	}
   6014 	if (error)
   6015 		goto fail_1;
   6016 
   6017 	/*
    6018 	 * For receive
   6019 	 */
   6020 	error = 0;
   6021 	rx_done = 0;
   6022 	for (i = 0; i < sc->sc_nqueues; i++) {
   6023 #ifdef WM_EVENT_COUNTERS
   6024 		const char *xname;
   6025 #endif
   6026 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6027 		rxq->rxq_sc = sc;
   6028 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6029 
   6030 		error = wm_alloc_rx_descs(sc, rxq);
   6031 		if (error)
   6032 			break;
   6033 
   6034 		error = wm_alloc_rx_buffer(sc, rxq);
   6035 		if (error) {
   6036 			wm_free_rx_descs(sc, rxq);
   6037 			break;
   6038 		}
   6039 
   6040 #ifdef WM_EVENT_COUNTERS
   6041 		xname = device_xname(sc->sc_dev);
   6042 
   6043 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6044 
   6045 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6046 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6047 #endif /* WM_EVENT_COUNTERS */
   6048 
   6049 		rx_done++;
   6050 	}
   6051 	if (error)
   6052 		goto fail_2;
   6053 
   6054 	return 0;
   6055 
   6056  fail_2:
   6057 	for (i = 0; i < rx_done; i++) {
   6058 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6059 		wm_free_rx_buffer(sc, rxq);
   6060 		wm_free_rx_descs(sc, rxq);
   6061 		if (rxq->rxq_lock)
   6062 			mutex_obj_free(rxq->rxq_lock);
   6063 	}
   6064  fail_1:
   6065 	for (i = 0; i < tx_done; i++) {
   6066 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6067 		pcq_destroy(txq->txq_interq);
   6068 		wm_free_tx_buffer(sc, txq);
   6069 		wm_free_tx_descs(sc, txq);
   6070 		if (txq->txq_lock)
   6071 			mutex_obj_free(txq->txq_lock);
   6072 	}
   6073 
   6074 	kmem_free(sc->sc_queue,
   6075 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6076  fail_0:
   6077 	return error;
   6078 }
   6079 
   6080 /*
    6081  * wm_free_txrx_queues:
   6082  *	Free {tx,rx}descs and {tx,rx} buffers
   6083  */
   6084 static void
   6085 wm_free_txrx_queues(struct wm_softc *sc)
   6086 {
   6087 	int i;
   6088 
   6089 	for (i = 0; i < sc->sc_nqueues; i++) {
   6090 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6091 
   6092 #ifdef WM_EVENT_COUNTERS
   6093 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6094 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6095 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6096 #endif /* WM_EVENT_COUNTERS */
   6097 
   6098 		wm_free_rx_buffer(sc, rxq);
   6099 		wm_free_rx_descs(sc, rxq);
   6100 		if (rxq->rxq_lock)
   6101 			mutex_obj_free(rxq->rxq_lock);
   6102 	}
   6103 
   6104 	for (i = 0; i < sc->sc_nqueues; i++) {
   6105 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6106 		struct mbuf *m;
   6107 #ifdef WM_EVENT_COUNTERS
   6108 		int j;
   6109 
   6110 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6111 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6112 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6113 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6114 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6115 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6116 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6117 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6118 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6119 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6120 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6121 
   6122 		for (j = 0; j < WM_NTXSEGS; j++)
   6123 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6124 
   6125 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6126 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6127 #endif /* WM_EVENT_COUNTERS */
   6128 
   6129 		/* drain txq_interq */
   6130 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6131 			m_freem(m);
   6132 		pcq_destroy(txq->txq_interq);
   6133 
   6134 		wm_free_tx_buffer(sc, txq);
   6135 		wm_free_tx_descs(sc, txq);
   6136 		if (txq->txq_lock)
   6137 			mutex_obj_free(txq->txq_lock);
   6138 	}
   6139 
   6140 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6141 }
   6142 
   6143 static void
   6144 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6145 {
   6146 
   6147 	KASSERT(mutex_owned(txq->txq_lock));
   6148 
   6149 	/* Initialize the transmit descriptor ring. */
   6150 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6151 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6152 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6153 	txq->txq_free = WM_NTXDESC(txq);
   6154 	txq->txq_next = 0;
   6155 }
   6156 
   6157 static void
   6158 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6159     struct wm_txqueue *txq)
   6160 {
   6161 
   6162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6163 		device_xname(sc->sc_dev), __func__));
   6164 	KASSERT(mutex_owned(txq->txq_lock));
   6165 
   6166 	if (sc->sc_type < WM_T_82543) {
   6167 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6168 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6169 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6170 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6171 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6172 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6173 	} else {
   6174 		int qid = wmq->wmq_id;
   6175 
   6176 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6177 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6178 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6179 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6180 
   6181 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6182 			/*
   6183 			 * Don't write TDT before TCTL.EN is set.
    6184 			 * See the documentation.
   6185 			 */
   6186 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6187 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6188 			    | TXDCTL_WTHRESH(0));
   6189 		else {
   6190 			/* XXX should update with AIM? */
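			/*
			 * Dividing wmq_itr by 4 gives the "simple timer"
			 * behavior described in wm_init_locked()'s ITR setup.
			 */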
   6191 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6192 			if (sc->sc_type >= WM_T_82540) {
   6193 				/* should be same */
   6194 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6195 			}
   6196 
   6197 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6198 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6199 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6200 		}
   6201 	}
   6202 }
   6203 
   6204 static void
   6205 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6206 {
   6207 	int i;
   6208 
   6209 	KASSERT(mutex_owned(txq->txq_lock));
   6210 
   6211 	/* Initialize the transmit job descriptors. */
   6212 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6213 		txq->txq_soft[i].txs_mbuf = NULL;
   6214 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6215 	txq->txq_snext = 0;
   6216 	txq->txq_sdirty = 0;
   6217 }
   6218 
   6219 static void
   6220 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6221     struct wm_txqueue *txq)
   6222 {
   6223 
   6224 	KASSERT(mutex_owned(txq->txq_lock));
   6225 
   6226 	/*
   6227 	 * Set up some register offsets that are different between
   6228 	 * the i82542 and the i82543 and later chips.
   6229 	 */
   6230 	if (sc->sc_type < WM_T_82543)
   6231 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6232 	else
   6233 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6234 
   6235 	wm_init_tx_descs(sc, txq);
   6236 	wm_init_tx_regs(sc, wmq, txq);
   6237 	wm_init_tx_buffer(sc, txq);
   6238 }
   6239 
   6240 static void
   6241 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6242     struct wm_rxqueue *rxq)
   6243 {
   6244 
   6245 	KASSERT(mutex_owned(rxq->rxq_lock));
   6246 
   6247 	/*
   6248 	 * Initialize the receive descriptor and receive job
   6249 	 * descriptor rings.
   6250 	 */
   6251 	if (sc->sc_type < WM_T_82543) {
   6252 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6253 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6254 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6255 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6256 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6257 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6258 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6259 
   6260 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6261 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6262 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6263 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6264 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6265 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6266 	} else {
   6267 		int qid = wmq->wmq_id;
   6268 
   6269 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6270 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
    6271 		CSR_WRITE(sc, WMREG_RDLEN(qid),
         		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6272 
   6273 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6274 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6275 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher\n", __func__, MCLBYTES);
    6276 
    6277 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
    6278 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
    6279 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
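         			/*
         			 * Illustrative note, not taken from the code above:
         			 * BSIZEPKT is expressed in units of
         			 * 1 << SRRCTL_BSIZEPKT_SHIFT bytes.  Assuming the
         			 * usual shift of 10 (1 KB units) and MCLBYTES of
         			 * 2048, the buffer size field written here is 2,
         			 * and the panic above catches cluster sizes that
         			 * are not a multiple of that unit.
         			 */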
   6280 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6281 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6282 			    | RXDCTL_WTHRESH(1));
   6283 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6284 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6285 		} else {
   6286 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6287 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6288 			/* XXX should update with AIM? */
   6289 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6290 			/* MUST be the same value as RDTR. */
   6291 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6292 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6293 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6294 		}
   6295 	}
   6296 }
   6297 
   6298 static int
   6299 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6300 {
   6301 	struct wm_rxsoft *rxs;
   6302 	int error, i;
   6303 
   6304 	KASSERT(mutex_owned(rxq->rxq_lock));
   6305 
   6306 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6307 		rxs = &rxq->rxq_soft[i];
   6308 		if (rxs->rxs_mbuf == NULL) {
   6309 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6310 				log(LOG_ERR, "%s: unable to allocate or map "
   6311 				    "rx buffer %d, error = %d\n",
   6312 				    device_xname(sc->sc_dev), i, error);
   6313 				/*
   6314 				 * XXX Should attempt to run with fewer receive
   6315 				 * XXX buffers instead of just failing.
   6316 				 */
   6317 				wm_rxdrain(rxq);
   6318 				return ENOMEM;
   6319 			}
   6320 		} else {
   6321 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6322 				wm_init_rxdesc(rxq, i);
    6323 			/*
    6324 			 * For 82575 and newer devices, the RX descriptors
    6325 			 * must be initialized after RCTL.EN is set in
    6326 			 * wm_set_filter().
    6327 			 */
   6328 		}
   6329 	}
   6330 	rxq->rxq_ptr = 0;
   6331 	rxq->rxq_discard = 0;
   6332 	WM_RXCHAIN_RESET(rxq);
   6333 
   6334 	return 0;
   6335 }
   6336 
   6337 static int
   6338 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6339     struct wm_rxqueue *rxq)
   6340 {
   6341 
   6342 	KASSERT(mutex_owned(rxq->rxq_lock));
   6343 
   6344 	/*
   6345 	 * Set up some register offsets that are different between
   6346 	 * the i82542 and the i82543 and later chips.
   6347 	 */
   6348 	if (sc->sc_type < WM_T_82543)
   6349 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6350 	else
   6351 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6352 
   6353 	wm_init_rx_regs(sc, wmq, rxq);
   6354 	return wm_init_rx_buffer(sc, rxq);
   6355 }
   6356 
   6357 /*
    6358  * wm_init_txrx_queues:
    6359  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6360  */
   6361 static int
   6362 wm_init_txrx_queues(struct wm_softc *sc)
   6363 {
   6364 	int i, error = 0;
   6365 
   6366 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6367 		device_xname(sc->sc_dev), __func__));
   6368 
   6369 	for (i = 0; i < sc->sc_nqueues; i++) {
   6370 		struct wm_queue *wmq = &sc->sc_queue[i];
   6371 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6372 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6373 
    6374 		/*
    6375 		 * TODO
    6376 		 * Currently, a constant value is used instead of AIM.
    6377 		 * Furthermore, the interrupt interval used by multiqueue,
    6378 		 * which uses polling mode, is less than the default value.
    6379 		 * More tuning and AIM are required.
    6380 		 */
   6381 		if (sc->sc_nqueues > 1)
   6382 			wmq->wmq_itr = 50;
   6383 		else
   6384 			wmq->wmq_itr = sc->sc_itr_init;
   6385 		wmq->wmq_set_itr = true;
   6386 
   6387 		mutex_enter(txq->txq_lock);
   6388 		wm_init_tx_queue(sc, wmq, txq);
   6389 		mutex_exit(txq->txq_lock);
   6390 
   6391 		mutex_enter(rxq->rxq_lock);
   6392 		error = wm_init_rx_queue(sc, wmq, rxq);
   6393 		mutex_exit(rxq->rxq_lock);
   6394 		if (error)
   6395 			break;
   6396 	}
   6397 
   6398 	return error;
   6399 }
   6400 
   6401 /*
   6402  * wm_tx_offload:
   6403  *
   6404  *	Set up TCP/IP checksumming parameters for the
   6405  *	specified packet.
   6406  */
   6407 static int
   6408 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6409     uint8_t *fieldsp)
   6410 {
   6411 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6412 	struct mbuf *m0 = txs->txs_mbuf;
   6413 	struct livengood_tcpip_ctxdesc *t;
   6414 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6415 	uint32_t ipcse;
   6416 	struct ether_header *eh;
   6417 	int offset, iphl;
   6418 	uint8_t fields;
   6419 
   6420 	/*
   6421 	 * XXX It would be nice if the mbuf pkthdr had offset
   6422 	 * fields for the protocol headers.
   6423 	 */
   6424 
   6425 	eh = mtod(m0, struct ether_header *);
   6426 	switch (htons(eh->ether_type)) {
   6427 	case ETHERTYPE_IP:
   6428 	case ETHERTYPE_IPV6:
   6429 		offset = ETHER_HDR_LEN;
   6430 		break;
   6431 
   6432 	case ETHERTYPE_VLAN:
   6433 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6434 		break;
   6435 
   6436 	default:
   6437 		/*
   6438 		 * Don't support this protocol or encapsulation.
   6439 		 */
   6440 		*fieldsp = 0;
   6441 		*cmdp = 0;
   6442 		return 0;
   6443 	}
   6444 
   6445 	if ((m0->m_pkthdr.csum_flags &
   6446 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6447 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6448 	} else {
   6449 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6450 	}
   6451 	ipcse = offset + iphl - 1;
   6452 
   6453 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6454 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6455 	seg = 0;
   6456 	fields = 0;
   6457 
   6458 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6459 		int hlen = offset + iphl;
   6460 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6461 
   6462 		if (__predict_false(m0->m_len <
   6463 				    (hlen + sizeof(struct tcphdr)))) {
   6464 			/*
   6465 			 * TCP/IP headers are not in the first mbuf; we need
   6466 			 * to do this the slow and painful way.  Let's just
   6467 			 * hope this doesn't happen very often.
   6468 			 */
   6469 			struct tcphdr th;
   6470 
   6471 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6472 
   6473 			m_copydata(m0, hlen, sizeof(th), &th);
   6474 			if (v4) {
   6475 				struct ip ip;
   6476 
   6477 				m_copydata(m0, offset, sizeof(ip), &ip);
   6478 				ip.ip_len = 0;
   6479 				m_copyback(m0,
   6480 				    offset + offsetof(struct ip, ip_len),
   6481 				    sizeof(ip.ip_len), &ip.ip_len);
   6482 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6483 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6484 			} else {
   6485 				struct ip6_hdr ip6;
   6486 
   6487 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6488 				ip6.ip6_plen = 0;
   6489 				m_copyback(m0,
   6490 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6491 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6492 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6493 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6494 			}
   6495 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6496 			    sizeof(th.th_sum), &th.th_sum);
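         			/*
         			 * At this point th_sum holds the pseudo-header
         			 * checksum computed with a zero length field;
         			 * the hardware folds the real per-segment
         			 * lengths into it as it performs TSO.
         			 */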
   6497 
   6498 			hlen += th.th_off << 2;
   6499 		} else {
   6500 			/*
   6501 			 * TCP/IP headers are in the first mbuf; we can do
   6502 			 * this the easy way.
   6503 			 */
   6504 			struct tcphdr *th;
   6505 
   6506 			if (v4) {
   6507 				struct ip *ip =
   6508 				    (void *)(mtod(m0, char *) + offset);
   6509 				th = (void *)(mtod(m0, char *) + hlen);
   6510 
   6511 				ip->ip_len = 0;
   6512 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6513 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6514 			} else {
   6515 				struct ip6_hdr *ip6 =
   6516 				    (void *)(mtod(m0, char *) + offset);
   6517 				th = (void *)(mtod(m0, char *) + hlen);
   6518 
   6519 				ip6->ip6_plen = 0;
   6520 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6521 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6522 			}
   6523 			hlen += th->th_off << 2;
   6524 		}
   6525 
   6526 		if (v4) {
   6527 			WM_Q_EVCNT_INCR(txq, txtso);
   6528 			cmdlen |= WTX_TCPIP_CMD_IP;
   6529 		} else {
   6530 			WM_Q_EVCNT_INCR(txq, txtso6);
   6531 			ipcse = 0;
   6532 		}
   6533 		cmd |= WTX_TCPIP_CMD_TSE;
   6534 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6535 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6536 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6537 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6538 	}
   6539 
   6540 	/*
   6541 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6542 	 * offload feature, if we load the context descriptor, we
   6543 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6544 	 */
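         	/*
         	 * Worked example (illustrative only): for an untagged IPv4/TCP
         	 * frame with no IP options, offset = ETHER_HDR_LEN = 14 and
         	 * iphl = 20, so IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum is at
         	 * offset 10 in struct ip) and IPCSE = 14 + 20 - 1 = 33.  After
         	 * "offset += iphl" below, TUCSS = 34 and, for TCP,
         	 * TUCSO = 34 + 16 = 50 (th_sum is at offset 16 in struct tcphdr).
         	 */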
   6545 
   6546 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6547 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6548 	    WTX_TCPIP_IPCSE(ipcse);
   6549 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6550 		WM_Q_EVCNT_INCR(txq, txipsum);
   6551 		fields |= WTX_IXSM;
   6552 	}
   6553 
   6554 	offset += iphl;
   6555 
   6556 	if (m0->m_pkthdr.csum_flags &
   6557 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6558 		WM_Q_EVCNT_INCR(txq, txtusum);
   6559 		fields |= WTX_TXSM;
   6560 		tucs = WTX_TCPIP_TUCSS(offset) |
   6561 		    WTX_TCPIP_TUCSO(offset +
   6562 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6563 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6564 	} else if ((m0->m_pkthdr.csum_flags &
   6565 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6566 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6567 		fields |= WTX_TXSM;
   6568 		tucs = WTX_TCPIP_TUCSS(offset) |
   6569 		    WTX_TCPIP_TUCSO(offset +
   6570 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6571 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6572 	} else {
   6573 		/* Just initialize it to a valid TCP context. */
   6574 		tucs = WTX_TCPIP_TUCSS(offset) |
   6575 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6576 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6577 	}
   6578 
   6579 	/* Fill in the context descriptor. */
   6580 	t = (struct livengood_tcpip_ctxdesc *)
   6581 	    &txq->txq_descs[txq->txq_next];
   6582 	t->tcpip_ipcs = htole32(ipcs);
   6583 	t->tcpip_tucs = htole32(tucs);
   6584 	t->tcpip_cmdlen = htole32(cmdlen);
   6585 	t->tcpip_seg = htole32(seg);
   6586 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6587 
   6588 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6589 	txs->txs_ndesc++;
   6590 
   6591 	*cmdp = cmd;
   6592 	*fieldsp = fields;
   6593 
   6594 	return 0;
   6595 }
   6596 
   6597 static inline int
   6598 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6599 {
   6600 	struct wm_softc *sc = ifp->if_softc;
   6601 	u_int cpuid = cpu_index(curcpu());
   6602 
    6603 	/*
    6604 	 * Currently, a simple distribution strategy.
    6605 	 * TODO:
    6606 	 * Distribute by flowid (RSS hash value).
    6607 	 */
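         	/*
         	 * Illustrative example of the mapping below: with ncpu = 4,
         	 * sc_affinity_offset = 1 and sc_nqueues = 2, CPU 1 maps to
         	 * (1 + 4 - 1) % 2 = 0 and CPU 2 maps to (2 + 4 - 1) % 2 = 1,
         	 * so consecutive CPUs alternate between the two queues.
         	 */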
    6608 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6609 }
   6610 
   6611 /*
   6612  * wm_start:		[ifnet interface function]
   6613  *
   6614  *	Start packet transmission on the interface.
   6615  */
   6616 static void
   6617 wm_start(struct ifnet *ifp)
   6618 {
   6619 	struct wm_softc *sc = ifp->if_softc;
   6620 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6621 
   6622 #ifdef WM_MPSAFE
   6623 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6624 #endif
   6625 	/*
   6626 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6627 	 */
   6628 
   6629 	mutex_enter(txq->txq_lock);
   6630 	if (!txq->txq_stopping)
   6631 		wm_start_locked(ifp);
   6632 	mutex_exit(txq->txq_lock);
   6633 }
   6634 
   6635 static void
   6636 wm_start_locked(struct ifnet *ifp)
   6637 {
   6638 	struct wm_softc *sc = ifp->if_softc;
   6639 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6640 
   6641 	wm_send_common_locked(ifp, txq, false);
   6642 }
   6643 
   6644 static int
   6645 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6646 {
   6647 	int qid;
   6648 	struct wm_softc *sc = ifp->if_softc;
   6649 	struct wm_txqueue *txq;
   6650 
   6651 	qid = wm_select_txqueue(ifp, m);
   6652 	txq = &sc->sc_queue[qid].wmq_txq;
   6653 
   6654 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6655 		m_freem(m);
   6656 		WM_Q_EVCNT_INCR(txq, txdrop);
   6657 		return ENOBUFS;
   6658 	}
   6659 
   6660 	/*
   6661 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6662 	 */
   6663 	ifp->if_obytes += m->m_pkthdr.len;
   6664 	if (m->m_flags & M_MCAST)
   6665 		ifp->if_omcasts++;
   6666 
   6667 	if (mutex_tryenter(txq->txq_lock)) {
   6668 		if (!txq->txq_stopping)
   6669 			wm_transmit_locked(ifp, txq);
   6670 		mutex_exit(txq->txq_lock);
   6671 	}
   6672 
   6673 	return 0;
   6674 }
   6675 
   6676 static void
   6677 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6678 {
   6679 
   6680 	wm_send_common_locked(ifp, txq, true);
   6681 }
   6682 
   6683 static void
   6684 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6685     bool is_transmit)
   6686 {
   6687 	struct wm_softc *sc = ifp->if_softc;
   6688 	struct mbuf *m0;
   6689 	struct m_tag *mtag;
   6690 	struct wm_txsoft *txs;
   6691 	bus_dmamap_t dmamap;
   6692 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6693 	bus_addr_t curaddr;
   6694 	bus_size_t seglen, curlen;
   6695 	uint32_t cksumcmd;
   6696 	uint8_t cksumfields;
   6697 
   6698 	KASSERT(mutex_owned(txq->txq_lock));
   6699 
   6700 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6701 		return;
   6702 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6703 		return;
   6704 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6705 		return;
   6706 
   6707 	/* Remember the previous number of free descriptors. */
   6708 	ofree = txq->txq_free;
   6709 
   6710 	/*
   6711 	 * Loop through the send queue, setting up transmit descriptors
   6712 	 * until we drain the queue, or use up all available transmit
   6713 	 * descriptors.
   6714 	 */
   6715 	for (;;) {
   6716 		m0 = NULL;
   6717 
   6718 		/* Get a work queue entry. */
   6719 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6720 			wm_txeof(sc, txq);
   6721 			if (txq->txq_sfree == 0) {
   6722 				DPRINTF(WM_DEBUG_TX,
   6723 				    ("%s: TX: no free job descriptors\n",
   6724 					device_xname(sc->sc_dev)));
   6725 				WM_Q_EVCNT_INCR(txq, txsstall);
   6726 				break;
   6727 			}
   6728 		}
   6729 
   6730 		/* Grab a packet off the queue. */
   6731 		if (is_transmit)
   6732 			m0 = pcq_get(txq->txq_interq);
   6733 		else
   6734 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6735 		if (m0 == NULL)
   6736 			break;
   6737 
   6738 		DPRINTF(WM_DEBUG_TX,
   6739 		    ("%s: TX: have packet to transmit: %p\n",
   6740 		    device_xname(sc->sc_dev), m0));
   6741 
   6742 		txs = &txq->txq_soft[txq->txq_snext];
   6743 		dmamap = txs->txs_dmamap;
   6744 
   6745 		use_tso = (m0->m_pkthdr.csum_flags &
   6746 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6747 
   6748 		/*
   6749 		 * So says the Linux driver:
   6750 		 * The controller does a simple calculation to make sure
   6751 		 * there is enough room in the FIFO before initiating the
   6752 		 * DMA for each buffer.  The calc is:
   6753 		 *	4 = ceil(buffer len / MSS)
   6754 		 * To make sure we don't overrun the FIFO, adjust the max
   6755 		 * buffer len if the MSS drops.
   6756 		 */
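         		/*
         		 * Worked example (illustrative only): with an MSS of
         		 * 1460, 1460 << 2 = 5840 < WTX_MAX_LEN, so each DMA
         		 * segment is capped at 5840 bytes, i.e. four full MSS
         		 * per buffer, which keeps the FIFO estimate above valid.
         		 */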
   6757 		dmamap->dm_maxsegsz =
   6758 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6759 		    ? m0->m_pkthdr.segsz << 2
   6760 		    : WTX_MAX_LEN;
   6761 
   6762 		/*
   6763 		 * Load the DMA map.  If this fails, the packet either
   6764 		 * didn't fit in the allotted number of segments, or we
   6765 		 * were short on resources.  For the too-many-segments
   6766 		 * case, we simply report an error and drop the packet,
   6767 		 * since we can't sanely copy a jumbo packet to a single
   6768 		 * buffer.
   6769 		 */
   6770 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6771 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6772 		if (error) {
   6773 			if (error == EFBIG) {
   6774 				WM_Q_EVCNT_INCR(txq, txdrop);
   6775 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6776 				    "DMA segments, dropping...\n",
   6777 				    device_xname(sc->sc_dev));
   6778 				wm_dump_mbuf_chain(sc, m0);
   6779 				m_freem(m0);
   6780 				continue;
   6781 			}
    6782 			/* Short on resources, just stop for now. */
   6783 			DPRINTF(WM_DEBUG_TX,
   6784 			    ("%s: TX: dmamap load failed: %d\n",
   6785 			    device_xname(sc->sc_dev), error));
   6786 			break;
   6787 		}
   6788 
   6789 		segs_needed = dmamap->dm_nsegs;
   6790 		if (use_tso) {
   6791 			/* For sentinel descriptor; see below. */
   6792 			segs_needed++;
   6793 		}
   6794 
   6795 		/*
   6796 		 * Ensure we have enough descriptors free to describe
   6797 		 * the packet.  Note, we always reserve one descriptor
   6798 		 * at the end of the ring due to the semantics of the
   6799 		 * TDT register, plus one more in the event we need
   6800 		 * to load offload context.
   6801 		 */
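         		/*
         		 * E.g. a packet loaded into 8 DMA segments with TSO
         		 * (segs_needed = 9) proceeds only if txq_free >= 11:
         		 * 9 data descriptors, the reserved TDT slot, and one
         		 * slot for a possible offload context descriptor.
         		 */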
   6802 		if (segs_needed > txq->txq_free - 2) {
   6803 			/*
   6804 			 * Not enough free descriptors to transmit this
   6805 			 * packet.  We haven't committed anything yet,
   6806 			 * so just unload the DMA map, put the packet
   6807 			 * pack on the queue, and punt.  Notify the upper
    6808 			 * back on the queue, and punt.  Notify the upper
   6809 			 */
   6810 			DPRINTF(WM_DEBUG_TX,
   6811 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6812 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6813 			    segs_needed, txq->txq_free - 1));
   6814 			if (!is_transmit)
   6815 				ifp->if_flags |= IFF_OACTIVE;
   6816 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6817 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6818 			WM_Q_EVCNT_INCR(txq, txdstall);
   6819 			break;
   6820 		}
   6821 
   6822 		/*
   6823 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6824 		 * once we know we can transmit the packet, since we
   6825 		 * do some internal FIFO space accounting here.
   6826 		 */
   6827 		if (sc->sc_type == WM_T_82547 &&
   6828 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6829 			DPRINTF(WM_DEBUG_TX,
   6830 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6831 			    device_xname(sc->sc_dev)));
   6832 			if (!is_transmit)
   6833 				ifp->if_flags |= IFF_OACTIVE;
   6834 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6835 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6836 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6837 			break;
   6838 		}
   6839 
   6840 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6841 
   6842 		DPRINTF(WM_DEBUG_TX,
   6843 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6844 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6845 
   6846 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6847 
   6848 		/*
   6849 		 * Store a pointer to the packet so that we can free it
   6850 		 * later.
   6851 		 *
    6852 		 * Initially, we consider the number of descriptors the
    6853 		 * packet uses to be the number of DMA segments.  This may
    6854 		 * be incremented by 1 if we do checksum offload (a
    6855 		 * descriptor is used to set the checksum context).
   6856 		 */
   6857 		txs->txs_mbuf = m0;
   6858 		txs->txs_firstdesc = txq->txq_next;
   6859 		txs->txs_ndesc = segs_needed;
   6860 
   6861 		/* Set up offload parameters for this packet. */
   6862 		if (m0->m_pkthdr.csum_flags &
   6863 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6864 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6865 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6866 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6867 					  &cksumfields) != 0) {
   6868 				/* Error message already displayed. */
   6869 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6870 				continue;
   6871 			}
   6872 		} else {
   6873 			cksumcmd = 0;
   6874 			cksumfields = 0;
   6875 		}
   6876 
   6877 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6878 
   6879 		/* Sync the DMA map. */
   6880 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6881 		    BUS_DMASYNC_PREWRITE);
   6882 
   6883 		/* Initialize the transmit descriptor. */
   6884 		for (nexttx = txq->txq_next, seg = 0;
   6885 		     seg < dmamap->dm_nsegs; seg++) {
   6886 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6887 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6888 			     seglen != 0;
   6889 			     curaddr += curlen, seglen -= curlen,
   6890 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6891 				curlen = seglen;
   6892 
   6893 				/*
   6894 				 * So says the Linux driver:
   6895 				 * Work around for premature descriptor
   6896 				 * write-backs in TSO mode.  Append a
   6897 				 * 4-byte sentinel descriptor.
   6898 				 */
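         				/*
         				 * E.g. if the final segment is 1024
         				 * bytes, its descriptor is shortened to
         				 * 1020 bytes and the loop emits one
         				 * more 4-byte sentinel descriptor for
         				 * the remainder.
         				 */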
   6899 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6900 				    curlen > 8)
   6901 					curlen -= 4;
   6902 
   6903 				wm_set_dma_addr(
   6904 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6905 				txq->txq_descs[nexttx].wtx_cmdlen
   6906 				    = htole32(cksumcmd | curlen);
   6907 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6908 				    = 0;
   6909 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6910 				    = cksumfields;
    6911 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6912 				lasttx = nexttx;
   6913 
   6914 				DPRINTF(WM_DEBUG_TX,
   6915 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6916 				     "len %#04zx\n",
   6917 				    device_xname(sc->sc_dev), nexttx,
   6918 				    (uint64_t)curaddr, curlen));
   6919 			}
   6920 		}
   6921 
   6922 		KASSERT(lasttx != -1);
   6923 
   6924 		/*
   6925 		 * Set up the command byte on the last descriptor of
   6926 		 * the packet.  If we're in the interrupt delay window,
   6927 		 * delay the interrupt.
   6928 		 */
   6929 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6930 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6931 
   6932 		/*
   6933 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6934 		 * up the descriptor to encapsulate the packet for us.
   6935 		 *
   6936 		 * This is only valid on the last descriptor of the packet.
   6937 		 */
   6938 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6939 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6940 			    htole32(WTX_CMD_VLE);
   6941 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6942 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6943 		}
   6944 
   6945 		txs->txs_lastdesc = lasttx;
   6946 
   6947 		DPRINTF(WM_DEBUG_TX,
   6948 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6949 		    device_xname(sc->sc_dev),
   6950 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6951 
   6952 		/* Sync the descriptors we're using. */
   6953 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6954 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6955 
   6956 		/* Give the packet to the chip. */
   6957 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6958 
   6959 		DPRINTF(WM_DEBUG_TX,
   6960 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6961 
   6962 		DPRINTF(WM_DEBUG_TX,
   6963 		    ("%s: TX: finished transmitting packet, job %d\n",
   6964 		    device_xname(sc->sc_dev), txq->txq_snext));
   6965 
   6966 		/* Advance the tx pointer. */
   6967 		txq->txq_free -= txs->txs_ndesc;
   6968 		txq->txq_next = nexttx;
   6969 
   6970 		txq->txq_sfree--;
   6971 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6972 
   6973 		/* Pass the packet to any BPF listeners. */
   6974 		bpf_mtap(ifp, m0);
   6975 	}
   6976 
   6977 	if (m0 != NULL) {
   6978 		if (!is_transmit)
   6979 			ifp->if_flags |= IFF_OACTIVE;
   6980 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6981 		WM_Q_EVCNT_INCR(txq, txdrop);
   6982 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6983 			__func__));
   6984 		m_freem(m0);
   6985 	}
   6986 
   6987 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6988 		/* No more slots; notify upper layer. */
   6989 		if (!is_transmit)
   6990 			ifp->if_flags |= IFF_OACTIVE;
   6991 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6992 	}
   6993 
   6994 	if (txq->txq_free != ofree) {
   6995 		/* Set a watchdog timer in case the chip flakes out. */
   6996 		ifp->if_timer = 5;
   6997 	}
   6998 }
   6999 
   7000 /*
   7001  * wm_nq_tx_offload:
   7002  *
   7003  *	Set up TCP/IP checksumming parameters for the
   7004  *	specified packet, for NEWQUEUE devices
   7005  */
   7006 static int
   7007 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7008     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7009 {
   7010 	struct mbuf *m0 = txs->txs_mbuf;
   7011 	struct m_tag *mtag;
   7012 	uint32_t vl_len, mssidx, cmdc;
   7013 	struct ether_header *eh;
   7014 	int offset, iphl;
   7015 
   7016 	/*
   7017 	 * XXX It would be nice if the mbuf pkthdr had offset
   7018 	 * fields for the protocol headers.
   7019 	 */
   7020 	*cmdlenp = 0;
   7021 	*fieldsp = 0;
   7022 
   7023 	eh = mtod(m0, struct ether_header *);
   7024 	switch (htons(eh->ether_type)) {
   7025 	case ETHERTYPE_IP:
   7026 	case ETHERTYPE_IPV6:
   7027 		offset = ETHER_HDR_LEN;
   7028 		break;
   7029 
   7030 	case ETHERTYPE_VLAN:
   7031 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7032 		break;
   7033 
   7034 	default:
   7035 		/* Don't support this protocol or encapsulation. */
   7036 		*do_csum = false;
   7037 		return 0;
   7038 	}
   7039 	*do_csum = true;
   7040 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7041 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7042 
   7043 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7044 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7045 
   7046 	if ((m0->m_pkthdr.csum_flags &
   7047 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7048 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7049 	} else {
   7050 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7051 	}
   7052 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7053 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
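         	/*
         	 * Worked example, assuming NQTXC_VLLEN_MACLEN_SHIFT is 9 and
         	 * NQTXC_VLLEN_IPLEN_SHIFT is 0 (the 82575 advanced context
         	 * layout): a plain IPv4 frame gives
         	 * vl_len = (14 << 9) | 20 = 0x1c14 before any VLAN tag is
         	 * merged in below.
         	 */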
   7054 
   7055 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7056 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7057 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7058 		*cmdlenp |= NQTX_CMD_VLE;
   7059 	}
   7060 
   7061 	mssidx = 0;
   7062 
   7063 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7064 		int hlen = offset + iphl;
   7065 		int tcp_hlen;
   7066 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7067 
   7068 		if (__predict_false(m0->m_len <
   7069 				    (hlen + sizeof(struct tcphdr)))) {
   7070 			/*
   7071 			 * TCP/IP headers are not in the first mbuf; we need
   7072 			 * to do this the slow and painful way.  Let's just
   7073 			 * hope this doesn't happen very often.
   7074 			 */
   7075 			struct tcphdr th;
   7076 
   7077 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7078 
   7079 			m_copydata(m0, hlen, sizeof(th), &th);
   7080 			if (v4) {
   7081 				struct ip ip;
   7082 
   7083 				m_copydata(m0, offset, sizeof(ip), &ip);
   7084 				ip.ip_len = 0;
   7085 				m_copyback(m0,
   7086 				    offset + offsetof(struct ip, ip_len),
   7087 				    sizeof(ip.ip_len), &ip.ip_len);
   7088 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7089 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7090 			} else {
   7091 				struct ip6_hdr ip6;
   7092 
   7093 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7094 				ip6.ip6_plen = 0;
   7095 				m_copyback(m0,
   7096 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7097 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7098 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7099 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7100 			}
   7101 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7102 			    sizeof(th.th_sum), &th.th_sum);
   7103 
   7104 			tcp_hlen = th.th_off << 2;
   7105 		} else {
   7106 			/*
   7107 			 * TCP/IP headers are in the first mbuf; we can do
   7108 			 * this the easy way.
   7109 			 */
   7110 			struct tcphdr *th;
   7111 
   7112 			if (v4) {
   7113 				struct ip *ip =
   7114 				    (void *)(mtod(m0, char *) + offset);
   7115 				th = (void *)(mtod(m0, char *) + hlen);
   7116 
   7117 				ip->ip_len = 0;
   7118 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7119 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7120 			} else {
   7121 				struct ip6_hdr *ip6 =
   7122 				    (void *)(mtod(m0, char *) + offset);
   7123 				th = (void *)(mtod(m0, char *) + hlen);
   7124 
   7125 				ip6->ip6_plen = 0;
   7126 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7127 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7128 			}
   7129 			tcp_hlen = th->th_off << 2;
   7130 		}
   7131 		hlen += tcp_hlen;
   7132 		*cmdlenp |= NQTX_CMD_TSE;
   7133 
   7134 		if (v4) {
   7135 			WM_Q_EVCNT_INCR(txq, txtso);
   7136 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7137 		} else {
   7138 			WM_Q_EVCNT_INCR(txq, txtso6);
   7139 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7140 		}
    7141 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    7142 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7143 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7144 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7145 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7146 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
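         		/*
         		 * Worked example, assuming NQTXC_MSSIDX_MSS_SHIFT is 16
         		 * and NQTXC_MSSIDX_L4LEN_SHIFT is 8: segsz = 1460 with a
         		 * 20-byte TCP header gives
         		 * mssidx = (1460 << 16) | (20 << 8) = 0x05b41400.
         		 */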
   7147 	} else {
   7148 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7149 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7150 	}
   7151 
   7152 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7153 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7154 		cmdc |= NQTXC_CMD_IP4;
   7155 	}
   7156 
   7157 	if (m0->m_pkthdr.csum_flags &
   7158 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7159 		WM_Q_EVCNT_INCR(txq, txtusum);
   7160 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7161 			cmdc |= NQTXC_CMD_TCP;
   7162 		} else {
   7163 			cmdc |= NQTXC_CMD_UDP;
   7164 		}
   7165 		cmdc |= NQTXC_CMD_IP4;
   7166 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7167 	}
   7168 	if (m0->m_pkthdr.csum_flags &
   7169 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7170 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7171 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7172 			cmdc |= NQTXC_CMD_TCP;
   7173 		} else {
   7174 			cmdc |= NQTXC_CMD_UDP;
   7175 		}
   7176 		cmdc |= NQTXC_CMD_IP6;
   7177 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7178 	}
   7179 
   7180 	/* Fill in the context descriptor. */
   7181 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7182 	    htole32(vl_len);
   7183 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7184 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7185 	    htole32(cmdc);
   7186 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7187 	    htole32(mssidx);
   7188 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7189 	DPRINTF(WM_DEBUG_TX,
   7190 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7191 	    txq->txq_next, 0, vl_len));
   7192 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7193 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7194 	txs->txs_ndesc++;
   7195 	return 0;
   7196 }
   7197 
   7198 /*
   7199  * wm_nq_start:		[ifnet interface function]
   7200  *
   7201  *	Start packet transmission on the interface for NEWQUEUE devices
   7202  */
   7203 static void
   7204 wm_nq_start(struct ifnet *ifp)
   7205 {
   7206 	struct wm_softc *sc = ifp->if_softc;
   7207 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7208 
   7209 #ifdef WM_MPSAFE
   7210 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7211 #endif
   7212 	/*
   7213 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7214 	 */
   7215 
   7216 	mutex_enter(txq->txq_lock);
   7217 	if (!txq->txq_stopping)
   7218 		wm_nq_start_locked(ifp);
   7219 	mutex_exit(txq->txq_lock);
   7220 }
   7221 
   7222 static void
   7223 wm_nq_start_locked(struct ifnet *ifp)
   7224 {
   7225 	struct wm_softc *sc = ifp->if_softc;
   7226 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7227 
   7228 	wm_nq_send_common_locked(ifp, txq, false);
   7229 }
   7230 
   7231 static int
   7232 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7233 {
   7234 	int qid;
   7235 	struct wm_softc *sc = ifp->if_softc;
   7236 	struct wm_txqueue *txq;
   7237 
   7238 	qid = wm_select_txqueue(ifp, m);
   7239 	txq = &sc->sc_queue[qid].wmq_txq;
   7240 
   7241 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7242 		m_freem(m);
   7243 		WM_Q_EVCNT_INCR(txq, txdrop);
   7244 		return ENOBUFS;
   7245 	}
   7246 
   7247 	/*
   7248 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7249 	 */
   7250 	ifp->if_obytes += m->m_pkthdr.len;
   7251 	if (m->m_flags & M_MCAST)
   7252 		ifp->if_omcasts++;
   7253 
    7254 	/*
    7255 	 * There are two situations in which this mutex_tryenter() can
    7256 	 * fail at run time:
    7257 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7258 	 *     (2) contention with the deferred if_start softint
         	 *         (wm_handle_queue())
    7259 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7260 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7261 	 * In case (2), the last packet enqueued to txq->txq_interq is
    7262 	 * likewise dequeued by wm_deferred_start_locked(), so it does
    7263 	 * not get stuck either.
         	 */
   7264 	if (mutex_tryenter(txq->txq_lock)) {
   7265 		if (!txq->txq_stopping)
   7266 			wm_nq_transmit_locked(ifp, txq);
   7267 		mutex_exit(txq->txq_lock);
   7268 	}
   7269 
   7270 	return 0;
   7271 }
   7272 
   7273 static void
   7274 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7275 {
   7276 
   7277 	wm_nq_send_common_locked(ifp, txq, true);
   7278 }
   7279 
   7280 static void
   7281 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7282     bool is_transmit)
   7283 {
   7284 	struct wm_softc *sc = ifp->if_softc;
   7285 	struct mbuf *m0;
   7286 	struct m_tag *mtag;
   7287 	struct wm_txsoft *txs;
   7288 	bus_dmamap_t dmamap;
   7289 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7290 	bool do_csum, sent;
   7291 
   7292 	KASSERT(mutex_owned(txq->txq_lock));
   7293 
   7294 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7295 		return;
   7296 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7297 		return;
   7298 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7299 		return;
   7300 
   7301 	sent = false;
   7302 
   7303 	/*
   7304 	 * Loop through the send queue, setting up transmit descriptors
   7305 	 * until we drain the queue, or use up all available transmit
   7306 	 * descriptors.
   7307 	 */
   7308 	for (;;) {
   7309 		m0 = NULL;
   7310 
   7311 		/* Get a work queue entry. */
   7312 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7313 			wm_txeof(sc, txq);
   7314 			if (txq->txq_sfree == 0) {
   7315 				DPRINTF(WM_DEBUG_TX,
   7316 				    ("%s: TX: no free job descriptors\n",
   7317 					device_xname(sc->sc_dev)));
   7318 				WM_Q_EVCNT_INCR(txq, txsstall);
   7319 				break;
   7320 			}
   7321 		}
   7322 
   7323 		/* Grab a packet off the queue. */
   7324 		if (is_transmit)
   7325 			m0 = pcq_get(txq->txq_interq);
   7326 		else
   7327 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7328 		if (m0 == NULL)
   7329 			break;
   7330 
   7331 		DPRINTF(WM_DEBUG_TX,
   7332 		    ("%s: TX: have packet to transmit: %p\n",
   7333 		    device_xname(sc->sc_dev), m0));
   7334 
   7335 		txs = &txq->txq_soft[txq->txq_snext];
   7336 		dmamap = txs->txs_dmamap;
   7337 
   7338 		/*
   7339 		 * Load the DMA map.  If this fails, the packet either
   7340 		 * didn't fit in the allotted number of segments, or we
   7341 		 * were short on resources.  For the too-many-segments
   7342 		 * case, we simply report an error and drop the packet,
   7343 		 * since we can't sanely copy a jumbo packet to a single
   7344 		 * buffer.
   7345 		 */
   7346 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7347 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7348 		if (error) {
   7349 			if (error == EFBIG) {
   7350 				WM_Q_EVCNT_INCR(txq, txdrop);
   7351 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7352 				    "DMA segments, dropping...\n",
   7353 				    device_xname(sc->sc_dev));
   7354 				wm_dump_mbuf_chain(sc, m0);
   7355 				m_freem(m0);
   7356 				continue;
   7357 			}
   7358 			/* Short on resources, just stop for now. */
   7359 			DPRINTF(WM_DEBUG_TX,
   7360 			    ("%s: TX: dmamap load failed: %d\n",
   7361 			    device_xname(sc->sc_dev), error));
   7362 			break;
   7363 		}
   7364 
   7365 		segs_needed = dmamap->dm_nsegs;
   7366 
   7367 		/*
   7368 		 * Ensure we have enough descriptors free to describe
   7369 		 * the packet.  Note, we always reserve one descriptor
   7370 		 * at the end of the ring due to the semantics of the
   7371 		 * TDT register, plus one more in the event we need
   7372 		 * to load offload context.
   7373 		 */
   7374 		if (segs_needed > txq->txq_free - 2) {
   7375 			/*
   7376 			 * Not enough free descriptors to transmit this
   7377 			 * packet.  We haven't committed anything yet,
   7378 			 * so just unload the DMA map, put the packet
    7379 			 * back on the queue, and punt.  Notify the upper
   7380 			 * layer that there are no more slots left.
   7381 			 */
   7382 			DPRINTF(WM_DEBUG_TX,
   7383 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7384 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7385 			    segs_needed, txq->txq_free - 1));
   7386 			if (!is_transmit)
   7387 				ifp->if_flags |= IFF_OACTIVE;
   7388 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7389 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7390 			WM_Q_EVCNT_INCR(txq, txdstall);
   7391 			break;
   7392 		}
   7393 
   7394 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7395 
   7396 		DPRINTF(WM_DEBUG_TX,
   7397 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7398 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7399 
   7400 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7401 
   7402 		/*
   7403 		 * Store a pointer to the packet so that we can free it
   7404 		 * later.
   7405 		 *
    7406 		 * Initially, we consider the number of descriptors the
    7407 		 * packet uses to be the number of DMA segments.  This may
    7408 		 * be incremented by 1 if we do checksum offload (a
    7409 		 * descriptor is used to set the checksum context).
   7410 		 */
   7411 		txs->txs_mbuf = m0;
   7412 		txs->txs_firstdesc = txq->txq_next;
   7413 		txs->txs_ndesc = segs_needed;
   7414 
   7415 		/* Set up offload parameters for this packet. */
   7416 		uint32_t cmdlen, fields, dcmdlen;
   7417 		if (m0->m_pkthdr.csum_flags &
   7418 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7419 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7420 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7421 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7422 			    &do_csum) != 0) {
   7423 				/* Error message already displayed. */
   7424 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7425 				continue;
   7426 			}
   7427 		} else {
   7428 			do_csum = false;
   7429 			cmdlen = 0;
   7430 			fields = 0;
   7431 		}
   7432 
   7433 		/* Sync the DMA map. */
   7434 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7435 		    BUS_DMASYNC_PREWRITE);
   7436 
   7437 		/* Initialize the first transmit descriptor. */
   7438 		nexttx = txq->txq_next;
   7439 		if (!do_csum) {
    7440 			/* Set up a legacy descriptor. */
   7441 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7442 			    dmamap->dm_segs[0].ds_addr);
   7443 			txq->txq_descs[nexttx].wtx_cmdlen =
   7444 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7445 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7446 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7447 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7448 			    NULL) {
   7449 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7450 				    htole32(WTX_CMD_VLE);
   7451 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7452 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7453 			} else {
    7454 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7455 			}
   7456 			dcmdlen = 0;
   7457 		} else {
    7458 			/* Set up an advanced data descriptor. */
   7459 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7460 			    htole64(dmamap->dm_segs[0].ds_addr);
   7461 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7462 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7463 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7464 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7465 			    htole32(fields);
   7466 			DPRINTF(WM_DEBUG_TX,
   7467 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7468 			    device_xname(sc->sc_dev), nexttx,
   7469 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7470 			DPRINTF(WM_DEBUG_TX,
   7471 			    ("\t 0x%08x%08x\n", fields,
   7472 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7473 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7474 		}
   7475 
   7476 		lasttx = nexttx;
   7477 		nexttx = WM_NEXTTX(txq, nexttx);
    7478 		/*
    7479 		 * Fill in the next descriptors.  The legacy and advanced
    7480 		 * formats are the same here.
    7481 		 */
   7482 		for (seg = 1; seg < dmamap->dm_nsegs;
   7483 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7484 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7485 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7486 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7487 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7488 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7489 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7490 			lasttx = nexttx;
   7491 
   7492 			DPRINTF(WM_DEBUG_TX,
   7493 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7494 			     "len %#04zx\n",
   7495 			    device_xname(sc->sc_dev), nexttx,
   7496 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7497 			    dmamap->dm_segs[seg].ds_len));
   7498 		}
   7499 
   7500 		KASSERT(lasttx != -1);
   7501 
   7502 		/*
   7503 		 * Set up the command byte on the last descriptor of
   7504 		 * the packet.  If we're in the interrupt delay window,
   7505 		 * delay the interrupt.
   7506 		 */
   7507 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7508 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
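         		/*
         		 * The legacy and advanced descriptors alias each other
         		 * in the ring, and the KASSERT above checks that the
         		 * EOP and RS bits occupy the same positions in both
         		 * formats, so updating wtx_cmdlen here is safe for
         		 * either layout.
         		 */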
   7509 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7510 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7511 
   7512 		txs->txs_lastdesc = lasttx;
   7513 
   7514 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7515 		    device_xname(sc->sc_dev),
   7516 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7517 
   7518 		/* Sync the descriptors we're using. */
   7519 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7520 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7521 
   7522 		/* Give the packet to the chip. */
   7523 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7524 		sent = true;
   7525 
   7526 		DPRINTF(WM_DEBUG_TX,
   7527 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7528 
   7529 		DPRINTF(WM_DEBUG_TX,
   7530 		    ("%s: TX: finished transmitting packet, job %d\n",
   7531 		    device_xname(sc->sc_dev), txq->txq_snext));
   7532 
   7533 		/* Advance the tx pointer. */
   7534 		txq->txq_free -= txs->txs_ndesc;
   7535 		txq->txq_next = nexttx;
   7536 
   7537 		txq->txq_sfree--;
   7538 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7539 
   7540 		/* Pass the packet to any BPF listeners. */
   7541 		bpf_mtap(ifp, m0);
   7542 	}
   7543 
   7544 	if (m0 != NULL) {
   7545 		if (!is_transmit)
   7546 			ifp->if_flags |= IFF_OACTIVE;
   7547 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7548 		WM_Q_EVCNT_INCR(txq, txdrop);
   7549 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7550 			__func__));
   7551 		m_freem(m0);
   7552 	}
   7553 
   7554 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7555 		/* No more slots; notify upper layer. */
   7556 		if (!is_transmit)
   7557 			ifp->if_flags |= IFF_OACTIVE;
   7558 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7559 	}
   7560 
   7561 	if (sent) {
   7562 		/* Set a watchdog timer in case the chip flakes out. */
   7563 		ifp->if_timer = 5;
   7564 	}
   7565 }
   7566 
   7567 static void
   7568 wm_deferred_start_locked(struct wm_txqueue *txq)
   7569 {
   7570 	struct wm_softc *sc = txq->txq_sc;
   7571 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7572 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7573 	int qid = wmq->wmq_id;
   7574 
   7575 	KASSERT(mutex_owned(txq->txq_lock));
   7576 
   7577 	if (txq->txq_stopping) {
   7578 		mutex_exit(txq->txq_lock);
   7579 		return;
   7580 	}
   7581 
   7582 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7583 		/* XXX needed for ALTQ */
   7584 		if (qid == 0)
   7585 			wm_nq_start_locked(ifp);
   7586 		wm_nq_transmit_locked(ifp, txq);
   7587 	} else {
    7588 		/* XXX needed for ALTQ */
   7589 		if (qid == 0)
   7590 			wm_start_locked(ifp);
   7591 		wm_transmit_locked(ifp, txq);
   7592 	}
   7593 }
   7594 
   7595 /* Interrupt */
   7596 
   7597 /*
   7598  * wm_txeof:
   7599  *
   7600  *	Helper; handle transmit interrupts.
   7601  */
   7602 static int
   7603 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7604 {
   7605 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7606 	struct wm_txsoft *txs;
   7607 	bool processed = false;
   7608 	int count = 0;
   7609 	int i;
   7610 	uint8_t status;
   7611 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7612 
   7613 	KASSERT(mutex_owned(txq->txq_lock));
   7614 
   7615 	if (txq->txq_stopping)
   7616 		return 0;
   7617 
   7618 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7619 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   7620 	if (wmq->wmq_id == 0)
   7621 		ifp->if_flags &= ~IFF_OACTIVE;
   7622 
   7623 	/*
   7624 	 * Go through the Tx list and free mbufs for those
   7625 	 * frames which have been transmitted.
   7626 	 */
   7627 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7628 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7629 		txs = &txq->txq_soft[i];
   7630 
   7631 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7632 			device_xname(sc->sc_dev), i));
   7633 
   7634 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7635 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7636 
   7637 		status =
   7638 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7639 		if ((status & WTX_ST_DD) == 0) {
   7640 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7641 			    BUS_DMASYNC_PREREAD);
   7642 			break;
   7643 		}
   7644 
   7645 		processed = true;
   7646 		count++;
   7647 		DPRINTF(WM_DEBUG_TX,
   7648 		    ("%s: TX: job %d done: descs %d..%d\n",
   7649 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7650 		    txs->txs_lastdesc));
   7651 
   7652 		/*
   7653 		 * XXX We should probably be using the statistics
   7654 		 * XXX registers, but I don't know if they exist
   7655 		 * XXX on chips before the i82544.
   7656 		 */
   7657 
   7658 #ifdef WM_EVENT_COUNTERS
   7659 		if (status & WTX_ST_TU)
   7660 			WM_Q_EVCNT_INCR(txq, tu);
   7661 #endif /* WM_EVENT_COUNTERS */
   7662 
   7663 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7664 			ifp->if_oerrors++;
   7665 			if (status & WTX_ST_LC)
   7666 				log(LOG_WARNING, "%s: late collision\n",
   7667 				    device_xname(sc->sc_dev));
   7668 			else if (status & WTX_ST_EC) {
   7669 				ifp->if_collisions += 16;
   7670 				log(LOG_WARNING, "%s: excessive collisions\n",
   7671 				    device_xname(sc->sc_dev));
   7672 			}
   7673 		} else
   7674 			ifp->if_opackets++;
   7675 
   7676 		txq->txq_packets++;
   7677 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7678 
   7679 		txq->txq_free += txs->txs_ndesc;
   7680 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7681 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7682 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7683 		m_freem(txs->txs_mbuf);
   7684 		txs->txs_mbuf = NULL;
   7685 	}
   7686 
   7687 	/* Update the dirty transmit buffer pointer. */
   7688 	txq->txq_sdirty = i;
   7689 	DPRINTF(WM_DEBUG_TX,
   7690 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7691 
   7692 	if (count != 0)
   7693 		rnd_add_uint32(&sc->rnd_source, count);
   7694 
   7695 	/*
   7696 	 * If there are no more pending transmissions, cancel the watchdog
   7697 	 * timer.
   7698 	 */
   7699 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7700 		ifp->if_timer = 0;
   7701 
   7702 	return processed;
   7703 }
   7704 
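         /*
          * The wm_rxdesc_* accessors below hide the three RX descriptor
          * layouts this driver can use: the legacy format (wrx_* fields),
          * the 82574 extended format (erx_* fields), and the advanced
          * format of 82575 and newer NEWQUEUE devices (nqrx_* fields).
          */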
   7705 static inline uint32_t
   7706 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7707 {
   7708 	struct wm_softc *sc = rxq->rxq_sc;
   7709 
   7710 	if (sc->sc_type == WM_T_82574)
   7711 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7712 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7713 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7714 	else
   7715 		return rxq->rxq_descs[idx].wrx_status;
   7716 }
   7717 
   7718 static inline uint32_t
   7719 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7720 {
   7721 	struct wm_softc *sc = rxq->rxq_sc;
   7722 
   7723 	if (sc->sc_type == WM_T_82574)
   7724 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7725 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7726 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7727 	else
   7728 		return rxq->rxq_descs[idx].wrx_errors;
   7729 }
   7730 
   7731 static inline uint16_t
   7732 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7733 {
   7734 	struct wm_softc *sc = rxq->rxq_sc;
   7735 
   7736 	if (sc->sc_type == WM_T_82574)
   7737 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7738 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7739 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7740 	else
   7741 		return rxq->rxq_descs[idx].wrx_special;
   7742 }
   7743 
   7744 static inline int
   7745 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7746 {
   7747 	struct wm_softc *sc = rxq->rxq_sc;
   7748 
   7749 	if (sc->sc_type == WM_T_82574)
   7750 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7751 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7752 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7753 	else
   7754 		return rxq->rxq_descs[idx].wrx_len;
   7755 }
   7756 
   7757 #ifdef WM_DEBUG
   7758 static inline uint32_t
   7759 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7760 {
   7761 	struct wm_softc *sc = rxq->rxq_sc;
   7762 
   7763 	if (sc->sc_type == WM_T_82574)
   7764 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7765 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7766 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7767 	else
   7768 		return 0;
   7769 }
   7770 
   7771 static inline uint8_t
   7772 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7773 {
   7774 	struct wm_softc *sc = rxq->rxq_sc;
   7775 
   7776 	if (sc->sc_type == WM_T_82574)
   7777 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7778 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7779 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7780 	else
   7781 		return 0;
   7782 }
   7783 #endif /* WM_DEBUG */
   7784 
   7785 static inline bool
   7786 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7787     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7788 {
   7789 
   7790 	if (sc->sc_type == WM_T_82574)
   7791 		return (status & ext_bit) != 0;
   7792 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7793 		return (status & nq_bit) != 0;
   7794 	else
   7795 		return (status & legacy_bit) != 0;
   7796 }
   7797 
   7798 static inline bool
   7799 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7800     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7801 {
   7802 
   7803 	if (sc->sc_type == WM_T_82574)
   7804 		return (error & ext_bit) != 0;
   7805 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7806 		return (error & nq_bit) != 0;
   7807 	else
   7808 		return (error & legacy_bit) != 0;
   7809 }
   7810 
   7811 static inline bool
   7812 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7813 {
   7814 
   7815 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7816 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7817 		return true;
   7818 	else
   7819 		return false;
   7820 }
   7821 
   7822 static inline bool
   7823 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7824 {
   7825 	struct wm_softc *sc = rxq->rxq_sc;
   7826 
   7827 	/* XXXX missing error bit for newqueue? */
    7828 	if (wm_rxdesc_is_set_error(sc, errors,
    7829 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
    7830 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
         		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
    7831 		NQRXC_ERROR_RXE)) {
    7832 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
         		    EXTRXC_ERROR_SE, 0))
    7833 			log(LOG_WARNING, "%s: symbol error\n",
    7834 			    device_xname(sc->sc_dev));
    7835 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
         		    EXTRXC_ERROR_SEQ, 0))
    7836 			log(LOG_WARNING, "%s: receive sequence error\n",
    7837 			    device_xname(sc->sc_dev));
    7838 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
         		    EXTRXC_ERROR_CE, 0))
    7839 			log(LOG_WARNING, "%s: CRC error\n",
    7840 			    device_xname(sc->sc_dev));
    7841 		return true;
    7842 	}
   7843 
   7844 	return false;
   7845 }
   7846 
   7847 static inline bool
   7848 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7849 {
   7850 	struct wm_softc *sc = rxq->rxq_sc;
   7851 
   7852 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7853 		NQRXC_STATUS_DD)) {
   7854 		/* We have processed all of the receive descriptors. */
   7855 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7856 		return false;
   7857 	}
   7858 
   7859 	return true;
   7860 }
   7861 
   7862 static inline bool
    7863 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
    7864     uint16_t vlantag, struct mbuf *m)
   7865 {
   7866 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7867 
   7868 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7869 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7870 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7871 	}
   7872 
   7873 	return true;
   7874 }
   7875 
   7876 static inline void
   7877 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7878     uint32_t errors, struct mbuf *m)
   7879 {
   7880 	struct wm_softc *sc = rxq->rxq_sc;
   7881 
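	/*
	 * The IXSM (ignore checksum indication) bit exists only in the
	 * legacy descriptor format, hence the zero ext/nq bits below.
	 */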
   7882 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7883 		if (wm_rxdesc_is_set_status(sc, status,
   7884 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7885 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7886 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7887 			if (wm_rxdesc_is_set_error(sc, errors,
   7888 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7889 				m->m_pkthdr.csum_flags |=
   7890 					M_CSUM_IPv4_BAD;
   7891 		}
   7892 		if (wm_rxdesc_is_set_status(sc, status,
   7893 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7894 			/*
   7895 			 * Note: we don't know if this was TCP or UDP,
   7896 			 * so we just set both bits, and expect the
   7897 			 * upper layers to deal.
   7898 			 */
   7899 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7900 			m->m_pkthdr.csum_flags |=
   7901 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7902 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7903 			if (wm_rxdesc_is_set_error(sc, errors,
   7904 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7905 				m->m_pkthdr.csum_flags |=
   7906 					M_CSUM_TCP_UDP_BAD;
   7907 		}
   7908 	}
   7909 }
   7910 
/*
 * wm_rxeof:
 *
 *	Helper; handle receive interrupts.  Process at most "limit"
 *	descriptors before returning.
 */
   7916 static void
   7917 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7918 {
   7919 	struct wm_softc *sc = rxq->rxq_sc;
   7920 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7921 	struct wm_rxsoft *rxs;
   7922 	struct mbuf *m;
   7923 	int i, len;
   7924 	int count = 0;
   7925 	uint32_t status, errors;
   7926 	uint16_t vlantag;
   7927 
   7928 	KASSERT(mutex_owned(rxq->rxq_lock));
   7929 
   7930 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7931 		if (limit-- == 0) {
   7932 			rxq->rxq_ptr = i;
   7933 			break;
   7934 		}
   7935 
   7936 		rxs = &rxq->rxq_soft[i];
   7937 
   7938 		DPRINTF(WM_DEBUG_RX,
   7939 		    ("%s: RX: checking descriptor %d\n",
   7940 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7942 
   7943 		status = wm_rxdesc_get_status(rxq, i);
   7944 		errors = wm_rxdesc_get_errors(rxq, i);
   7945 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7946 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7947 #ifdef WM_DEBUG
   7948 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7949 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7950 #endif
   7951 
   7952 		if (!wm_rxdesc_dd(rxq, i, status)) {
   7953 			/*
   7954 			 * Update the receive pointer holding rxq_lock
   7955 			 * consistent with increment counter.
   7956 			 */
   7957 			rxq->rxq_ptr = i;
   7958 			break;
   7959 		}
   7960 
   7961 		count++;
   7962 		if (__predict_false(rxq->rxq_discard)) {
   7963 			DPRINTF(WM_DEBUG_RX,
   7964 			    ("%s: RX: discarding contents of descriptor %d\n",
   7965 			    device_xname(sc->sc_dev), i));
   7966 			wm_init_rxdesc(rxq, i);
   7967 			if (wm_rxdesc_is_eop(rxq, status)) {
   7968 				/* Reset our state. */
   7969 				DPRINTF(WM_DEBUG_RX,
   7970 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7971 				    device_xname(sc->sc_dev)));
   7972 				rxq->rxq_discard = 0;
   7973 			}
   7974 			continue;
   7975 		}
   7976 
   7977 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7978 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7979 
   7980 		m = rxs->rxs_mbuf;
   7981 
   7982 		/*
   7983 		 * Add a new receive buffer to the ring, unless of
   7984 		 * course the length is zero. Treat the latter as a
   7985 		 * failed mapping.
   7986 		 */
   7987 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7988 			/*
   7989 			 * Failed, throw away what we've done so
   7990 			 * far, and discard the rest of the packet.
   7991 			 */
   7992 			ifp->if_ierrors++;
   7993 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7994 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7995 			wm_init_rxdesc(rxq, i);
   7996 			if (!wm_rxdesc_is_eop(rxq, status))
   7997 				rxq->rxq_discard = 1;
   7998 			if (rxq->rxq_head != NULL)
   7999 				m_freem(rxq->rxq_head);
   8000 			WM_RXCHAIN_RESET(rxq);
   8001 			DPRINTF(WM_DEBUG_RX,
   8002 			    ("%s: RX: Rx buffer allocation failed, "
   8003 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8004 			    rxq->rxq_discard ? " (discard)" : ""));
   8005 			continue;
   8006 		}
   8007 
   8008 		m->m_len = len;
   8009 		rxq->rxq_len += len;
   8010 		DPRINTF(WM_DEBUG_RX,
   8011 		    ("%s: RX: buffer at %p len %d\n",
   8012 		    device_xname(sc->sc_dev), m->m_data, len));
   8013 
   8014 		/* If this is not the end of the packet, keep looking. */
   8015 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8016 			WM_RXCHAIN_LINK(rxq, m);
   8017 			DPRINTF(WM_DEBUG_RX,
   8018 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8019 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8020 			continue;
   8021 		}
   8022 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350,
		 * I354 and I21[01] (not all chips can be configured
		 * to strip it), so we need to trim the 4-byte FCS
		 * (ETHER_CRC_LEN) here.  We may also need to adjust
		 * the length of the previous mbuf in the chain if the
		 * current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the hardware
		 * strips the FCS there and we must not trim it again.
		 */
   8033 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8034 		    && (sc->sc_type != WM_T_I210)
   8035 		    && (sc->sc_type != WM_T_I211)) {
   8036 			if (m->m_len < ETHER_CRC_LEN) {
   8037 				rxq->rxq_tail->m_len
   8038 				    -= (ETHER_CRC_LEN - m->m_len);
   8039 				m->m_len = 0;
   8040 			} else
   8041 				m->m_len -= ETHER_CRC_LEN;
   8042 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8043 		} else
   8044 			len = rxq->rxq_len;
   8045 
   8046 		WM_RXCHAIN_LINK(rxq, m);
   8047 
   8048 		*rxq->rxq_tailp = NULL;
   8049 		m = rxq->rxq_head;
   8050 
   8051 		WM_RXCHAIN_RESET(rxq);
   8052 
   8053 		DPRINTF(WM_DEBUG_RX,
   8054 		    ("%s: RX: have entire packet, len -> %d\n",
   8055 		    device_xname(sc->sc_dev), len));
   8056 
   8057 		/* If an error occurred, update stats and drop the packet. */
   8058 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8059 			m_freem(m);
   8060 			continue;
   8061 		}
   8062 
   8063 		/* No errors.  Receive the packet. */
   8064 		m_set_rcvif(m, ifp);
   8065 		m->m_pkthdr.len = len;
		/*
		 * TODO: save the RSS hash and RSS type in this mbuf.
		 */
   8070 		DPRINTF(WM_DEBUG_RX,
   8071 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8072 			device_xname(sc->sc_dev), rsstype, rsshash));
   8073 
   8074 		/*
   8075 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8076 		 * for us.  Associate the tag with the packet.
   8077 		 */
   8078 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8079 			continue;
   8080 
   8081 		/* Set up checksum info for this packet. */
   8082 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8083 		/*
   8084 		 * Update the receive pointer holding rxq_lock consistent with
   8085 		 * increment counter.
   8086 		 */
   8087 		rxq->rxq_ptr = i;
   8088 		rxq->rxq_packets++;
   8089 		rxq->rxq_bytes += len;
   8090 		mutex_exit(rxq->rxq_lock);
   8091 
   8092 		/* Pass it on. */
   8093 		if_percpuq_enqueue(sc->sc_ipq, m);
   8094 
   8095 		mutex_enter(rxq->rxq_lock);
   8096 
   8097 		if (rxq->rxq_stopping)
   8098 			break;
   8099 	}
   8100 
   8101 	if (count != 0)
   8102 		rnd_add_uint32(&sc->rnd_source, count);
   8103 
   8104 	DPRINTF(WM_DEBUG_RX,
   8105 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8106 }
   8107 
   8108 /*
   8109  * wm_linkintr_gmii:
   8110  *
   8111  *	Helper; handle link interrupts for GMII.
   8112  */
   8113 static void
   8114 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8115 {
   8116 
   8117 	KASSERT(WM_CORE_LOCKED(sc));
   8118 
   8119 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8120 		__func__));
   8121 
   8122 	if (icr & ICR_LSC) {
   8123 		uint32_t reg;
   8124 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8125 
   8126 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8127 			wm_gig_downshift_workaround_ich8lan(sc);
   8128 
   8129 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8130 			device_xname(sc->sc_dev)));
   8131 		mii_pollstat(&sc->sc_mii);
   8132 		if (sc->sc_type == WM_T_82543) {
   8133 			int miistatus, active;
   8134 
   8135 			/*
   8136 			 * With 82543, we need to force speed and
   8137 			 * duplex on the MAC equal to what the PHY
   8138 			 * speed and duplex configuration is.
   8139 			 */
   8140 			miistatus = sc->sc_mii.mii_media_status;
   8141 
   8142 			if (miistatus & IFM_ACTIVE) {
   8143 				active = sc->sc_mii.mii_media_active;
   8144 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8145 				switch (IFM_SUBTYPE(active)) {
   8146 				case IFM_10_T:
   8147 					sc->sc_ctrl |= CTRL_SPEED_10;
   8148 					break;
   8149 				case IFM_100_TX:
   8150 					sc->sc_ctrl |= CTRL_SPEED_100;
   8151 					break;
   8152 				case IFM_1000_T:
   8153 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8154 					break;
   8155 				default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
   8160 					printf("unknown media (%x)\n", active);
   8161 					break;
   8162 				}
   8163 				if (active & IFM_FDX)
   8164 					sc->sc_ctrl |= CTRL_FD;
   8165 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8166 			}
   8167 		} else if ((sc->sc_type == WM_T_ICH8)
   8168 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8169 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8170 		} else if (sc->sc_type == WM_T_PCH) {
   8171 			wm_k1_gig_workaround_hv(sc,
   8172 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8173 		}
   8174 
   8175 		if ((sc->sc_phytype == WMPHY_82578)
   8176 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8177 			== IFM_1000_T)) {
   8178 
   8179 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8180 				delay(200*1000); /* XXX too big */
   8181 
   8182 				/* Link stall fix for link up */
   8183 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8184 				    HV_MUX_DATA_CTRL,
   8185 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8186 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8187 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8188 				    HV_MUX_DATA_CTRL,
   8189 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8190 			}
   8191 		}
   8192 		/*
   8193 		 * I217 Packet Loss issue:
   8194 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8195 		 * on power up.
   8196 		 * Set the Beacon Duration for I217 to 8 usec
   8197 		 */
   8198 		if ((sc->sc_type == WM_T_PCH_LPT)
   8199 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8200 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8201 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8202 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8203 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8204 		}
   8205 
   8206 		/* XXX Work-around I218 hang issue */
   8207 		/* e1000_k1_workaround_lpt_lp() */
   8208 
   8209 		if ((sc->sc_type == WM_T_PCH_LPT)
   8210 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8211 			/*
   8212 			 * Set platform power management values for Latency
   8213 			 * Tolerance Reporting (LTR)
   8214 			 */
   8215 			wm_platform_pm_pch_lpt(sc,
   8216 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8217 				    != 0));
   8218 		}
   8219 
   8220 		/* FEXTNVM6 K1-off workaround */
   8221 		if (sc->sc_type == WM_T_PCH_SPT) {
   8222 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8223 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8224 			    & FEXTNVM6_K1_OFF_ENABLE)
   8225 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8226 			else
   8227 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8228 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8229 		}
   8230 	} else if (icr & ICR_RXSEQ) {
   8231 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8232 			device_xname(sc->sc_dev)));
   8233 	}
   8234 }
   8235 
   8236 /*
   8237  * wm_linkintr_tbi:
   8238  *
   8239  *	Helper; handle link interrupts for TBI mode.
   8240  */
   8241 static void
   8242 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8243 {
   8244 	uint32_t status;
   8245 
   8246 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8247 		__func__));
   8248 
   8249 	status = CSR_READ(sc, WMREG_STATUS);
   8250 	if (icr & ICR_LSC) {
   8251 		if (status & STATUS_LU) {
   8252 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8253 			    device_xname(sc->sc_dev),
   8254 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8255 			/*
   8256 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8257 			 * so we should update sc->sc_ctrl
   8258 			 */
   8259 
   8260 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8261 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8262 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8263 			if (status & STATUS_FD)
   8264 				sc->sc_tctl |=
   8265 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8266 			else
   8267 				sc->sc_tctl |=
   8268 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8269 			if (sc->sc_ctrl & CTRL_TFCE)
   8270 				sc->sc_fcrtl |= FCRTL_XONE;
   8271 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8272 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8273 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8274 				      sc->sc_fcrtl);
   8275 			sc->sc_tbi_linkup = 1;
   8276 		} else {
   8277 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8278 			    device_xname(sc->sc_dev)));
   8279 			sc->sc_tbi_linkup = 0;
   8280 		}
   8281 		/* Update LED */
   8282 		wm_tbi_serdes_set_linkled(sc);
   8283 	} else if (icr & ICR_RXSEQ) {
   8284 		DPRINTF(WM_DEBUG_LINK,
   8285 		    ("%s: LINK: Receive sequence error\n",
   8286 		    device_xname(sc->sc_dev)));
   8287 	}
   8288 }
   8289 
   8290 /*
   8291  * wm_linkintr_serdes:
   8292  *
 *	Helper; handle link interrupts for SERDES mode.
   8294  */
   8295 static void
   8296 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8297 {
   8298 	struct mii_data *mii = &sc->sc_mii;
   8299 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8300 	uint32_t pcs_adv, pcs_lpab, reg;
   8301 
   8302 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8303 		__func__));
   8304 
   8305 	if (icr & ICR_LSC) {
   8306 		/* Check PCS */
   8307 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8308 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8309 			mii->mii_media_status |= IFM_ACTIVE;
   8310 			sc->sc_tbi_linkup = 1;
   8311 		} else {
   8312 			mii->mii_media_status |= IFM_NONE;
   8313 			sc->sc_tbi_linkup = 0;
   8314 			wm_tbi_serdes_set_linkled(sc);
   8315 			return;
   8316 		}
   8317 		mii->mii_media_active |= IFM_1000_SX;
   8318 		if ((reg & PCS_LSTS_FDX) != 0)
   8319 			mii->mii_media_active |= IFM_FDX;
   8320 		else
   8321 			mii->mii_media_active |= IFM_HDX;
   8322 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8323 			/* Check flow */
   8324 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8325 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8326 				DPRINTF(WM_DEBUG_LINK,
   8327 				    ("XXX LINKOK but not ACOMP\n"));
   8328 				return;
   8329 			}
   8330 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8331 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8332 			DPRINTF(WM_DEBUG_LINK,
   8333 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8334 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8335 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8336 				mii->mii_media_active |= IFM_FLOW
   8337 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8338 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8339 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8340 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8341 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8342 				mii->mii_media_active |= IFM_FLOW
   8343 				    | IFM_ETH_TXPAUSE;
   8344 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8345 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8346 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8347 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8348 				mii->mii_media_active |= IFM_FLOW
   8349 				    | IFM_ETH_RXPAUSE;
   8350 		}
   8351 		/* Update LED */
   8352 		wm_tbi_serdes_set_linkled(sc);
   8353 	} else {
   8354 		DPRINTF(WM_DEBUG_LINK,
   8355 		    ("%s: LINK: Receive sequence error\n",
   8356 		    device_xname(sc->sc_dev)));
   8357 	}
   8358 }
   8359 
   8360 /*
   8361  * wm_linkintr:
   8362  *
   8363  *	Helper; handle link interrupts.
   8364  */
   8365 static void
   8366 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8367 {
   8368 
   8369 	KASSERT(WM_CORE_LOCKED(sc));
   8370 
   8371 	if (sc->sc_flags & WM_F_HAS_MII)
   8372 		wm_linkintr_gmii(sc, icr);
   8373 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8374 	    && (sc->sc_type >= WM_T_82575))
   8375 		wm_linkintr_serdes(sc, icr);
   8376 	else
   8377 		wm_linkintr_tbi(sc, icr);
   8378 }
   8379 
   8380 /*
   8381  * wm_intr_legacy:
   8382  *
   8383  *	Interrupt service routine for INTx and MSI.
   8384  */
   8385 static int
   8386 wm_intr_legacy(void *arg)
   8387 {
   8388 	struct wm_softc *sc = arg;
   8389 	struct wm_queue *wmq = &sc->sc_queue[0];
   8390 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8391 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8392 	uint32_t icr, rndval = 0;
   8393 	int handled = 0;
   8394 
   8395 	DPRINTF(WM_DEBUG_TX,
   8396 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8397 	while (1 /* CONSTCOND */) {
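		/*
		 * Reading ICR acknowledges the interrupt causes that
		 * are currently asserted, so keep looping until no
		 * enabled cause remains set.
		 */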
   8398 		icr = CSR_READ(sc, WMREG_ICR);
   8399 		if ((icr & sc->sc_icr) == 0)
   8400 			break;
   8401 		if (rndval == 0)
   8402 			rndval = icr;
   8403 
   8404 		mutex_enter(rxq->rxq_lock);
   8405 
   8406 		if (rxq->rxq_stopping) {
   8407 			mutex_exit(rxq->rxq_lock);
   8408 			break;
   8409 		}
   8410 
   8411 		handled = 1;
   8412 
   8413 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8414 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8415 			DPRINTF(WM_DEBUG_RX,
   8416 			    ("%s: RX: got Rx intr 0x%08x\n",
   8417 			    device_xname(sc->sc_dev),
   8418 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8419 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8420 		}
   8421 #endif
   8422 		wm_rxeof(rxq, UINT_MAX);
   8423 
   8424 		mutex_exit(rxq->rxq_lock);
   8425 		mutex_enter(txq->txq_lock);
   8426 
   8427 		if (txq->txq_stopping) {
   8428 			mutex_exit(txq->txq_lock);
   8429 			break;
   8430 		}
   8431 
   8432 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8433 		if (icr & ICR_TXDW) {
   8434 			DPRINTF(WM_DEBUG_TX,
   8435 			    ("%s: TX: got TXDW interrupt\n",
   8436 			    device_xname(sc->sc_dev)));
   8437 			WM_Q_EVCNT_INCR(txq, txdw);
   8438 		}
   8439 #endif
   8440 		wm_txeof(sc, txq);
   8441 
   8442 		mutex_exit(txq->txq_lock);
   8443 		WM_CORE_LOCK(sc);
   8444 
   8445 		if (sc->sc_core_stopping) {
   8446 			WM_CORE_UNLOCK(sc);
   8447 			break;
   8448 		}
   8449 
   8450 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8451 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8452 			wm_linkintr(sc, icr);
   8453 		}
   8454 
   8455 		WM_CORE_UNLOCK(sc);
   8456 
   8457 		if (icr & ICR_RXO) {
   8458 #if defined(WM_DEBUG)
   8459 			log(LOG_WARNING, "%s: Receive overrun\n",
   8460 			    device_xname(sc->sc_dev));
   8461 #endif /* defined(WM_DEBUG) */
   8462 		}
   8463 	}
   8464 
   8465 	rnd_add_uint32(&sc->rnd_source, rndval);
   8466 
   8467 	if (handled) {
   8468 		/* Try to get more packets going. */
   8469 		softint_schedule(wmq->wmq_si);
   8470 	}
   8471 
   8472 	return handled;
   8473 }
   8474 
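/*
 * Per-queue interrupt masking differs by chip: the 82574 uses the
 * per-queue TXQ/RXQ bits in IMC/IMS, the 82575 uses its TX/RX queue
 * bits in EIMC/EIMS, and the other MSI-X capable chips use one
 * EIMC/EIMS bit per interrupt vector.
 */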
   8475 static inline void
   8476 wm_txrxintr_disable(struct wm_queue *wmq)
   8477 {
   8478 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8479 
   8480 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8482 	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8484 	else
   8485 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8486 }
   8487 
   8488 static inline void
   8489 wm_txrxintr_enable(struct wm_queue *wmq)
   8490 {
   8491 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8492 
   8493 	wm_itrs_calculate(sc, wmq);
   8494 
   8495 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8497 	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8499 	else
   8500 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8501 }
   8502 
   8503 static int
   8504 wm_txrxintr_msix(void *arg)
   8505 {
   8506 	struct wm_queue *wmq = arg;
   8507 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8508 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8509 	struct wm_softc *sc = txq->txq_sc;
   8510 	u_int limit = sc->sc_rx_intr_process_limit;
   8511 
   8512 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8513 
   8514 	DPRINTF(WM_DEBUG_TX,
   8515 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8516 
   8517 	wm_txrxintr_disable(wmq);
   8518 
   8519 	mutex_enter(txq->txq_lock);
   8520 
   8521 	if (txq->txq_stopping) {
   8522 		mutex_exit(txq->txq_lock);
   8523 		return 0;
   8524 	}
   8525 
   8526 	WM_Q_EVCNT_INCR(txq, txdw);
   8527 	wm_txeof(sc, txq);
	/* wm_deferred_start() is done in wm_handle_queue(). */
   8529 	mutex_exit(txq->txq_lock);
   8530 
   8531 	DPRINTF(WM_DEBUG_RX,
   8532 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8533 	mutex_enter(rxq->rxq_lock);
   8534 
   8535 	if (rxq->rxq_stopping) {
   8536 		mutex_exit(rxq->rxq_lock);
   8537 		return 0;
   8538 	}
   8539 
   8540 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8541 	wm_rxeof(rxq, limit);
   8542 	mutex_exit(rxq->rxq_lock);
   8543 
   8544 	wm_itrs_writereg(sc, wmq);
   8545 
   8546 	softint_schedule(wmq->wmq_si);
   8547 
   8548 	return 1;
   8549 }
   8550 
   8551 static void
   8552 wm_handle_queue(void *arg)
   8553 {
   8554 	struct wm_queue *wmq = arg;
   8555 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8556 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8557 	struct wm_softc *sc = txq->txq_sc;
   8558 	u_int limit = sc->sc_rx_process_limit;
   8559 
   8560 	mutex_enter(txq->txq_lock);
   8561 	if (txq->txq_stopping) {
   8562 		mutex_exit(txq->txq_lock);
   8563 		return;
   8564 	}
   8565 	wm_txeof(sc, txq);
   8566 	wm_deferred_start_locked(txq);
   8567 	mutex_exit(txq->txq_lock);
   8568 
   8569 	mutex_enter(rxq->rxq_lock);
   8570 	if (rxq->rxq_stopping) {
   8571 		mutex_exit(rxq->rxq_lock);
   8572 		return;
   8573 	}
   8574 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8575 	wm_rxeof(rxq, limit);
   8576 	mutex_exit(rxq->rxq_lock);
   8577 
   8578 	wm_txrxintr_enable(wmq);
   8579 }
   8580 
   8581 /*
   8582  * wm_linkintr_msix:
   8583  *
   8584  *	Interrupt service routine for link status change for MSI-X.
   8585  */
   8586 static int
   8587 wm_linkintr_msix(void *arg)
   8588 {
   8589 	struct wm_softc *sc = arg;
   8590 	uint32_t reg;
   8591 
   8592 	DPRINTF(WM_DEBUG_LINK,
   8593 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8594 
   8595 	reg = CSR_READ(sc, WMREG_ICR);
   8596 	WM_CORE_LOCK(sc);
   8597 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8598 		goto out;
   8599 
   8600 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8601 	wm_linkintr(sc, ICR_LSC);
   8602 
   8603 out:
   8604 	WM_CORE_UNLOCK(sc);
   8605 
   8606 	if (sc->sc_type == WM_T_82574)
   8607 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8608 	else if (sc->sc_type == WM_T_82575)
   8609 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8610 	else
   8611 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8612 
   8613 	return 1;
   8614 }
   8615 
   8616 /*
   8617  * Media related.
   8618  * GMII, SGMII, TBI (and SERDES)
   8619  */
   8620 
   8621 /* Common */
   8622 
   8623 /*
   8624  * wm_tbi_serdes_set_linkled:
   8625  *
   8626  *	Update the link LED on TBI and SERDES devices.
   8627  */
   8628 static void
   8629 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8630 {
   8631 
   8632 	if (sc->sc_tbi_linkup)
   8633 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8634 	else
   8635 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8636 
   8637 	/* 82540 or newer devices are active low */
   8638 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8639 
   8640 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8641 }
   8642 
   8643 /* GMII related */
   8644 
   8645 /*
   8646  * wm_gmii_reset:
   8647  *
   8648  *	Reset the PHY.
   8649  */
   8650 static void
   8651 wm_gmii_reset(struct wm_softc *sc)
   8652 {
   8653 	uint32_t reg;
   8654 	int rv;
   8655 
   8656 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8657 		device_xname(sc->sc_dev), __func__));
   8658 
   8659 	rv = sc->phy.acquire(sc);
   8660 	if (rv != 0) {
   8661 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8662 		    __func__);
   8663 		return;
   8664 	}
   8665 
   8666 	switch (sc->sc_type) {
   8667 	case WM_T_82542_2_0:
   8668 	case WM_T_82542_2_1:
   8669 		/* null */
   8670 		break;
   8671 	case WM_T_82543:
   8672 		/*
   8673 		 * With 82543, we need to force speed and duplex on the MAC
   8674 		 * equal to what the PHY speed and duplex configuration is.
   8675 		 * In addition, we need to perform a hardware reset on the PHY
   8676 		 * to take it out of reset.
   8677 		 */
   8678 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8679 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8680 
   8681 		/* The PHY reset pin is active-low. */
   8682 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8683 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8684 		    CTRL_EXT_SWDPIN(4));
   8685 		reg |= CTRL_EXT_SWDPIO(4);
   8686 
   8687 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8688 		CSR_WRITE_FLUSH(sc);
   8689 		delay(10*1000);
   8690 
   8691 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8692 		CSR_WRITE_FLUSH(sc);
   8693 		delay(150);
   8694 #if 0
   8695 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8696 #endif
   8697 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8698 		break;
   8699 	case WM_T_82544:	/* reset 10000us */
   8700 	case WM_T_82540:
   8701 	case WM_T_82545:
   8702 	case WM_T_82545_3:
   8703 	case WM_T_82546:
   8704 	case WM_T_82546_3:
   8705 	case WM_T_82541:
   8706 	case WM_T_82541_2:
   8707 	case WM_T_82547:
   8708 	case WM_T_82547_2:
   8709 	case WM_T_82571:	/* reset 100us */
   8710 	case WM_T_82572:
   8711 	case WM_T_82573:
   8712 	case WM_T_82574:
   8713 	case WM_T_82575:
   8714 	case WM_T_82576:
   8715 	case WM_T_82580:
   8716 	case WM_T_I350:
   8717 	case WM_T_I354:
   8718 	case WM_T_I210:
   8719 	case WM_T_I211:
   8720 	case WM_T_82583:
   8721 	case WM_T_80003:
   8722 		/* generic reset */
   8723 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8724 		CSR_WRITE_FLUSH(sc);
   8725 		delay(20000);
   8726 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8727 		CSR_WRITE_FLUSH(sc);
   8728 		delay(20000);
   8729 
   8730 		if ((sc->sc_type == WM_T_82541)
   8731 		    || (sc->sc_type == WM_T_82541_2)
   8732 		    || (sc->sc_type == WM_T_82547)
   8733 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
   8735 			/* XXX add code to set LED after phy reset */
   8736 		}
   8737 		break;
   8738 	case WM_T_ICH8:
   8739 	case WM_T_ICH9:
   8740 	case WM_T_ICH10:
   8741 	case WM_T_PCH:
   8742 	case WM_T_PCH2:
   8743 	case WM_T_PCH_LPT:
   8744 	case WM_T_PCH_SPT:
   8745 		/* generic reset */
   8746 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8747 		CSR_WRITE_FLUSH(sc);
   8748 		delay(100);
   8749 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8750 		CSR_WRITE_FLUSH(sc);
   8751 		delay(150);
   8752 		break;
   8753 	default:
   8754 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8755 		    __func__);
   8756 		break;
   8757 	}
   8758 
   8759 	sc->phy.release(sc);
   8760 
   8761 	/* get_cfg_done */
   8762 	wm_get_cfg_done(sc);
   8763 
   8764 	/* extra setup */
   8765 	switch (sc->sc_type) {
   8766 	case WM_T_82542_2_0:
   8767 	case WM_T_82542_2_1:
   8768 	case WM_T_82543:
   8769 	case WM_T_82544:
   8770 	case WM_T_82540:
   8771 	case WM_T_82545:
   8772 	case WM_T_82545_3:
   8773 	case WM_T_82546:
   8774 	case WM_T_82546_3:
   8775 	case WM_T_82541_2:
   8776 	case WM_T_82547_2:
   8777 	case WM_T_82571:
   8778 	case WM_T_82572:
   8779 	case WM_T_82573:
   8780 	case WM_T_82575:
   8781 	case WM_T_82576:
   8782 	case WM_T_82580:
   8783 	case WM_T_I350:
   8784 	case WM_T_I354:
   8785 	case WM_T_I210:
   8786 	case WM_T_I211:
   8787 	case WM_T_80003:
   8788 		/* null */
   8789 		break;
   8790 	case WM_T_82574:
   8791 	case WM_T_82583:
   8792 		wm_lplu_d0_disable(sc);
   8793 		break;
   8794 	case WM_T_82541:
   8795 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   8797 		break;
   8798 	case WM_T_ICH8:
   8799 	case WM_T_ICH9:
   8800 	case WM_T_ICH10:
   8801 	case WM_T_PCH:
   8802 	case WM_T_PCH2:
   8803 	case WM_T_PCH_LPT:
   8804 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8806 		delay(10*1000);
   8807 
   8808 		if (sc->sc_type == WM_T_PCH)
   8809 			wm_hv_phy_workaround_ich8lan(sc);
   8810 
   8811 		if (sc->sc_type == WM_T_PCH2)
   8812 			wm_lv_phy_workaround_ich8lan(sc);
   8813 
   8814 		/* Clear the host wakeup bit after lcd reset */
   8815 		if (sc->sc_type >= WM_T_PCH) {
   8816 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8817 			    BM_PORT_GEN_CFG);
   8818 			reg &= ~BM_WUC_HOST_WU_BIT;
   8819 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8820 			    BM_PORT_GEN_CFG, reg);
   8821 		}
   8822 
   8823 		/*
		 * XXX Configure the LCD with the extended configuration region
   8825 		 * in NVM
   8826 		 */
   8827 
   8828 		/* Disable D0 LPLU. */
   8829 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8830 			wm_lplu_d0_disable_pch(sc);
   8831 		else
   8832 			wm_lplu_d0_disable(sc);	/* ICH* */
   8833 		break;
   8834 	default:
   8835 		panic("%s: unknown type\n", __func__);
   8836 		break;
   8837 	}
   8838 }
   8839 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write function must be
 * selected.  To select the correct read/write function, the PCI ID or
 * MAC type is required without accessing PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet.
 * Check the PCI ID or MAC type.  The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  It might not be perfect because of missing comparison
 * entries, but it should be better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   8858 static void
   8859 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8860     uint16_t phy_model)
   8861 {
   8862 	device_t dev = sc->sc_dev;
   8863 	struct mii_data *mii = &sc->sc_mii;
   8864 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8865 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8866 	mii_readreg_t new_readreg;
   8867 	mii_writereg_t new_writereg;
   8868 
   8869 	if (mii->mii_readreg == NULL) {
   8870 		/*
   8871 		 *  This is the first call of this function. For ICH and PCH
   8872 		 * variants, it's difficult to determine the PHY access method
   8873 		 * by sc_type, so use the PCI product ID for some devices.
   8874 		 */
   8875 
   8876 		switch (sc->sc_pcidevid) {
   8877 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8878 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8879 			/* 82577 */
   8880 			new_phytype = WMPHY_82577;
   8881 			break;
   8882 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8883 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8884 			/* 82578 */
   8885 			new_phytype = WMPHY_82578;
   8886 			break;
   8887 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8888 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8889 			/* 82579 */
   8890 			new_phytype = WMPHY_82579;
   8891 			break;
   8892 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8893 		case PCI_PRODUCT_INTEL_82801I_BM:
   8894 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8895 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8896 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8897 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8898 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8899 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8900 			/* ICH8, 9, 10 with 82567 */
   8901 			new_phytype = WMPHY_BM;
   8902 			break;
   8903 		default:
   8904 			break;
   8905 		}
   8906 	} else {
   8907 		/* It's not the first call. Use PHY OUI and model */
   8908 		switch (phy_oui) {
   8909 		case MII_OUI_ATHEROS: /* XXX ??? */
   8910 			switch (phy_model) {
   8911 			case 0x0004: /* XXX */
   8912 				new_phytype = WMPHY_82578;
   8913 				break;
   8914 			default:
   8915 				break;
   8916 			}
   8917 			break;
   8918 		case MII_OUI_xxMARVELL:
   8919 			switch (phy_model) {
   8920 			case MII_MODEL_xxMARVELL_I210:
   8921 				new_phytype = WMPHY_I210;
   8922 				break;
   8923 			case MII_MODEL_xxMARVELL_E1011:
   8924 			case MII_MODEL_xxMARVELL_E1000_3:
   8925 			case MII_MODEL_xxMARVELL_E1000_5:
   8926 			case MII_MODEL_xxMARVELL_E1112:
   8927 				new_phytype = WMPHY_M88;
   8928 				break;
   8929 			case MII_MODEL_xxMARVELL_E1149:
   8930 				new_phytype = WMPHY_BM;
   8931 				break;
   8932 			case MII_MODEL_xxMARVELL_E1111:
   8933 			case MII_MODEL_xxMARVELL_I347:
   8934 			case MII_MODEL_xxMARVELL_E1512:
   8935 			case MII_MODEL_xxMARVELL_E1340M:
   8936 			case MII_MODEL_xxMARVELL_E1543:
   8937 				new_phytype = WMPHY_M88;
   8938 				break;
   8939 			case MII_MODEL_xxMARVELL_I82563:
   8940 				new_phytype = WMPHY_GG82563;
   8941 				break;
   8942 			default:
   8943 				break;
   8944 			}
   8945 			break;
   8946 		case MII_OUI_INTEL:
   8947 			switch (phy_model) {
   8948 			case MII_MODEL_INTEL_I82577:
   8949 				new_phytype = WMPHY_82577;
   8950 				break;
   8951 			case MII_MODEL_INTEL_I82579:
   8952 				new_phytype = WMPHY_82579;
   8953 				break;
   8954 			case MII_MODEL_INTEL_I217:
   8955 				new_phytype = WMPHY_I217;
   8956 				break;
   8957 			case MII_MODEL_INTEL_I82580:
   8958 			case MII_MODEL_INTEL_I350:
   8959 				new_phytype = WMPHY_82580;
   8960 				break;
   8961 			default:
   8962 				break;
   8963 			}
   8964 			break;
   8965 		case MII_OUI_yyINTEL:
   8966 			switch (phy_model) {
   8967 			case MII_MODEL_yyINTEL_I82562G:
   8968 			case MII_MODEL_yyINTEL_I82562EM:
   8969 			case MII_MODEL_yyINTEL_I82562ET:
   8970 				new_phytype = WMPHY_IFE;
   8971 				break;
   8972 			case MII_MODEL_yyINTEL_IGP01E1000:
   8973 				new_phytype = WMPHY_IGP;
   8974 				break;
   8975 			case MII_MODEL_yyINTEL_I82566:
   8976 				new_phytype = WMPHY_IGP_3;
   8977 				break;
   8978 			default:
   8979 				break;
   8980 			}
   8981 			break;
   8982 		default:
   8983 			break;
   8984 		}
   8985 		if (new_phytype == WMPHY_UNKNOWN)
   8986 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8987 			    __func__);
   8988 
   8989 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev,
			    "Previously assumed PHY type(%u) was incorrect. "
			    "PHY type from PHY ID = %u\n",
   8993 			    sc->sc_phytype, new_phytype);
   8994 		}
   8995 	}
   8996 
   8997 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8998 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8999 		/* SGMII */
   9000 		new_readreg = wm_sgmii_readreg;
   9001 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   9003 		/* BM2 (phyaddr == 1) */
   9004 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9005 		    && (new_phytype != WMPHY_BM)
   9006 		    && (new_phytype != WMPHY_UNKNOWN))
   9007 			doubt_phytype = new_phytype;
   9008 		new_phytype = WMPHY_BM;
   9009 		new_readreg = wm_gmii_bm_readreg;
   9010 		new_writereg = wm_gmii_bm_writereg;
   9011 	} else if (sc->sc_type >= WM_T_PCH) {
   9012 		/* All PCH* use _hv_ */
   9013 		new_readreg = wm_gmii_hv_readreg;
   9014 		new_writereg = wm_gmii_hv_writereg;
   9015 	} else if (sc->sc_type >= WM_T_ICH8) {
   9016 		/* non-82567 ICH8, 9 and 10 */
   9017 		new_readreg = wm_gmii_i82544_readreg;
   9018 		new_writereg = wm_gmii_i82544_writereg;
   9019 	} else if (sc->sc_type >= WM_T_80003) {
   9020 		/* 80003 */
   9021 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9022 		    && (new_phytype != WMPHY_GG82563)
   9023 		    && (new_phytype != WMPHY_UNKNOWN))
   9024 			doubt_phytype = new_phytype;
   9025 		new_phytype = WMPHY_GG82563;
   9026 		new_readreg = wm_gmii_i80003_readreg;
   9027 		new_writereg = wm_gmii_i80003_writereg;
   9028 	} else if (sc->sc_type >= WM_T_I210) {
   9029 		/* I210 and I211 */
   9030 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9031 		    && (new_phytype != WMPHY_I210)
   9032 		    && (new_phytype != WMPHY_UNKNOWN))
   9033 			doubt_phytype = new_phytype;
   9034 		new_phytype = WMPHY_I210;
   9035 		new_readreg = wm_gmii_gs40g_readreg;
   9036 		new_writereg = wm_gmii_gs40g_writereg;
   9037 	} else if (sc->sc_type >= WM_T_82580) {
   9038 		/* 82580, I350 and I354 */
   9039 		new_readreg = wm_gmii_82580_readreg;
   9040 		new_writereg = wm_gmii_82580_writereg;
   9041 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9043 		new_readreg = wm_gmii_i82544_readreg;
   9044 		new_writereg = wm_gmii_i82544_writereg;
   9045 	} else {
   9046 		new_readreg = wm_gmii_i82543_readreg;
   9047 		new_writereg = wm_gmii_i82543_writereg;
   9048 	}
   9049 
   9050 	if (new_phytype == WMPHY_BM) {
   9051 		/* All BM use _bm_ */
   9052 		new_readreg = wm_gmii_bm_readreg;
   9053 		new_writereg = wm_gmii_bm_writereg;
   9054 	}
   9055 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9056 		/* All PCH* use _hv_ */
   9057 		new_readreg = wm_gmii_hv_readreg;
   9058 		new_writereg = wm_gmii_hv_writereg;
   9059 	}
   9060 
   9061 	/* Diag output */
   9062 	if (doubt_phytype != WMPHY_UNKNOWN)
   9063 		aprint_error_dev(dev, "Assumed new PHY type was "
   9064 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9065 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev,
		    "Previously assumed PHY type(%u) was incorrect. "
		    "New PHY type = %u\n", sc->sc_phytype, new_phytype);
   9071 
   9072 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9073 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9074 
   9075 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9076 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9077 		    "function was incorrect.\n");
   9078 
   9079 	/* Update now */
   9080 	sc->sc_phytype = new_phytype;
   9081 	mii->mii_readreg = new_readreg;
   9082 	mii->mii_writereg = new_writereg;
   9083 }
   9084 
   9085 /*
   9086  * wm_get_phy_id_82575:
   9087  *
   9088  * Return PHY ID. Return -1 if it failed.
   9089  */
   9090 static int
   9091 wm_get_phy_id_82575(struct wm_softc *sc)
   9092 {
   9093 	uint32_t reg;
   9094 	int phyid = -1;
   9095 
   9096 	/* XXX */
   9097 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9098 		return -1;
   9099 
   9100 	if (wm_sgmii_uses_mdio(sc)) {
   9101 		switch (sc->sc_type) {
   9102 		case WM_T_82575:
   9103 		case WM_T_82576:
   9104 			reg = CSR_READ(sc, WMREG_MDIC);
   9105 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9106 			break;
   9107 		case WM_T_82580:
   9108 		case WM_T_I350:
   9109 		case WM_T_I354:
   9110 		case WM_T_I210:
   9111 		case WM_T_I211:
   9112 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9113 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9114 			break;
   9115 		default:
   9116 			return -1;
   9117 		}
   9118 	}
   9119 
   9120 	return phyid;
   9121 }
   9122 
   9123 
   9124 /*
   9125  * wm_gmii_mediainit:
   9126  *
   9127  *	Initialize media for use on 1000BASE-T devices.
   9128  */
   9129 static void
   9130 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9131 {
   9132 	device_t dev = sc->sc_dev;
   9133 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9134 	struct mii_data *mii = &sc->sc_mii;
   9135 	uint32_t reg;
   9136 
   9137 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9138 		device_xname(sc->sc_dev), __func__));
   9139 
   9140 	/* We have GMII. */
   9141 	sc->sc_flags |= WM_F_HAS_MII;
   9142 
   9143 	if (sc->sc_type == WM_T_80003)
   9144 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9145 	else
   9146 		sc->sc_tipg = TIPG_1000T_DFLT;
   9147 
   9148 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9149 	if ((sc->sc_type == WM_T_82580)
   9150 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9151 	    || (sc->sc_type == WM_T_I211)) {
   9152 		reg = CSR_READ(sc, WMREG_PHPM);
   9153 		reg &= ~PHPM_GO_LINK_D;
   9154 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9155 	}
   9156 
   9157 	/*
   9158 	 * Let the chip set speed/duplex on its own based on
   9159 	 * signals from the PHY.
   9160 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9161 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9162 	 */
   9163 	sc->sc_ctrl |= CTRL_SLU;
   9164 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9165 
   9166 	/* Initialize our media structures and probe the GMII. */
   9167 	mii->mii_ifp = ifp;
   9168 
   9169 	/*
	 * The first call of wm_gmii_setup_phytype. The result might be
   9171 	 * incorrect.
   9172 	 */
   9173 	wm_gmii_setup_phytype(sc, 0, 0);
   9174 
   9175 	mii->mii_statchg = wm_gmii_statchg;
   9176 
   9177 	/* get PHY control from SMBus to PCIe */
   9178 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9179 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9180 		wm_smbustopci(sc);
   9181 
   9182 	wm_gmii_reset(sc);
   9183 
   9184 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9185 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9186 	    wm_gmii_mediastatus);
   9187 
   9188 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9189 	    || (sc->sc_type == WM_T_82580)
   9190 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9191 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9192 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9193 			/* Attach only one port */
   9194 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9195 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9196 		} else {
   9197 			int i, id;
   9198 			uint32_t ctrl_ext;
   9199 
   9200 			id = wm_get_phy_id_82575(sc);
   9201 			if (id != -1) {
   9202 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9203 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9204 			}
   9205 			if ((id == -1)
   9206 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   9208 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9209 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9210 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9211 				CSR_WRITE_FLUSH(sc);
   9212 				delay(300*1000); /* XXX too long */
   9213 
				/* Try PHY addresses from 1 to 7. */
   9215 				for (i = 1; i < 8; i++)
   9216 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9217 					    0xffffffff, i, MII_OFFSET_ANY,
   9218 					    MIIF_DOPAUSE);
   9219 
   9220 				/* restore previous sfp cage power state */
   9221 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9222 			}
   9223 		}
   9224 	} else {
   9225 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9226 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9227 	}
   9228 
   9229 	/*
   9230 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9231 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9232 	 */
   9233 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9234 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9235 		wm_set_mdio_slow_mode_hv(sc);
   9236 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9237 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9238 	}
   9239 
   9240 	/*
   9241 	 * (For ICH8 variants)
   9242 	 * If PHY detection failed, use BM's r/w function and retry.
   9243 	 */
   9244 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9245 		/* if failed, retry with *_bm_* */
   9246 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9247 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9248 		    sc->sc_phytype);
   9249 		sc->sc_phytype = WMPHY_BM;
   9250 		mii->mii_readreg = wm_gmii_bm_readreg;
   9251 		mii->mii_writereg = wm_gmii_bm_writereg;
   9252 
   9253 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9254 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9255 	}
   9256 
   9257 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found. */
   9259 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9260 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9261 		sc->sc_phytype = WMPHY_NONE;
   9262 	} else {
   9263 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9264 
   9265 		/*
		 * PHY found!  Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype.
   9268 		 */
   9269 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9270 		    child->mii_mpd_model);
   9271 
   9272 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9273 	}
   9274 }
   9275 
   9276 /*
   9277  * wm_gmii_mediachange:	[ifmedia interface function]
   9278  *
   9279  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9280  */
   9281 static int
   9282 wm_gmii_mediachange(struct ifnet *ifp)
   9283 {
   9284 	struct wm_softc *sc = ifp->if_softc;
   9285 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9286 	int rc;
   9287 
   9288 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9289 		device_xname(sc->sc_dev), __func__));
   9290 	if ((ifp->if_flags & IFF_UP) == 0)
   9291 		return 0;
   9292 
   9293 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9294 	sc->sc_ctrl |= CTRL_SLU;
   9295 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9296 	    || (sc->sc_type > WM_T_82543)) {
   9297 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9298 	} else {
   9299 		sc->sc_ctrl &= ~CTRL_ASDE;
   9300 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9301 		if (ife->ifm_media & IFM_FDX)
   9302 			sc->sc_ctrl |= CTRL_FD;
   9303 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9304 		case IFM_10_T:
   9305 			sc->sc_ctrl |= CTRL_SPEED_10;
   9306 			break;
   9307 		case IFM_100_TX:
   9308 			sc->sc_ctrl |= CTRL_SPEED_100;
   9309 			break;
   9310 		case IFM_1000_T:
   9311 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9312 			break;
   9313 		default:
   9314 			panic("wm_gmii_mediachange: bad media 0x%x",
   9315 			    ife->ifm_media);
   9316 		}
   9317 	}
   9318 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9319 	if (sc->sc_type <= WM_T_82543)
   9320 		wm_gmii_reset(sc);
   9321 
   9322 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9323 		return 0;
   9324 	return rc;
   9325 }
   9326 
   9327 /*
   9328  * wm_gmii_mediastatus:	[ifmedia interface function]
   9329  *
   9330  *	Get the current interface media status on a 1000BASE-T device.
   9331  */
   9332 static void
   9333 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9334 {
   9335 	struct wm_softc *sc = ifp->if_softc;
   9336 
   9337 	ether_mediastatus(ifp, ifmr);
   9338 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9339 	    | sc->sc_flowflags;
   9340 }
   9341 
   9342 #define	MDI_IO		CTRL_SWDPIN(2)
   9343 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9344 #define	MDI_CLK		CTRL_SWDPIN(3)
   9345 
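/*
 * On the i82543 the PHY is accessed by bit-banging MDIO through
 * software-controlled pins (the MDI_* macros above) rather than
 * through the MDIC register.  A read clocks out the 32-bit preamble
 * plus a 14-bit start/opcode/address frame with
 * wm_i82543_mii_sendbits() and then shifts 16 data bits back in with
 * wm_i82543_mii_recvbits(); a write clocks out the entire 32-bit
 * frame, data included.
 */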
   9346 static void
   9347 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9348 {
   9349 	uint32_t i, v;
   9350 
   9351 	v = CSR_READ(sc, WMREG_CTRL);
   9352 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9353 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9354 
   9355 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9356 		if (data & i)
   9357 			v |= MDI_IO;
   9358 		else
   9359 			v &= ~MDI_IO;
   9360 		CSR_WRITE(sc, WMREG_CTRL, v);
   9361 		CSR_WRITE_FLUSH(sc);
   9362 		delay(10);
   9363 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9364 		CSR_WRITE_FLUSH(sc);
   9365 		delay(10);
   9366 		CSR_WRITE(sc, WMREG_CTRL, v);
   9367 		CSR_WRITE_FLUSH(sc);
   9368 		delay(10);
   9369 	}
   9370 }
   9371 
   9372 static uint32_t
   9373 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9374 {
   9375 	uint32_t v, i, data = 0;
   9376 
   9377 	v = CSR_READ(sc, WMREG_CTRL);
   9378 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9379 	v |= CTRL_SWDPIO(3);
   9380 
   9381 	CSR_WRITE(sc, WMREG_CTRL, v);
   9382 	CSR_WRITE_FLUSH(sc);
   9383 	delay(10);
   9384 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9385 	CSR_WRITE_FLUSH(sc);
   9386 	delay(10);
   9387 	CSR_WRITE(sc, WMREG_CTRL, v);
   9388 	CSR_WRITE_FLUSH(sc);
   9389 	delay(10);
   9390 
   9391 	for (i = 0; i < 16; i++) {
   9392 		data <<= 1;
   9393 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9394 		CSR_WRITE_FLUSH(sc);
   9395 		delay(10);
   9396 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9397 			data |= 1;
   9398 		CSR_WRITE(sc, WMREG_CTRL, v);
   9399 		CSR_WRITE_FLUSH(sc);
   9400 		delay(10);
   9401 	}
   9402 
   9403 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9404 	CSR_WRITE_FLUSH(sc);
   9405 	delay(10);
   9406 	CSR_WRITE(sc, WMREG_CTRL, v);
   9407 	CSR_WRITE_FLUSH(sc);
   9408 	delay(10);
   9409 
   9410 	return data;
   9411 }
   9412 
   9413 #undef MDI_IO
   9414 #undef MDI_DIR
   9415 #undef MDI_CLK
   9416 
   9417 /*
   9418  * wm_gmii_i82543_readreg:	[mii interface function]
   9419  *
   9420  *	Read a PHY register on the GMII (i82543 version).
   9421  */
   9422 static int
   9423 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9424 {
   9425 	struct wm_softc *sc = device_private(self);
   9426 	int rv;
   9427 
   9428 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9429 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9430 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9431 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9432 
   9433 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9434 	    device_xname(sc->sc_dev), phy, reg, rv));
   9435 
   9436 	return rv;
   9437 }
   9438 
   9439 /*
   9440  * wm_gmii_i82543_writereg:	[mii interface function]
   9441  *
   9442  *	Write a PHY register on the GMII (i82543 version).
   9443  */
   9444 static void
   9445 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9446 {
   9447 	struct wm_softc *sc = device_private(self);
   9448 
   9449 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9450 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9451 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9452 	    (MII_COMMAND_START << 30), 32);
   9453 }
   9454 
   9455 /*
   9456  * wm_gmii_mdic_readreg:	[mii interface function]
   9457  *
   9458  *	Read a PHY register on the GMII.
   9459  */
   9460 static int
   9461 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9462 {
   9463 	struct wm_softc *sc = device_private(self);
   9464 	uint32_t mdic = 0;
   9465 	int i, rv;
   9466 
   9467 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9468 	    MDIC_REGADD(reg));
   9469 
   9470 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9471 		mdic = CSR_READ(sc, WMREG_MDIC);
   9472 		if (mdic & MDIC_READY)
   9473 			break;
   9474 		delay(50);
   9475 	}
   9476 
   9477 	if ((mdic & MDIC_READY) == 0) {
   9478 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9479 		    device_xname(sc->sc_dev), phy, reg);
   9480 		rv = 0;
   9481 	} else if (mdic & MDIC_E) {
   9482 #if 0 /* This is normal if no PHY is present. */
   9483 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9484 		    device_xname(sc->sc_dev), phy, reg);
   9485 #endif
   9486 		rv = 0;
   9487 	} else {
   9488 		rv = MDIC_DATA(mdic);
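		/*
		 * An all-ones value usually means that no PHY
		 * responded, so normalize it to 0 like the error
		 * cases above.
		 */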
   9489 		if (rv == 0xffff)
   9490 			rv = 0;
   9491 	}
   9492 
   9493 	return rv;
   9494 }
   9495 
   9496 /*
   9497  * wm_gmii_mdic_writereg:	[mii interface function]
   9498  *
   9499  *	Write a PHY register on the GMII.
   9500  */
   9501 static void
   9502 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9503 {
   9504 	struct wm_softc *sc = device_private(self);
   9505 	uint32_t mdic = 0;
   9506 	int i;
   9507 
   9508 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9509 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9510 
   9511 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9512 		mdic = CSR_READ(sc, WMREG_MDIC);
   9513 		if (mdic & MDIC_READY)
   9514 			break;
   9515 		delay(50);
   9516 	}
   9517 
   9518 	if ((mdic & MDIC_READY) == 0)
   9519 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9520 		    device_xname(sc->sc_dev), phy, reg);
   9521 	else if (mdic & MDIC_E)
   9522 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9523 		    device_xname(sc->sc_dev), phy, reg);
   9524 }
   9525 
   9526 /*
   9527  * wm_gmii_i82544_readreg:	[mii interface function]
   9528  *
   9529  *	Read a PHY register on the GMII.
   9530  */
   9531 static int
   9532 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9533 {
   9534 	struct wm_softc *sc = device_private(self);
   9535 	int rv;
   9536 
   9537 	if (sc->phy.acquire(sc)) {
   9538 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9539 		    __func__);
   9540 		return 0;
   9541 	}
   9542 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9543 	sc->phy.release(sc);
   9544 
   9545 	return rv;
   9546 }
   9547 
   9548 /*
   9549  * wm_gmii_i82544_writereg:	[mii interface function]
   9550  *
   9551  *	Write a PHY register on the GMII.
   9552  */
   9553 static void
   9554 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9555 {
   9556 	struct wm_softc *sc = device_private(self);
   9557 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9562 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9563 	sc->phy.release(sc);
   9564 }
   9565 
   9566 /*
   9567  * wm_gmii_i80003_readreg:	[mii interface function]
   9568  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9572  */
   9573 static int
   9574 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9575 {
   9576 	struct wm_softc *sc = device_private(self);
   9577 	int rv;
   9578 
   9579 	if (phy != 1) /* only one PHY on kumeran bus */
   9580 		return 0;
   9581 
   9582 	if (sc->phy.acquire(sc)) {
   9583 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9584 		    __func__);
   9585 		return 0;
   9586 	}
   9587 
   9588 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9589 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9590 		    reg >> GG82563_PAGE_SHIFT);
   9591 	} else {
   9592 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9593 		    reg >> GG82563_PAGE_SHIFT);
   9594 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9596 	delay(200);
   9597 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9598 	delay(200);
   9599 	sc->phy.release(sc);
   9600 
   9601 	return rv;
   9602 }
   9603 
   9604 /*
   9605  * wm_gmii_i80003_writereg:	[mii interface function]
   9606  *
   9607  *	Write a PHY register on the kumeran.
   9608  * This could be handled by the PHY layer if we didn't have to lock the
    9609  * resource ...
   9610  */
   9611 static void
   9612 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9613 {
   9614 	struct wm_softc *sc = device_private(self);
   9615 
   9616 	if (phy != 1) /* only one PHY on kumeran bus */
   9617 		return;
   9618 
   9619 	if (sc->phy.acquire(sc)) {
   9620 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9621 		    __func__);
   9622 		return;
   9623 	}
   9624 
   9625 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9626 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9627 		    reg >> GG82563_PAGE_SHIFT);
   9628 	} else {
   9629 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9630 		    reg >> GG82563_PAGE_SHIFT);
   9631 	}
    9632 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   9633 	delay(200);
   9634 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9635 	delay(200);
   9636 
   9637 	sc->phy.release(sc);
   9638 }
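
/*
 * Example (sketch): how a caller composes a paged GG82563 register
 * number for the accessors above.  The page lives in the bits above
 * GG82563_PAGE_SHIFT and the low bits (MII_ADDRMASK) select the
 * register within the page; the page/register values below are made up
 * for illustration.
 */
#if 0
	int reg = (5 << GG82563_PAGE_SHIFT) | 0x10;	/* page 5, reg 0x10 */
	int val = wm_gmii_i80003_readreg(self, 1, reg);
#endif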
   9639 
   9640 /*
   9641  * wm_gmii_bm_readreg:	[mii interface function]
   9642  *
    9643  *	Read a PHY register on the BM PHY.
    9644  * This could be handled by the PHY layer if we didn't have to lock the
    9645  * resource ...
   9646  */
   9647 static int
   9648 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9649 {
   9650 	struct wm_softc *sc = device_private(self);
   9651 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9652 	uint16_t val;
   9653 	int rv;
   9654 
   9655 	if (sc->phy.acquire(sc)) {
   9656 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9657 		    __func__);
   9658 		return 0;
   9659 	}
   9660 
   9661 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9662 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9663 		    || (reg == 31)) ? 1 : phy;
   9664 	/* Page 800 works differently than the rest so it has its own func */
   9665 	if (page == BM_WUC_PAGE) {
   9666 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9667 		rv = val;
   9668 		goto release;
   9669 	}
   9670 
   9671 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9672 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9673 		    && (sc->sc_type != WM_T_82583))
   9674 			wm_gmii_mdic_writereg(self, phy,
   9675 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9676 		else
   9677 			wm_gmii_mdic_writereg(self, phy,
   9678 			    BME1000_PHY_PAGE_SELECT, page);
   9679 	}
   9680 
   9681 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9682 
   9683 release:
   9684 	sc->phy.release(sc);
   9685 	return rv;
   9686 }
   9687 
   9688 /*
   9689  * wm_gmii_bm_writereg:	[mii interface function]
   9690  *
    9691  *	Write a PHY register on the BM PHY.
    9692  * This could be handled by the PHY layer if we didn't have to lock the
    9693  * resource ...
   9694  */
   9695 static void
   9696 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9697 {
   9698 	struct wm_softc *sc = device_private(self);
   9699 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9700 
   9701 	if (sc->phy.acquire(sc)) {
   9702 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9703 		    __func__);
   9704 		return;
   9705 	}
   9706 
   9707 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9708 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9709 		    || (reg == 31)) ? 1 : phy;
   9710 	/* Page 800 works differently than the rest so it has its own func */
   9711 	if (page == BM_WUC_PAGE) {
   9712 		uint16_t tmp;
   9713 
   9714 		tmp = val;
   9715 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9716 		goto release;
   9717 	}
   9718 
   9719 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9720 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9721 		    && (sc->sc_type != WM_T_82583))
   9722 			wm_gmii_mdic_writereg(self, phy,
   9723 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9724 		else
   9725 			wm_gmii_mdic_writereg(self, phy,
   9726 			    BME1000_PHY_PAGE_SELECT, page);
   9727 	}
   9728 
   9729 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9730 
   9731 release:
   9732 	sc->phy.release(sc);
   9733 }
   9734 
   9735 static void
    9736 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9737 {
   9738 	struct wm_softc *sc = device_private(self);
   9739 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9740 	uint16_t wuce, reg;
   9741 
   9742 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9743 		device_xname(sc->sc_dev), __func__));
   9744 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9745 	if (sc->sc_type == WM_T_PCH) {
    9746 		/* XXX The e1000 driver does nothing here... why? */
   9747 	}
   9748 
   9749 	/*
   9750 	 * 1) Enable PHY wakeup register first.
   9751 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9752 	 */
   9753 
   9754 	/* Set page 769 */
   9755 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9756 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9757 
   9758 	/* Read WUCE and save it */
   9759 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9760 
   9761 	reg = wuce | BM_WUC_ENABLE_BIT;
   9762 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9763 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9764 
   9765 	/* Select page 800 */
   9766 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9767 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9768 
   9769 	/*
   9770 	 * 2) Access PHY wakeup register.
   9771 	 * See e1000_access_phy_wakeup_reg_bm.
   9772 	 */
   9773 
   9774 	/* Write page 800 */
   9775 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9776 
   9777 	if (rd)
   9778 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9779 	else
   9780 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9781 
   9782 	/*
   9783 	 * 3) Disable PHY wakeup register.
   9784 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9785 	 */
   9786 	/* Set page 769 */
   9787 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9788 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9789 
   9790 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9791 }
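
/*
 * Example (sketch): reading a wakeup-page register through the helper
 * above.  The packed register value follows the page encoding the BM
 * accessors use; the in-page register number (1) is made up for
 * illustration.
 */
#if 0
	uint16_t wuc;

	wm_access_phy_wakeup_reg_bm(self,
	    (BM_WUC_PAGE << BME1000_PAGE_SHIFT) | 1, &wuc, 1);
#endif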
   9792 
   9793 /*
   9794  * wm_gmii_hv_readreg:	[mii interface function]
   9795  *
    9796  *	Read a PHY register on the HV (PCH) PHY.
    9797  * This could be handled by the PHY layer if we didn't have to lock the
    9798  * resource ...
   9799  */
   9800 static int
   9801 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9802 {
   9803 	struct wm_softc *sc = device_private(self);
   9804 	int rv;
   9805 
   9806 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9807 		device_xname(sc->sc_dev), __func__));
   9808 	if (sc->phy.acquire(sc)) {
   9809 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9810 		    __func__);
   9811 		return 0;
   9812 	}
   9813 
   9814 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9815 	sc->phy.release(sc);
   9816 	return rv;
   9817 }
   9818 
   9819 static int
   9820 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9821 {
   9822 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9823 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9824 	uint16_t val;
   9825 	int rv;
   9826 
   9827 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9828 
   9829 	/* Page 800 works differently than the rest so it has its own func */
   9830 	if (page == BM_WUC_PAGE) {
   9831 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9832 		return val;
   9833 	}
   9834 
    9835 	/*
    9836 	 * Pages above 0 but below 768 work differently than the rest, so
    9837 	 * they would need their own function (not implemented; complain).
    9838 	 */
   9839 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9840 		printf("gmii_hv_readreg!!!\n");
   9841 		return 0;
   9842 	}
   9843 
   9844 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9845 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9846 		    page << BME1000_PAGE_SHIFT);
   9847 	}
   9848 
   9849 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9850 	return rv;
   9851 }
   9852 
   9853 /*
   9854  * wm_gmii_hv_writereg:	[mii interface function]
   9855  *
    9856  *	Write a PHY register on the HV (PCH) PHY.
    9857  * This could be handled by the PHY layer if we didn't have to lock the
    9858  * resource ...
   9859  */
   9860 static void
   9861 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9862 {
   9863 	struct wm_softc *sc = device_private(self);
   9864 
   9865 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9866 		device_xname(sc->sc_dev), __func__));
   9867 
   9868 	if (sc->phy.acquire(sc)) {
   9869 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9870 		    __func__);
   9871 		return;
   9872 	}
   9873 
   9874 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9875 	sc->phy.release(sc);
   9876 }
   9877 
   9878 static void
   9879 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9880 {
   9881 	struct wm_softc *sc = device_private(self);
   9882 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9883 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9884 
   9885 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9886 
   9887 	/* Page 800 works differently than the rest so it has its own func */
   9888 	if (page == BM_WUC_PAGE) {
   9889 		uint16_t tmp;
   9890 
   9891 		tmp = val;
   9892 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9893 		return;
   9894 	}
   9895 
    9896 	/*
    9897 	 * Pages above 0 but below 768 work differently than the rest, so
    9898 	 * they would need their own function (not implemented; complain).
    9899 	 */
   9900 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9901 		printf("gmii_hv_writereg!!!\n");
   9902 		return;
   9903 	}
   9904 
   9905 	{
   9906 		/*
   9907 		 * XXX Workaround MDIO accesses being disabled after entering
   9908 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9909 		 * register is set)
   9910 		 */
   9911 		if (sc->sc_phytype == WMPHY_82578) {
   9912 			struct mii_softc *child;
   9913 
   9914 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9915 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9916 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9917 			    && ((val & (1 << 11)) != 0)) {
   9918 				printf("XXX need workaround\n");
   9919 			}
   9920 		}
   9921 
   9922 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9923 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9924 			    page << BME1000_PAGE_SHIFT);
   9925 		}
   9926 	}
   9927 
   9928 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9929 }
   9930 
   9931 /*
   9932  * wm_gmii_82580_readreg:	[mii interface function]
   9933  *
   9934  *	Read a PHY register on the 82580 and I350.
   9935  * This could be handled by the PHY layer if we didn't have to lock the
    9936  * resource ...
   9937  */
   9938 static int
   9939 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9940 {
   9941 	struct wm_softc *sc = device_private(self);
   9942 	int rv;
   9943 
   9944 	if (sc->phy.acquire(sc) != 0) {
   9945 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9946 		    __func__);
   9947 		return 0;
   9948 	}
   9949 
   9950 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9951 
   9952 	sc->phy.release(sc);
   9953 	return rv;
   9954 }
   9955 
   9956 /*
   9957  * wm_gmii_82580_writereg:	[mii interface function]
   9958  *
   9959  *	Write a PHY register on the 82580 and I350.
   9960  * This could be handled by the PHY layer if we didn't have to lock the
    9961  * resource ...
   9962  */
   9963 static void
   9964 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9965 {
   9966 	struct wm_softc *sc = device_private(self);
   9967 
   9968 	if (sc->phy.acquire(sc) != 0) {
   9969 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9970 		    __func__);
   9971 		return;
   9972 	}
   9973 
   9974 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9975 
   9976 	sc->phy.release(sc);
   9977 }
   9978 
   9979 /*
   9980  * wm_gmii_gs40g_readreg:	[mii interface function]
   9981  *
    9982  *	Read a PHY register on the I210 and I211.
    9983  * This could be handled by the PHY layer if we didn't have to lock the
    9984  * resource ...
   9985  */
   9986 static int
   9987 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9988 {
   9989 	struct wm_softc *sc = device_private(self);
   9990 	int page, offset;
   9991 	int rv;
   9992 
   9993 	/* Acquire semaphore */
   9994 	if (sc->phy.acquire(sc)) {
   9995 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9996 		    __func__);
   9997 		return 0;
   9998 	}
   9999 
   10000 	/* Page select */
   10001 	page = reg >> GS40G_PAGE_SHIFT;
   10002 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10003 
   10004 	/* Read reg */
   10005 	offset = reg & GS40G_OFFSET_MASK;
   10006 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10007 
   10008 	sc->phy.release(sc);
   10009 	return rv;
   10010 }
   10011 
   10012 /*
   10013  * wm_gmii_gs40g_writereg:	[mii interface function]
   10014  *
   10015  *	Write a PHY register on the I210 and I211.
   10016  * This could be handled by the PHY layer if we didn't have to lock the
    10017  * resource ...
   10018  */
   10019 static void
   10020 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10021 {
   10022 	struct wm_softc *sc = device_private(self);
   10023 	int page, offset;
   10024 
   10025 	/* Acquire semaphore */
   10026 	if (sc->phy.acquire(sc)) {
   10027 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10028 		    __func__);
   10029 		return;
   10030 	}
   10031 
   10032 	/* Page select */
   10033 	page = reg >> GS40G_PAGE_SHIFT;
   10034 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10035 
   10036 	/* Write reg */
   10037 	offset = reg & GS40G_OFFSET_MASK;
   10038 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10039 
   10040 	/* Release semaphore */
   10041 	sc->phy.release(sc);
   10042 }
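
/*
 * Example (sketch): a GS40G register number packs a page above
 * GS40G_PAGE_SHIFT and an offset in GS40G_OFFSET_MASK, mirroring the
 * decomposition done by the two accessors above.  The values below are
 * made up for illustration.
 */
#if 0
	int reg = (2 << GS40G_PAGE_SHIFT) | 0x15;	/* page 2, reg 0x15 */
	int val = wm_gmii_gs40g_readreg(self, 1, reg);
#endif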
   10043 
   10044 /*
   10045  * wm_gmii_statchg:	[mii interface function]
   10046  *
   10047  *	Callback from MII layer when media changes.
   10048  */
   10049 static void
   10050 wm_gmii_statchg(struct ifnet *ifp)
   10051 {
   10052 	struct wm_softc *sc = ifp->if_softc;
   10053 	struct mii_data *mii = &sc->sc_mii;
   10054 
   10055 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10056 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10057 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10058 
   10059 	/*
   10060 	 * Get flow control negotiation result.
   10061 	 */
   10062 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10063 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10064 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10065 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10066 	}
   10067 
   10068 	if (sc->sc_flowflags & IFM_FLOW) {
   10069 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10070 			sc->sc_ctrl |= CTRL_TFCE;
   10071 			sc->sc_fcrtl |= FCRTL_XONE;
   10072 		}
   10073 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10074 			sc->sc_ctrl |= CTRL_RFCE;
   10075 	}
   10076 
   10077 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10078 		DPRINTF(WM_DEBUG_LINK,
   10079 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10080 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10081 	} else {
   10082 		DPRINTF(WM_DEBUG_LINK,
   10083 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10084 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10085 	}
   10086 
   10087 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10088 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10089 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10090 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10091 	if (sc->sc_type == WM_T_80003) {
   10092 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10093 		case IFM_1000_T:
   10094 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10095 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10096 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10097 			break;
   10098 		default:
   10099 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10100 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10101 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10102 			break;
   10103 		}
   10104 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10105 	}
   10106 }
   10107 
   10108 /* kumeran related (80003, ICH* and PCH*) */
   10109 
   10110 /*
   10111  * wm_kmrn_readreg:
   10112  *
   10113  *	Read a kumeran register
   10114  */
   10115 static int
   10116 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10117 {
   10118 	int rv;
   10119 
   10120 	if (sc->sc_type == WM_T_80003)
   10121 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10122 	else
   10123 		rv = sc->phy.acquire(sc);
   10124 	if (rv != 0) {
   10125 		aprint_error_dev(sc->sc_dev,
   10126 		    "%s: failed to get semaphore\n", __func__);
   10127 		return 0;
   10128 	}
   10129 
   10130 	rv = wm_kmrn_readreg_locked(sc, reg);
   10131 
   10132 	if (sc->sc_type == WM_T_80003)
   10133 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10134 	else
   10135 		sc->phy.release(sc);
   10136 
   10137 	return rv;
   10138 }
   10139 
   10140 static int
   10141 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10142 {
   10143 	int rv;
   10144 
   10145 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10146 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10147 	    KUMCTRLSTA_REN);
   10148 	CSR_WRITE_FLUSH(sc);
   10149 	delay(2);
   10150 
   10151 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10152 
   10153 	return rv;
   10154 }
   10155 
   10156 /*
   10157  * wm_kmrn_writereg:
   10158  *
   10159  *	Write a kumeran register
   10160  */
   10161 static void
   10162 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10163 {
   10164 	int rv;
   10165 
   10166 	if (sc->sc_type == WM_T_80003)
   10167 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10168 	else
   10169 		rv = sc->phy.acquire(sc);
   10170 	if (rv != 0) {
   10171 		aprint_error_dev(sc->sc_dev,
   10172 		    "%s: failed to get semaphore\n", __func__);
   10173 		return;
   10174 	}
   10175 
   10176 	wm_kmrn_writereg_locked(sc, reg, val);
   10177 
   10178 	if (sc->sc_type == WM_T_80003)
   10179 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10180 	else
   10181 		sc->phy.release(sc);
   10182 }
   10183 
   10184 static void
   10185 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10186 {
   10187 
   10188 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10189 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10190 	    (val & KUMCTRLSTA_MASK));
   10191 }
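
/*
 * Example (sketch): a read-then-write of a Kumeran register with the
 * helpers above, using register and value names taken from
 * wm_gmii_statchg(); the combination is illustrative only.
 */
#if 0
	int hd = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);

	if (hd != KUMCTRLSTA_HD_CTRL_1000_DEFAULT)
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif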
   10192 
   10193 /* SGMII related */
   10194 
   10195 /*
   10196  * wm_sgmii_uses_mdio
   10197  *
   10198  * Check whether the transaction is to the internal PHY or the external
   10199  * MDIO interface. Return true if it's MDIO.
   10200  */
   10201 static bool
   10202 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10203 {
   10204 	uint32_t reg;
   10205 	bool ismdio = false;
   10206 
   10207 	switch (sc->sc_type) {
   10208 	case WM_T_82575:
   10209 	case WM_T_82576:
   10210 		reg = CSR_READ(sc, WMREG_MDIC);
   10211 		ismdio = ((reg & MDIC_DEST) != 0);
   10212 		break;
   10213 	case WM_T_82580:
   10214 	case WM_T_I350:
   10215 	case WM_T_I354:
   10216 	case WM_T_I210:
   10217 	case WM_T_I211:
   10218 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10219 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10220 		break;
   10221 	default:
   10222 		break;
   10223 	}
   10224 
   10225 	return ismdio;
   10226 }
   10227 
   10228 /*
   10229  * wm_sgmii_readreg:	[mii interface function]
   10230  *
   10231  *	Read a PHY register on the SGMII
   10232  * This could be handled by the PHY layer if we didn't have to lock the
    10233  * resource ...
   10234  */
   10235 static int
   10236 wm_sgmii_readreg(device_t self, int phy, int reg)
   10237 {
   10238 	struct wm_softc *sc = device_private(self);
   10239 	uint32_t i2ccmd;
   10240 	int i, rv;
   10241 
   10242 	if (sc->phy.acquire(sc)) {
   10243 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10244 		    __func__);
   10245 		return 0;
   10246 	}
   10247 
   10248 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10249 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10250 	    | I2CCMD_OPCODE_READ;
   10251 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10252 
   10253 	/* Poll the ready bit */
   10254 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10255 		delay(50);
   10256 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10257 		if (i2ccmd & I2CCMD_READY)
   10258 			break;
   10259 	}
   10260 	if ((i2ccmd & I2CCMD_READY) == 0)
   10261 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10262 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10263 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10264 
   10265 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10266 
   10267 	sc->phy.release(sc);
   10268 	return rv;
   10269 }
   10270 
   10271 /*
   10272  * wm_sgmii_writereg:	[mii interface function]
   10273  *
   10274  *	Write a PHY register on the SGMII.
   10275  * This could be handled by the PHY layer if we didn't have to lock the
    10276  * resource ...
   10277  */
   10278 static void
   10279 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10280 {
   10281 	struct wm_softc *sc = device_private(self);
   10282 	uint32_t i2ccmd;
   10283 	int i;
   10284 	int val_swapped;
   10285 
   10286 	if (sc->phy.acquire(sc) != 0) {
   10287 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10288 		    __func__);
   10289 		return;
   10290 	}
   10291 	/* Swap the data bytes for the I2C interface */
   10292 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10293 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10294 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10295 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10296 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10297 
   10298 	/* Poll the ready bit */
   10299 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10300 		delay(50);
   10301 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10302 		if (i2ccmd & I2CCMD_READY)
   10303 			break;
   10304 	}
   10305 	if ((i2ccmd & I2CCMD_READY) == 0)
   10306 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10307 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10308 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10309 
   10310 	sc->phy.release(sc);
   10311 }
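
/*
 * Note (sketch): the I2CCMD data field carries the 16-bit PHY register
 * value with its bytes swapped, hence the swap in both functions above.
 * A hypothetical helper for the swap:
 */
#if 0
static inline uint16_t
wm_i2ccmd_bswap16(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif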
   10312 
   10313 /* TBI related */
   10314 
   10315 /*
   10316  * wm_tbi_mediainit:
   10317  *
   10318  *	Initialize media for use on 1000BASE-X devices.
   10319  */
   10320 static void
   10321 wm_tbi_mediainit(struct wm_softc *sc)
   10322 {
   10323 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10324 	const char *sep = "";
   10325 
   10326 	if (sc->sc_type < WM_T_82543)
   10327 		sc->sc_tipg = TIPG_WM_DFLT;
   10328 	else
   10329 		sc->sc_tipg = TIPG_LG_DFLT;
   10330 
   10331 	sc->sc_tbi_serdes_anegticks = 5;
   10332 
   10333 	/* Initialize our media structures */
   10334 	sc->sc_mii.mii_ifp = ifp;
   10335 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10336 
   10337 	if ((sc->sc_type >= WM_T_82575)
   10338 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10339 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10340 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10341 	else
   10342 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10343 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10344 
   10345 	/*
   10346 	 * SWD Pins:
   10347 	 *
   10348 	 *	0 = Link LED (output)
   10349 	 *	1 = Loss Of Signal (input)
   10350 	 */
   10351 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10352 
   10353 	/* XXX Perhaps this is only for TBI */
   10354 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10355 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10356 
   10357 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10358 		sc->sc_ctrl &= ~CTRL_LRST;
   10359 
   10360 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10361 
   10362 #define	ADD(ss, mm, dd)							\
   10363 do {									\
   10364 	aprint_normal("%s%s", sep, ss);					\
   10365 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10366 	sep = ", ";							\
   10367 } while (/*CONSTCOND*/0)
   10368 
   10369 	aprint_normal_dev(sc->sc_dev, "");
   10370 
   10371 	if (sc->sc_type == WM_T_I354) {
   10372 		uint32_t status;
   10373 
   10374 		status = CSR_READ(sc, WMREG_STATUS);
   10375 		if (((status & STATUS_2P5_SKU) != 0)
   10376 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10377 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10378 		} else
   10379 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10380 	} else if (sc->sc_type == WM_T_82545) {
   10381 		/* Only 82545 is LX (XXX except SFP) */
   10382 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10383 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10384 	} else {
   10385 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10386 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10387 	}
   10388 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10389 	aprint_normal("\n");
   10390 
   10391 #undef ADD
   10392 
   10393 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10394 }
   10395 
   10396 /*
   10397  * wm_tbi_mediachange:	[ifmedia interface function]
   10398  *
   10399  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10400  */
   10401 static int
   10402 wm_tbi_mediachange(struct ifnet *ifp)
   10403 {
   10404 	struct wm_softc *sc = ifp->if_softc;
   10405 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10406 	uint32_t status;
   10407 	int i;
   10408 
   10409 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10410 		/* XXX need some work for >= 82571 and < 82575 */
   10411 		if (sc->sc_type < WM_T_82575)
   10412 			return 0;
   10413 	}
   10414 
   10415 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10416 	    || (sc->sc_type >= WM_T_82575))
   10417 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10418 
   10419 	sc->sc_ctrl &= ~CTRL_LRST;
   10420 	sc->sc_txcw = TXCW_ANE;
   10421 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10422 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10423 	else if (ife->ifm_media & IFM_FDX)
   10424 		sc->sc_txcw |= TXCW_FD;
   10425 	else
   10426 		sc->sc_txcw |= TXCW_HD;
   10427 
   10428 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10429 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10430 
   10431 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10432 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10433 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10434 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10435 	CSR_WRITE_FLUSH(sc);
   10436 	delay(1000);
   10437 
   10438 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10439 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10440 
   10441 	/*
    10442 	 * On chips later than the 82544, CTRL_SWDPIN(1) is set if the optics
    10443 	 * detect a signal; on the 82544 and earlier, signal is indicated by 0.
   10444 	 */
   10445 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10446 		/* Have signal; wait for the link to come up. */
   10447 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10448 			delay(10000);
   10449 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10450 				break;
   10451 		}
   10452 
   10453 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10454 			    device_xname(sc->sc_dev),i));
   10455 
   10456 		status = CSR_READ(sc, WMREG_STATUS);
   10457 		DPRINTF(WM_DEBUG_LINK,
   10458 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10459 			device_xname(sc->sc_dev),status, STATUS_LU));
   10460 		if (status & STATUS_LU) {
   10461 			/* Link is up. */
   10462 			DPRINTF(WM_DEBUG_LINK,
   10463 			    ("%s: LINK: set media -> link up %s\n",
   10464 			    device_xname(sc->sc_dev),
   10465 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10466 
   10467 			/*
   10468 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10469 			 * so we should update sc->sc_ctrl
   10470 			 */
   10471 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10472 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10473 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10474 			if (status & STATUS_FD)
   10475 				sc->sc_tctl |=
   10476 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10477 			else
   10478 				sc->sc_tctl |=
   10479 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10480 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10481 				sc->sc_fcrtl |= FCRTL_XONE;
   10482 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10483 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10484 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10485 				      sc->sc_fcrtl);
   10486 			sc->sc_tbi_linkup = 1;
   10487 		} else {
   10488 			if (i == WM_LINKUP_TIMEOUT)
   10489 				wm_check_for_link(sc);
   10490 			/* Link is down. */
   10491 			DPRINTF(WM_DEBUG_LINK,
   10492 			    ("%s: LINK: set media -> link down\n",
   10493 			    device_xname(sc->sc_dev)));
   10494 			sc->sc_tbi_linkup = 0;
   10495 		}
   10496 	} else {
   10497 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10498 		    device_xname(sc->sc_dev)));
   10499 		sc->sc_tbi_linkup = 0;
   10500 	}
   10501 
   10502 	wm_tbi_serdes_set_linkled(sc);
   10503 
   10504 	return 0;
   10505 }
   10506 
   10507 /*
   10508  * wm_tbi_mediastatus:	[ifmedia interface function]
   10509  *
   10510  *	Get the current interface media status on a 1000BASE-X device.
   10511  */
   10512 static void
   10513 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10514 {
   10515 	struct wm_softc *sc = ifp->if_softc;
   10516 	uint32_t ctrl, status;
   10517 
   10518 	ifmr->ifm_status = IFM_AVALID;
   10519 	ifmr->ifm_active = IFM_ETHER;
   10520 
   10521 	status = CSR_READ(sc, WMREG_STATUS);
   10522 	if ((status & STATUS_LU) == 0) {
   10523 		ifmr->ifm_active |= IFM_NONE;
   10524 		return;
   10525 	}
   10526 
   10527 	ifmr->ifm_status |= IFM_ACTIVE;
   10528 	/* Only 82545 is LX */
   10529 	if (sc->sc_type == WM_T_82545)
   10530 		ifmr->ifm_active |= IFM_1000_LX;
   10531 	else
   10532 		ifmr->ifm_active |= IFM_1000_SX;
   10533 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10534 		ifmr->ifm_active |= IFM_FDX;
   10535 	else
   10536 		ifmr->ifm_active |= IFM_HDX;
   10537 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10538 	if (ctrl & CTRL_RFCE)
   10539 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10540 	if (ctrl & CTRL_TFCE)
   10541 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10542 }
   10543 
   10544 /* XXX TBI only */
   10545 static int
   10546 wm_check_for_link(struct wm_softc *sc)
   10547 {
   10548 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10549 	uint32_t rxcw;
   10550 	uint32_t ctrl;
   10551 	uint32_t status;
   10552 	uint32_t sig;
   10553 
   10554 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10555 		/* XXX need some work for >= 82571 */
   10556 		if (sc->sc_type >= WM_T_82571) {
   10557 			sc->sc_tbi_linkup = 1;
   10558 			return 0;
   10559 		}
   10560 	}
   10561 
   10562 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10563 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10564 	status = CSR_READ(sc, WMREG_STATUS);
   10565 
   10566 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10567 
   10568 	DPRINTF(WM_DEBUG_LINK,
   10569 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10570 		device_xname(sc->sc_dev), __func__,
   10571 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10572 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10573 
   10574 	/*
   10575 	 * SWDPIN   LU RXCW
   10576 	 *      0    0    0
   10577 	 *      0    0    1	(should not happen)
   10578 	 *      0    1    0	(should not happen)
   10579 	 *      0    1    1	(should not happen)
   10580 	 *      1    0    0	Disable autonego and force linkup
   10581 	 *      1    0    1	got /C/ but not linkup yet
   10582 	 *      1    1    0	(linkup)
   10583 	 *      1    1    1	If IFM_AUTO, back to autonego
   10584 	 *
   10585 	 */
   10586 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10587 	    && ((status & STATUS_LU) == 0)
   10588 	    && ((rxcw & RXCW_C) == 0)) {
   10589 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10590 			__func__));
   10591 		sc->sc_tbi_linkup = 0;
   10592 		/* Disable auto-negotiation in the TXCW register */
   10593 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10594 
   10595 		/*
   10596 		 * Force link-up and also force full-duplex.
   10597 		 *
    10598 		 * NOTE: TFCE and RFCE in CTRL have been updated
    10599 		 * automatically, so we should update sc->sc_ctrl.
   10600 		 */
   10601 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10602 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10603 	} else if (((status & STATUS_LU) != 0)
   10604 	    && ((rxcw & RXCW_C) != 0)
   10605 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10606 		sc->sc_tbi_linkup = 1;
   10607 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10608 			__func__));
   10609 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10610 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10611 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10612 	    && ((rxcw & RXCW_C) != 0)) {
   10613 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10614 	} else {
   10615 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10616 			status));
   10617 	}
   10618 
   10619 	return 0;
   10620 }
   10621 
   10622 /*
   10623  * wm_tbi_tick:
   10624  *
   10625  *	Check the link on TBI devices.
   10626  *	This function acts as mii_tick().
   10627  */
   10628 static void
   10629 wm_tbi_tick(struct wm_softc *sc)
   10630 {
   10631 	struct mii_data *mii = &sc->sc_mii;
   10632 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10633 	uint32_t status;
   10634 
   10635 	KASSERT(WM_CORE_LOCKED(sc));
   10636 
   10637 	status = CSR_READ(sc, WMREG_STATUS);
   10638 
   10639 	/* XXX is this needed? */
   10640 	(void)CSR_READ(sc, WMREG_RXCW);
   10641 	(void)CSR_READ(sc, WMREG_CTRL);
   10642 
   10643 	/* set link status */
   10644 	if ((status & STATUS_LU) == 0) {
   10645 		DPRINTF(WM_DEBUG_LINK,
   10646 		    ("%s: LINK: checklink -> down\n",
   10647 			device_xname(sc->sc_dev)));
   10648 		sc->sc_tbi_linkup = 0;
   10649 	} else if (sc->sc_tbi_linkup == 0) {
   10650 		DPRINTF(WM_DEBUG_LINK,
   10651 		    ("%s: LINK: checklink -> up %s\n",
   10652 			device_xname(sc->sc_dev),
   10653 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10654 		sc->sc_tbi_linkup = 1;
   10655 		sc->sc_tbi_serdes_ticks = 0;
   10656 	}
   10657 
   10658 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10659 		goto setled;
   10660 
   10661 	if ((status & STATUS_LU) == 0) {
   10662 		sc->sc_tbi_linkup = 0;
   10663 		/* If the timer expired, retry autonegotiation */
   10664 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10665 		    && (++sc->sc_tbi_serdes_ticks
   10666 			>= sc->sc_tbi_serdes_anegticks)) {
   10667 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10668 			sc->sc_tbi_serdes_ticks = 0;
   10669 			/*
   10670 			 * Reset the link, and let autonegotiation do
   10671 			 * its thing
   10672 			 */
   10673 			sc->sc_ctrl |= CTRL_LRST;
   10674 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10675 			CSR_WRITE_FLUSH(sc);
   10676 			delay(1000);
   10677 			sc->sc_ctrl &= ~CTRL_LRST;
   10678 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10679 			CSR_WRITE_FLUSH(sc);
   10680 			delay(1000);
   10681 			CSR_WRITE(sc, WMREG_TXCW,
   10682 			    sc->sc_txcw & ~TXCW_ANE);
   10683 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10684 		}
   10685 	}
   10686 
   10687 setled:
   10688 	wm_tbi_serdes_set_linkled(sc);
   10689 }
   10690 
   10691 /* SERDES related */
   10692 static void
   10693 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10694 {
   10695 	uint32_t reg;
   10696 
   10697 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10698 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10699 		return;
   10700 
   10701 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10702 	reg |= PCS_CFG_PCS_EN;
   10703 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10704 
   10705 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10706 	reg &= ~CTRL_EXT_SWDPIN(3);
   10707 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10708 	CSR_WRITE_FLUSH(sc);
   10709 }
   10710 
   10711 static int
   10712 wm_serdes_mediachange(struct ifnet *ifp)
   10713 {
   10714 	struct wm_softc *sc = ifp->if_softc;
   10715 	bool pcs_autoneg = true; /* XXX */
   10716 	uint32_t ctrl_ext, pcs_lctl, reg;
   10717 
   10718 	/* XXX Currently, this function is not called on 8257[12] */
   10719 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10720 	    || (sc->sc_type >= WM_T_82575))
   10721 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10722 
   10723 	wm_serdes_power_up_link_82575(sc);
   10724 
   10725 	sc->sc_ctrl |= CTRL_SLU;
   10726 
   10727 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10728 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10729 
   10730 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10731 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10732 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10733 	case CTRL_EXT_LINK_MODE_SGMII:
   10734 		pcs_autoneg = true;
   10735 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10736 		break;
   10737 	case CTRL_EXT_LINK_MODE_1000KX:
   10738 		pcs_autoneg = false;
   10739 		/* FALLTHROUGH */
   10740 	default:
   10741 		if ((sc->sc_type == WM_T_82575)
   10742 		    || (sc->sc_type == WM_T_82576)) {
   10743 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10744 				pcs_autoneg = false;
   10745 		}
   10746 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10747 		    | CTRL_FRCFDX;
   10748 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10749 	}
   10750 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10751 
   10752 	if (pcs_autoneg) {
   10753 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10754 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10755 
   10756 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10757 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10758 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10759 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10760 	} else
   10761 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10762 
   10763 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10764 
   10766 	return 0;
   10767 }
   10768 
   10769 static void
   10770 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10771 {
   10772 	struct wm_softc *sc = ifp->if_softc;
   10773 	struct mii_data *mii = &sc->sc_mii;
   10774 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10775 	uint32_t pcs_adv, pcs_lpab, reg;
   10776 
   10777 	ifmr->ifm_status = IFM_AVALID;
   10778 	ifmr->ifm_active = IFM_ETHER;
   10779 
   10780 	/* Check PCS */
   10781 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10782 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10783 		ifmr->ifm_active |= IFM_NONE;
   10784 		sc->sc_tbi_linkup = 0;
   10785 		goto setled;
   10786 	}
   10787 
   10788 	sc->sc_tbi_linkup = 1;
   10789 	ifmr->ifm_status |= IFM_ACTIVE;
   10790 	if (sc->sc_type == WM_T_I354) {
   10791 		uint32_t status;
   10792 
   10793 		status = CSR_READ(sc, WMREG_STATUS);
   10794 		if (((status & STATUS_2P5_SKU) != 0)
   10795 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10796 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10797 		} else
   10798 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10799 	} else {
   10800 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10801 		case PCS_LSTS_SPEED_10:
   10802 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10803 			break;
   10804 		case PCS_LSTS_SPEED_100:
   10805 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10806 			break;
   10807 		case PCS_LSTS_SPEED_1000:
   10808 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10809 			break;
   10810 		default:
   10811 			device_printf(sc->sc_dev, "Unknown speed\n");
   10812 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10813 			break;
   10814 		}
   10815 	}
   10816 	if ((reg & PCS_LSTS_FDX) != 0)
   10817 		ifmr->ifm_active |= IFM_FDX;
   10818 	else
   10819 		ifmr->ifm_active |= IFM_HDX;
   10820 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10821 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10822 		/* Check flow */
   10823 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10824 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10825 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10826 			goto setled;
   10827 		}
   10828 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10829 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10830 		DPRINTF(WM_DEBUG_LINK,
   10831 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10832 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10833 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10834 			mii->mii_media_active |= IFM_FLOW
   10835 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10836 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10837 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10838 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10839 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10840 			mii->mii_media_active |= IFM_FLOW
   10841 			    | IFM_ETH_TXPAUSE;
   10842 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10843 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10844 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10845 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10846 			mii->mii_media_active |= IFM_FLOW
   10847 			    | IFM_ETH_RXPAUSE;
   10848 		}
   10849 	}
   10850 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10851 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10852 setled:
   10853 	wm_tbi_serdes_set_linkled(sc);
   10854 }
   10855 
   10856 /*
   10857  * wm_serdes_tick:
   10858  *
   10859  *	Check the link on serdes devices.
   10860  */
   10861 static void
   10862 wm_serdes_tick(struct wm_softc *sc)
   10863 {
   10864 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10865 	struct mii_data *mii = &sc->sc_mii;
   10866 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10867 	uint32_t reg;
   10868 
   10869 	KASSERT(WM_CORE_LOCKED(sc));
   10870 
   10871 	mii->mii_media_status = IFM_AVALID;
   10872 	mii->mii_media_active = IFM_ETHER;
   10873 
   10874 	/* Check PCS */
   10875 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10876 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10877 		mii->mii_media_status |= IFM_ACTIVE;
   10878 		sc->sc_tbi_linkup = 1;
   10879 		sc->sc_tbi_serdes_ticks = 0;
   10880 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10881 		if ((reg & PCS_LSTS_FDX) != 0)
   10882 			mii->mii_media_active |= IFM_FDX;
   10883 		else
   10884 			mii->mii_media_active |= IFM_HDX;
   10885 	} else {
   10886 		mii->mii_media_status |= IFM_NONE;
   10887 		sc->sc_tbi_linkup = 0;
   10888 		/* If the timer expired, retry autonegotiation */
   10889 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10890 		    && (++sc->sc_tbi_serdes_ticks
   10891 			>= sc->sc_tbi_serdes_anegticks)) {
   10892 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10893 			sc->sc_tbi_serdes_ticks = 0;
   10894 			/* XXX */
   10895 			wm_serdes_mediachange(ifp);
   10896 		}
   10897 	}
   10898 
   10899 	wm_tbi_serdes_set_linkled(sc);
   10900 }
   10901 
   10902 /* SFP related */
   10903 
   10904 static int
   10905 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10906 {
   10907 	uint32_t i2ccmd;
   10908 	int i;
   10909 
   10910 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10911 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10912 
   10913 	/* Poll the ready bit */
   10914 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10915 		delay(50);
   10916 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10917 		if (i2ccmd & I2CCMD_READY)
   10918 			break;
   10919 	}
   10920 	if ((i2ccmd & I2CCMD_READY) == 0)
   10921 		return -1;
   10922 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10923 		return -1;
   10924 
   10925 	*data = i2ccmd & 0x00ff;
   10926 
   10927 	return 0;
   10928 }
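
/*
 * Example (sketch): fetching the SFP identifier byte with the helper
 * above, much as wm_sfp_get_media_type() does below.
 */
#if 0
	uint8_t id;

	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
		aprint_normal_dev(sc->sc_dev, "SFP ID byte: 0x%02x\n", id);
#endif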
   10929 
   10930 static uint32_t
   10931 wm_sfp_get_media_type(struct wm_softc *sc)
   10932 {
   10933 	uint32_t ctrl_ext;
   10934 	uint8_t val = 0;
   10935 	int timeout = 3;
   10936 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10937 	int rv = -1;
   10938 
   10939 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10940 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10941 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10942 	CSR_WRITE_FLUSH(sc);
   10943 
   10944 	/* Read SFP module data */
   10945 	while (timeout) {
   10946 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10947 		if (rv == 0)
   10948 			break;
   10949 		delay(100*1000); /* XXX too big */
   10950 		timeout--;
   10951 	}
   10952 	if (rv != 0)
   10953 		goto out;
   10954 	switch (val) {
   10955 	case SFF_SFP_ID_SFF:
   10956 		aprint_normal_dev(sc->sc_dev,
   10957 		    "Module/Connector soldered to board\n");
   10958 		break;
   10959 	case SFF_SFP_ID_SFP:
   10960 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10961 		break;
   10962 	case SFF_SFP_ID_UNKNOWN:
   10963 		goto out;
   10964 	default:
   10965 		break;
   10966 	}
   10967 
   10968 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10969 	if (rv != 0) {
   10970 		goto out;
   10971 	}
   10972 
   10973 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10974 		mediatype = WM_MEDIATYPE_SERDES;
   10975 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10976 		sc->sc_flags |= WM_F_SGMII;
   10977 		mediatype = WM_MEDIATYPE_COPPER;
   10978 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10979 		sc->sc_flags |= WM_F_SGMII;
   10980 		mediatype = WM_MEDIATYPE_SERDES;
   10981 	}
   10982 
   10983 out:
   10984 	/* Restore I2C interface setting */
   10985 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10986 
   10987 	return mediatype;
   10988 }
   10989 
   10990 /*
   10991  * NVM related.
   10992  * Microwire, SPI (w/wo EERD) and Flash.
   10993  */
   10994 
   10995 /* Both spi and uwire */
   10996 
   10997 /*
   10998  * wm_eeprom_sendbits:
   10999  *
   11000  *	Send a series of bits to the EEPROM.
   11001  */
   11002 static void
   11003 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11004 {
   11005 	uint32_t reg;
   11006 	int x;
   11007 
   11008 	reg = CSR_READ(sc, WMREG_EECD);
   11009 
   11010 	for (x = nbits; x > 0; x--) {
   11011 		if (bits & (1U << (x - 1)))
   11012 			reg |= EECD_DI;
   11013 		else
   11014 			reg &= ~EECD_DI;
   11015 		CSR_WRITE(sc, WMREG_EECD, reg);
   11016 		CSR_WRITE_FLUSH(sc);
   11017 		delay(2);
   11018 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11019 		CSR_WRITE_FLUSH(sc);
   11020 		delay(2);
   11021 		CSR_WRITE(sc, WMREG_EECD, reg);
   11022 		CSR_WRITE_FLUSH(sc);
   11023 		delay(2);
   11024 	}
   11025 }
   11026 
   11027 /*
   11028  * wm_eeprom_recvbits:
   11029  *
   11030  *	Receive a series of bits from the EEPROM.
   11031  */
   11032 static void
   11033 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11034 {
   11035 	uint32_t reg, val;
   11036 	int x;
   11037 
   11038 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11039 
   11040 	val = 0;
   11041 	for (x = nbits; x > 0; x--) {
   11042 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11043 		CSR_WRITE_FLUSH(sc);
   11044 		delay(2);
   11045 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11046 			val |= (1U << (x - 1));
   11047 		CSR_WRITE(sc, WMREG_EECD, reg);
   11048 		CSR_WRITE_FLUSH(sc);
   11049 		delay(2);
   11050 	}
   11051 	*valp = val;
   11052 }
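
/*
 * Note (sketch): both helpers shift bits MSB-first, pulsing EECD_SK
 * low-high-low for each bit.  For example, sending the 3-bit value 0x6
 * puts 1, 1, 0 on EECD_DI in that order:
 */
#if 0
	uint32_t bits = 0x6;
	int x;

	for (x = 3; x > 0; x--)
		printf("%d", (bits >> (x - 1)) & 1);	/* prints "110" */
#endif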
   11053 
   11054 /* Microwire */
   11055 
   11056 /*
   11057  * wm_nvm_read_uwire:
   11058  *
   11059  *	Read a word from the EEPROM using the MicroWire protocol.
   11060  */
   11061 static int
   11062 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11063 {
   11064 	uint32_t reg, val;
   11065 	int i;
   11066 
   11067 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11068 		device_xname(sc->sc_dev), __func__));
   11069 
   11070 	for (i = 0; i < wordcnt; i++) {
   11071 		/* Clear SK and DI. */
   11072 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11073 		CSR_WRITE(sc, WMREG_EECD, reg);
   11074 
   11075 		/*
   11076 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11077 		 * and Xen.
   11078 		 *
   11079 		 * We use this workaround only for 82540 because qemu's
   11080 		 * e1000 act as 82540.
   11081 		 */
   11082 		if (sc->sc_type == WM_T_82540) {
   11083 			reg |= EECD_SK;
   11084 			CSR_WRITE(sc, WMREG_EECD, reg);
   11085 			reg &= ~EECD_SK;
   11086 			CSR_WRITE(sc, WMREG_EECD, reg);
   11087 			CSR_WRITE_FLUSH(sc);
   11088 			delay(2);
   11089 		}
   11090 		/* XXX: end of workaround */
   11091 
   11092 		/* Set CHIP SELECT. */
   11093 		reg |= EECD_CS;
   11094 		CSR_WRITE(sc, WMREG_EECD, reg);
   11095 		CSR_WRITE_FLUSH(sc);
   11096 		delay(2);
   11097 
   11098 		/* Shift in the READ command. */
   11099 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11100 
   11101 		/* Shift in address. */
   11102 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11103 
   11104 		/* Shift out the data. */
   11105 		wm_eeprom_recvbits(sc, &val, 16);
   11106 		data[i] = val & 0xffff;
   11107 
   11108 		/* Clear CHIP SELECT. */
   11109 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11110 		CSR_WRITE(sc, WMREG_EECD, reg);
   11111 		CSR_WRITE_FLUSH(sc);
   11112 		delay(2);
   11113 	}
   11114 
   11115 	return 0;
   11116 }
   11117 
   11118 /* SPI */
   11119 
   11120 /*
   11121  * Set SPI and FLASH related information from the EECD register.
   11122  * For 82541 and 82547, the word size is taken from EEPROM.
   11123  */
   11124 static int
   11125 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11126 {
   11127 	int size;
   11128 	uint32_t reg;
   11129 	uint16_t data;
   11130 
   11131 	reg = CSR_READ(sc, WMREG_EECD);
   11132 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11133 
   11134 	/* Read the size of NVM from EECD by default */
   11135 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11136 	switch (sc->sc_type) {
   11137 	case WM_T_82541:
   11138 	case WM_T_82541_2:
   11139 	case WM_T_82547:
   11140 	case WM_T_82547_2:
    11141 		/* Set a dummy word size so the size word itself can be read */
   11142 		sc->sc_nvm_wordsize = 64;
   11143 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11144 		reg = data;
   11145 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11146 		if (size == 0)
   11147 			size = 6; /* 64 word size */
   11148 		else
   11149 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11150 		break;
   11151 	case WM_T_80003:
   11152 	case WM_T_82571:
   11153 	case WM_T_82572:
   11154 	case WM_T_82573: /* SPI case */
   11155 	case WM_T_82574: /* SPI case */
   11156 	case WM_T_82583: /* SPI case */
   11157 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11158 		if (size > 14)
   11159 			size = 14;
   11160 		break;
   11161 	case WM_T_82575:
   11162 	case WM_T_82576:
   11163 	case WM_T_82580:
   11164 	case WM_T_I350:
   11165 	case WM_T_I354:
   11166 	case WM_T_I210:
   11167 	case WM_T_I211:
   11168 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11169 		if (size > 15)
   11170 			size = 15;
   11171 		break;
   11172 	default:
   11173 		aprint_error_dev(sc->sc_dev,
   11174 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11175 		return -1;
   11176 		break;
   11177 	}
   11178 
   11179 	sc->sc_nvm_wordsize = 1 << size;
   11180 
   11181 	return 0;
   11182 }
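
/*
 * Example: the size field read above is an exponent.  Assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 (see the register header), a size field
 * of 2 yields 1 << (2 + 6) = 256 words.
 */
#if 0
	int size = 2 + NVM_WORD_SIZE_BASE_SHIFT;
	int words = 1 << size;			/* 256 words */
#endif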
   11183 
   11184 /*
   11185  * wm_nvm_ready_spi:
   11186  *
   11187  *	Wait for a SPI EEPROM to be ready for commands.
   11188  */
   11189 static int
   11190 wm_nvm_ready_spi(struct wm_softc *sc)
   11191 {
   11192 	uint32_t val;
   11193 	int usec;
   11194 
   11195 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11196 		device_xname(sc->sc_dev), __func__));
   11197 
   11198 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11199 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11200 		wm_eeprom_recvbits(sc, &val, 8);
   11201 		if ((val & SPI_SR_RDY) == 0)
   11202 			break;
   11203 	}
   11204 	if (usec >= SPI_MAX_RETRIES) {
   11205 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11206 		return 1;
   11207 	}
   11208 	return 0;
   11209 }
   11210 
   11211 /*
   11212  * wm_nvm_read_spi:
   11213  *
    11214  *	Read a word from the EEPROM using the SPI protocol.
   11215  */
   11216 static int
   11217 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11218 {
   11219 	uint32_t reg, val;
   11220 	int i;
   11221 	uint8_t opc;
   11222 
   11223 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11224 		device_xname(sc->sc_dev), __func__));
   11225 
   11226 	/* Clear SK and CS. */
   11227 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11228 	CSR_WRITE(sc, WMREG_EECD, reg);
   11229 	CSR_WRITE_FLUSH(sc);
   11230 	delay(2);
   11231 
   11232 	if (wm_nvm_ready_spi(sc))
   11233 		return 1;
   11234 
   11235 	/* Toggle CS to flush commands. */
   11236 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11237 	CSR_WRITE_FLUSH(sc);
   11238 	delay(2);
   11239 	CSR_WRITE(sc, WMREG_EECD, reg);
   11240 	CSR_WRITE_FLUSH(sc);
   11241 	delay(2);
   11242 
   11243 	opc = SPI_OPC_READ;
   11244 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11245 		opc |= SPI_OPC_A8;
   11246 
   11247 	wm_eeprom_sendbits(sc, opc, 8);
   11248 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11249 
   11250 	for (i = 0; i < wordcnt; i++) {
   11251 		wm_eeprom_recvbits(sc, &val, 16);
   11252 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11253 	}
   11254 
   11255 	/* Raise CS and clear SK. */
   11256 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11257 	CSR_WRITE(sc, WMREG_EECD, reg);
   11258 	CSR_WRITE_FLUSH(sc);
   11259 	delay(2);
   11260 
   11261 	return 0;
   11262 }
   11263 
    11264 /* Using the EERD register */
   11265 
   11266 static int
   11267 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11268 {
   11269 	uint32_t attempts = 100000;
   11270 	uint32_t i, reg = 0;
   11271 	int32_t done = -1;
   11272 
   11273 	for (i = 0; i < attempts; i++) {
   11274 		reg = CSR_READ(sc, rw);
   11275 
   11276 		if (reg & EERD_DONE) {
   11277 			done = 0;
   11278 			break;
   11279 		}
   11280 		delay(5);
   11281 	}
   11282 
   11283 	return done;
   11284 }
   11285 
   11286 static int
   11287 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11288     uint16_t *data)
   11289 {
   11290 	int i, eerd = 0;
   11291 	int error = 0;
   11292 
   11293 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11294 		device_xname(sc->sc_dev), __func__));
   11295 
   11296 	for (i = 0; i < wordcnt; i++) {
   11297 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11298 
   11299 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11300 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11301 		if (error != 0)
   11302 			break;
   11303 
   11304 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11305 	}
   11306 
   11307 	return error;
   11308 }
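
/*
 * Example (sketch): reading a single NVM word through EERD.  The
 * offset is made up for illustration.
 */
#if 0
	uint16_t word;

	if (wm_nvm_read_eerd(sc, 0x000a /* example offset */, 1, &word) == 0)
		aprint_verbose_dev(sc->sc_dev, "NVM[0x0a] = 0x%04x\n", word);
#endif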
   11309 
   11310 /* Flash */
   11311 
   11312 static int
   11313 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11314 {
   11315 	uint32_t eecd;
   11316 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11317 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11318 	uint8_t sig_byte = 0;
   11319 
   11320 	switch (sc->sc_type) {
   11321 	case WM_T_PCH_SPT:
   11322 		/*
   11323 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11324 		 * sector valid bits from the NVM.
   11325 		 */
   11326 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11327 		if ((*bank == 0) || (*bank == 1)) {
   11328 			aprint_error_dev(sc->sc_dev,
   11329 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11330 				*bank);
   11331 			return -1;
   11332 		} else {
   11333 			*bank = *bank - 2;
   11334 			return 0;
   11335 		}
   11336 	case WM_T_ICH8:
   11337 	case WM_T_ICH9:
   11338 		eecd = CSR_READ(sc, WMREG_EECD);
   11339 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11340 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11341 			return 0;
   11342 		}
   11343 		/* FALLTHROUGH */
   11344 	default:
   11345 		/* Default to 0 */
   11346 		*bank = 0;
   11347 
   11348 		/* Check bank 0 */
   11349 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11350 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11351 			*bank = 0;
   11352 			return 0;
   11353 		}
   11354 
   11355 		/* Check bank 1 */
   11356 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11357 		    &sig_byte);
   11358 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11359 			*bank = 1;
   11360 			return 0;
   11361 		}
   11362 	}
   11363 
   11364 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11365 		device_xname(sc->sc_dev)));
   11366 	return -1;
   11367 }
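/*
 * For example: on PCH_SPT the two CTRL_EXT_NVMVS bits read back as 2 or 3
 * once a valid bank has been loaded, which the code above maps to bank 0
 * or bank 1 respectively; raw values 0 and 1 mean no valid bank.  On the
 * other chips the bank is chosen by reading the signature byte at word
 * ICH_NVM_SIG_WORD of each bank and comparing it with ICH_NVM_SIG_VALUE.
 */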
   11368 
   11369 /******************************************************************************
   11370  * This function does initial flash setup so that a new read/write/erase cycle
   11371  * can be started.
   11372  *
   11373  * sc - The pointer to the hw structure
   11374  ****************************************************************************/
   11375 static int32_t
   11376 wm_ich8_cycle_init(struct wm_softc *sc)
   11377 {
   11378 	uint16_t hsfsts;
   11379 	int32_t error = 1;
   11380 	int32_t i     = 0;
   11381 
   11382 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11383 
    11384 	/* Check the Flash Descriptor Valid bit in HW status */
   11385 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11386 		return error;
   11387 	}
   11388 
    11389 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   11391 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11392 
   11393 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11394 
   11395 	/*
    11396 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11397 	 * check against in order to start a new cycle, or the FDONE bit
    11398 	 * should be changed in the hardware so that it is 1 after hardware
    11399 	 * reset, which can then be used to tell whether a cycle is in
    11400 	 * progress or has been completed.  We should also have some software
    11401 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit so
    11402 	 * that accesses to those bits by two threads are serialized and the
    11403 	 * threads cannot start a cycle at the same time.
   11404 	 */
   11405 
   11406 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11407 		/*
   11408 		 * There is no cycle running at present, so we can start a
   11409 		 * cycle
   11410 		 */
   11411 
   11412 		/* Begin by setting Flash Cycle Done. */
   11413 		hsfsts |= HSFSTS_DONE;
   11414 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11415 		error = 0;
   11416 	} else {
   11417 		/*
    11418 		 * Otherwise, poll for some time so the current cycle has a
    11419 		 * chance to end before giving up.
   11420 		 */
   11421 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11422 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11423 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11424 				error = 0;
   11425 				break;
   11426 			}
   11427 			delay(1);
   11428 		}
   11429 		if (error == 0) {
   11430 			/*
    11431 			 * The previous cycle finished before the timeout,
    11432 			 * so now set the Flash Cycle Done bit.
   11433 			 */
   11434 			hsfsts |= HSFSTS_DONE;
   11435 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11436 		}
   11437 	}
   11438 	return error;
   11439 }
   11440 
   11441 /******************************************************************************
   11442  * This function starts a flash cycle and waits for its completion
   11443  *
   11444  * sc - The pointer to the hw structure
   11445  ****************************************************************************/
   11446 static int32_t
   11447 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11448 {
   11449 	uint16_t hsflctl;
   11450 	uint16_t hsfsts;
   11451 	int32_t error = 1;
   11452 	uint32_t i = 0;
   11453 
   11454 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11455 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11456 	hsflctl |= HSFCTL_GO;
   11457 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11458 
    11459 	/* Wait until the FDONE bit is set to 1 */
   11460 	do {
   11461 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11462 		if (hsfsts & HSFSTS_DONE)
   11463 			break;
   11464 		delay(1);
   11465 		i++;
   11466 	} while (i < timeout);
    11467 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11468 		error = 0;
   11469 
   11470 	return error;
   11471 }
   11472 
   11473 /******************************************************************************
   11474  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11475  *
   11476  * sc - The pointer to the hw structure
   11477  * index - The index of the byte or word to read.
   11478  * size - Size of data to read, 1=byte 2=word, 4=dword
   11479  * data - Pointer to the word to store the value read.
   11480  *****************************************************************************/
   11481 static int32_t
   11482 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11483     uint32_t size, uint32_t *data)
   11484 {
   11485 	uint16_t hsfsts;
   11486 	uint16_t hsflctl;
   11487 	uint32_t flash_linear_address;
   11488 	uint32_t flash_data = 0;
   11489 	int32_t error = 1;
   11490 	int32_t count = 0;
   11491 
    11492 	if (size < 1 || size > 4 || data == NULL ||
   11493 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11494 		return error;
   11495 
   11496 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11497 	    sc->sc_ich8_flash_base;
   11498 
   11499 	do {
   11500 		delay(1);
   11501 		/* Steps */
   11502 		error = wm_ich8_cycle_init(sc);
   11503 		if (error)
   11504 			break;
   11505 
   11506 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11507 		/* Byte count field is size - 1: 0 is 1 byte, 1 is 2 bytes, 3 is 4 bytes. */
   11508 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11509 		    & HSFCTL_BCOUNT_MASK;
   11510 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11511 		if (sc->sc_type == WM_T_PCH_SPT) {
   11512 			/*
    11513 			 * In SPT, this register is in the LAN memory space, not
   11514 			 * flash. Therefore, only 32 bit access is supported.
   11515 			 */
   11516 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11517 			    (uint32_t)hsflctl);
   11518 		} else
   11519 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11520 
   11521 		/*
   11522 		 * Write the last 24 bits of index into Flash Linear address
   11523 		 * field in Flash Address
   11524 		 */
    11525 		/* TODO: maybe check the index against the size of the flash */
   11526 
   11527 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11528 
   11529 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11530 
   11531 		/*
    11532 		 * If FCERR is set to 1, clear it and retry the whole
    11533 		 * sequence a few more times; otherwise read in the Flash
    11534 		 * Data0 register, least significant byte first.
   11536 		 */
   11537 		if (error == 0) {
   11538 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11539 			if (size == 1)
   11540 				*data = (uint8_t)(flash_data & 0x000000FF);
   11541 			else if (size == 2)
   11542 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11543 			else if (size == 4)
   11544 				*data = (uint32_t)flash_data;
   11545 			break;
   11546 		} else {
   11547 			/*
   11548 			 * If we've gotten here, then things are probably
   11549 			 * completely hosed, but if the error condition is
   11550 			 * detected, it won't hurt to give it another try...
   11551 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11552 			 */
   11553 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11554 			if (hsfsts & HSFSTS_ERR) {
   11555 				/* Repeat for some time before giving up. */
   11556 				continue;
   11557 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11558 				break;
   11559 		}
   11560 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11561 
   11562 	return error;
   11563 }
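/*
 * Worked example (illustrative numbers): reading the 16-bit word at NVM
 * byte offset 6 on a device whose sc_ich8_flash_base is 0x1000 programs
 * FADDR with 0x1006, sets the byte count field to 1 (size - 1 for a
 * 2-byte access), selects ICH_CYCLE_READ, and on successful completion
 * takes the low 16 bits of FDATA0 as the result.
 */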
   11564 
   11565 /******************************************************************************
   11566  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11567  *
   11568  * sc - pointer to wm_hw structure
   11569  * index - The index of the byte to read.
   11570  * data - Pointer to a byte to store the value read.
   11571  *****************************************************************************/
   11572 static int32_t
   11573 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11574 {
   11575 	int32_t status;
   11576 	uint32_t word = 0;
   11577 
   11578 	status = wm_read_ich8_data(sc, index, 1, &word);
   11579 	if (status == 0)
   11580 		*data = (uint8_t)word;
   11581 	else
   11582 		*data = 0;
   11583 
   11584 	return status;
   11585 }
   11586 
   11587 /******************************************************************************
   11588  * Reads a word from the NVM using the ICH8 flash access registers.
   11589  *
   11590  * sc - pointer to wm_hw structure
   11591  * index - The starting byte index of the word to read.
   11592  * data - Pointer to a word to store the value read.
   11593  *****************************************************************************/
   11594 static int32_t
   11595 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11596 {
   11597 	int32_t status;
   11598 	uint32_t word = 0;
   11599 
   11600 	status = wm_read_ich8_data(sc, index, 2, &word);
   11601 	if (status == 0)
   11602 		*data = (uint16_t)word;
   11603 	else
   11604 		*data = 0;
   11605 
   11606 	return status;
   11607 }
   11608 
   11609 /******************************************************************************
   11610  * Reads a dword from the NVM using the ICH8 flash access registers.
   11611  *
   11612  * sc - pointer to wm_hw structure
    11613  * index - The starting byte index of the dword to read.
    11614  * data - Pointer to a dword to store the value read.
   11615  *****************************************************************************/
   11616 static int32_t
   11617 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11618 {
   11619 	int32_t status;
   11620 
   11621 	status = wm_read_ich8_data(sc, index, 4, data);
   11622 	return status;
   11623 }
   11624 
   11625 /******************************************************************************
   11626  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11627  * register.
   11628  *
   11629  * sc - Struct containing variables accessed by shared code
   11630  * offset - offset of word in the EEPROM to read
   11631  * data - word read from the EEPROM
   11632  * words - number of words to read
   11633  *****************************************************************************/
   11634 static int
   11635 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11636 {
   11637 	int32_t  error = 0;
   11638 	uint32_t flash_bank = 0;
   11639 	uint32_t act_offset = 0;
   11640 	uint32_t bank_offset = 0;
   11641 	uint16_t word = 0;
   11642 	uint16_t i = 0;
   11643 
   11644 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11645 		device_xname(sc->sc_dev), __func__));
   11646 
   11647 	/*
   11648 	 * We need to know which is the valid flash bank.  In the event
   11649 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11650 	 * managing flash_bank.  So it cannot be trusted and needs
   11651 	 * to be updated with each read.
   11652 	 */
   11653 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11654 	if (error) {
   11655 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11656 			device_xname(sc->sc_dev)));
   11657 		flash_bank = 0;
   11658 	}
   11659 
   11660 	/*
   11661 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11662 	 * size
   11663 	 */
   11664 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11665 
   11666 	error = wm_get_swfwhw_semaphore(sc);
   11667 	if (error) {
   11668 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11669 		    __func__);
   11670 		return error;
   11671 	}
   11672 
   11673 	for (i = 0; i < words; i++) {
   11674 		/* The NVM part needs a byte offset, hence * 2 */
   11675 		act_offset = bank_offset + ((offset + i) * 2);
   11676 		error = wm_read_ich8_word(sc, act_offset, &word);
   11677 		if (error) {
   11678 			aprint_error_dev(sc->sc_dev,
   11679 			    "%s: failed to read NVM\n", __func__);
   11680 			break;
   11681 		}
   11682 		data[i] = word;
   11683 	}
   11684 
   11685 	wm_put_swfwhw_semaphore(sc);
   11686 	return error;
   11687 }
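/*
 * Offset arithmetic example (made-up sizes): if bank 1 is valid and
 * sc_ich8_flash_bank_size is 0x800 words, bank_offset becomes
 * 0x800 * 2 = 0x1000 bytes, so NVM word 3 is fetched from flash byte
 * offset 0x1000 + 3 * 2 = 0x1006.
 */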
   11688 
   11689 /******************************************************************************
   11690  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11691  * register.
   11692  *
   11693  * sc - Struct containing variables accessed by shared code
   11694  * offset - offset of word in the EEPROM to read
   11695  * data - word read from the EEPROM
   11696  * words - number of words to read
   11697  *****************************************************************************/
   11698 static int
   11699 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11700 {
   11701 	int32_t  error = 0;
   11702 	uint32_t flash_bank = 0;
   11703 	uint32_t act_offset = 0;
   11704 	uint32_t bank_offset = 0;
   11705 	uint32_t dword = 0;
   11706 	uint16_t i = 0;
   11707 
   11708 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11709 		device_xname(sc->sc_dev), __func__));
   11710 
   11711 	/*
   11712 	 * We need to know which is the valid flash bank.  In the event
   11713 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11714 	 * managing flash_bank.  So it cannot be trusted and needs
   11715 	 * to be updated with each read.
   11716 	 */
   11717 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11718 	if (error) {
   11719 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11720 			device_xname(sc->sc_dev)));
   11721 		flash_bank = 0;
   11722 	}
   11723 
   11724 	/*
   11725 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11726 	 * size
   11727 	 */
   11728 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11729 
   11730 	error = wm_get_swfwhw_semaphore(sc);
   11731 	if (error) {
   11732 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11733 		    __func__);
   11734 		return error;
   11735 	}
   11736 
   11737 	for (i = 0; i < words; i++) {
   11738 		/* The NVM part needs a byte offset, hence * 2 */
   11739 		act_offset = bank_offset + ((offset + i) * 2);
   11740 		/* but we must read dword aligned, so mask ... */
   11741 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11742 		if (error) {
   11743 			aprint_error_dev(sc->sc_dev,
   11744 			    "%s: failed to read NVM\n", __func__);
   11745 			break;
   11746 		}
   11747 		/* ... and pick out low or high word */
   11748 		if ((act_offset & 0x2) == 0)
   11749 			data[i] = (uint16_t)(dword & 0xFFFF);
   11750 		else
   11751 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11752 	}
   11753 
   11754 	wm_put_swfwhw_semaphore(sc);
   11755 	return error;
   11756 }
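/*
 * For instance (illustrative numbers): with act_offset 0x1006 the code
 * above reads the dword at 0x1004 and, because bit 1 of act_offset is
 * set, returns the high word ((dword >> 16) & 0xFFFF); for act_offset
 * 0x1004 it would return the low word instead.
 */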
   11757 
   11758 /* iNVM */
   11759 
   11760 static int
   11761 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11762 {
    11763 	int32_t rv = -1;	/* Fail unless the requested word is found */
   11764 	uint32_t invm_dword;
   11765 	uint16_t i;
   11766 	uint8_t record_type, word_address;
   11767 
   11768 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11769 		device_xname(sc->sc_dev), __func__));
   11770 
   11771 	for (i = 0; i < INVM_SIZE; i++) {
   11772 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11773 		/* Get record type */
   11774 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11775 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11776 			break;
   11777 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11778 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11779 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11780 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11781 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11782 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11783 			if (word_address == address) {
   11784 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11785 				rv = 0;
   11786 				break;
   11787 			}
   11788 		}
   11789 	}
   11790 
   11791 	return rv;
   11792 }
   11793 
   11794 static int
   11795 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11796 {
   11797 	int rv = 0;
   11798 	int i;
   11799 
   11800 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11801 		device_xname(sc->sc_dev), __func__));
   11802 
   11803 	for (i = 0; i < words; i++) {
   11804 		switch (offset + i) {
   11805 		case NVM_OFF_MACADDR:
   11806 		case NVM_OFF_MACADDR1:
   11807 		case NVM_OFF_MACADDR2:
   11808 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11809 			if (rv != 0) {
   11810 				data[i] = 0xffff;
   11811 				rv = -1;
   11812 			}
   11813 			break;
   11814 		case NVM_OFF_CFG2:
   11815 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11816 			if (rv != 0) {
   11817 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11818 				rv = 0;
   11819 			}
   11820 			break;
   11821 		case NVM_OFF_CFG4:
   11822 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11823 			if (rv != 0) {
   11824 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11825 				rv = 0;
   11826 			}
   11827 			break;
   11828 		case NVM_OFF_LED_1_CFG:
   11829 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11830 			if (rv != 0) {
   11831 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11832 				rv = 0;
   11833 			}
   11834 			break;
   11835 		case NVM_OFF_LED_0_2_CFG:
   11836 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11837 			if (rv != 0) {
   11838 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11839 				rv = 0;
   11840 			}
   11841 			break;
   11842 		case NVM_OFF_ID_LED_SETTINGS:
   11843 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11844 			if (rv != 0) {
   11845 				*data = ID_LED_RESERVED_FFFF;
   11846 				rv = 0;
   11847 			}
   11848 			break;
   11849 		default:
   11850 			DPRINTF(WM_DEBUG_NVM,
   11851 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11852 			*data = NVM_RESERVED_WORD;
   11853 			break;
   11854 		}
   11855 	}
   11856 
   11857 	return rv;
   11858 }
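/*
 * Note on the iNVM scan above: assuming the usual i210/i211 record layout
 * (the record type in the low bits of each dword and the word address and
 * 16-bit payload in the remaining bits, as the INVM_DWORD_TO_* macros
 * decode), a word-autoload record is located by walking all INVM_SIZE
 * dwords and skipping over CSR-autoload and RSA-key structures, whose
 * sizes are fixed.
 */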
   11859 
   11860 /* Lock, detecting NVM type, validate checksum, version and read */
   11861 
   11862 /*
   11863  * wm_nvm_acquire:
   11864  *
   11865  *	Perform the EEPROM handshake required on some chips.
   11866  */
   11867 static int
   11868 wm_nvm_acquire(struct wm_softc *sc)
   11869 {
   11870 	uint32_t reg;
   11871 	int x;
   11872 	int ret = 0;
   11873 
   11874 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11875 		device_xname(sc->sc_dev), __func__));
   11876 
   11877 	if (sc->sc_type >= WM_T_ICH8) {
   11878 		ret = wm_get_nvm_ich8lan(sc);
   11879 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11880 		ret = wm_get_swfwhw_semaphore(sc);
   11881 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11882 		/* This will also do wm_get_swsm_semaphore() if needed */
   11883 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11884 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11885 		ret = wm_get_swsm_semaphore(sc);
   11886 	}
   11887 
   11888 	if (ret) {
   11889 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11890 			__func__);
   11891 		return 1;
   11892 	}
   11893 
   11894 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11895 		reg = CSR_READ(sc, WMREG_EECD);
   11896 
   11897 		/* Request EEPROM access. */
   11898 		reg |= EECD_EE_REQ;
   11899 		CSR_WRITE(sc, WMREG_EECD, reg);
   11900 
   11901 		/* ..and wait for it to be granted. */
   11902 		for (x = 0; x < 1000; x++) {
   11903 			reg = CSR_READ(sc, WMREG_EECD);
   11904 			if (reg & EECD_EE_GNT)
   11905 				break;
   11906 			delay(5);
   11907 		}
   11908 		if ((reg & EECD_EE_GNT) == 0) {
   11909 			aprint_error_dev(sc->sc_dev,
   11910 			    "could not acquire EEPROM GNT\n");
   11911 			reg &= ~EECD_EE_REQ;
   11912 			CSR_WRITE(sc, WMREG_EECD, reg);
   11913 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11914 				wm_put_swfwhw_semaphore(sc);
   11915 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11916 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11917 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11918 				wm_put_swsm_semaphore(sc);
   11919 			return 1;
   11920 		}
   11921 	}
   11922 
   11923 	return 0;
   11924 }
   11925 
   11926 /*
   11927  * wm_nvm_release:
   11928  *
   11929  *	Release the EEPROM mutex.
   11930  */
   11931 static void
   11932 wm_nvm_release(struct wm_softc *sc)
   11933 {
   11934 	uint32_t reg;
   11935 
   11936 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11937 		device_xname(sc->sc_dev), __func__));
   11938 
   11939 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11940 		reg = CSR_READ(sc, WMREG_EECD);
   11941 		reg &= ~EECD_EE_REQ;
   11942 		CSR_WRITE(sc, WMREG_EECD, reg);
   11943 	}
   11944 
   11945 	if (sc->sc_type >= WM_T_ICH8) {
   11946 		wm_put_nvm_ich8lan(sc);
   11947 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11948 		wm_put_swfwhw_semaphore(sc);
   11949 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11950 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11951 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11952 		wm_put_swsm_semaphore(sc);
   11953 }
   11954 
   11955 static int
   11956 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11957 {
   11958 	uint32_t eecd = 0;
   11959 
   11960 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11961 	    || sc->sc_type == WM_T_82583) {
   11962 		eecd = CSR_READ(sc, WMREG_EECD);
   11963 
   11964 		/* Isolate bits 15 & 16 */
   11965 		eecd = ((eecd >> 15) & 0x03);
   11966 
   11967 		/* If both bits are set, device is Flash type */
   11968 		if (eecd == 0x03)
   11969 			return 0;
   11970 	}
   11971 	return 1;
   11972 }
   11973 
   11974 static int
   11975 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11976 {
   11977 	uint32_t eec;
   11978 
   11979 	eec = CSR_READ(sc, WMREG_EEC);
   11980 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11981 		return 1;
   11982 
   11983 	return 0;
   11984 }
   11985 
   11986 /*
   11987  * wm_nvm_validate_checksum
   11988  *
   11989  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11990  */
   11991 static int
   11992 wm_nvm_validate_checksum(struct wm_softc *sc)
   11993 {
   11994 	uint16_t checksum;
   11995 	uint16_t eeprom_data;
   11996 #ifdef WM_DEBUG
   11997 	uint16_t csum_wordaddr, valid_checksum;
   11998 #endif
   11999 	int i;
   12000 
   12001 	checksum = 0;
   12002 
   12003 	/* Don't check for I211 */
   12004 	if (sc->sc_type == WM_T_I211)
   12005 		return 0;
   12006 
   12007 #ifdef WM_DEBUG
   12008 	if (sc->sc_type == WM_T_PCH_LPT) {
   12009 		csum_wordaddr = NVM_OFF_COMPAT;
   12010 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12011 	} else {
   12012 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12013 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12014 	}
   12015 
   12016 	/* Dump EEPROM image for debug */
   12017 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12018 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12019 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12020 		/* XXX PCH_SPT? */
   12021 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12022 		if ((eeprom_data & valid_checksum) == 0) {
   12023 			DPRINTF(WM_DEBUG_NVM,
   12024 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12025 				device_xname(sc->sc_dev), eeprom_data,
   12026 				    valid_checksum));
   12027 		}
   12028 	}
   12029 
   12030 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12031 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12032 		for (i = 0; i < NVM_SIZE; i++) {
   12033 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12034 				printf("XXXX ");
   12035 			else
   12036 				printf("%04hx ", eeprom_data);
   12037 			if (i % 8 == 7)
   12038 				printf("\n");
   12039 		}
   12040 	}
   12041 
   12042 #endif /* WM_DEBUG */
   12043 
   12044 	for (i = 0; i < NVM_SIZE; i++) {
   12045 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12046 			return 1;
   12047 		checksum += eeprom_data;
   12048 	}
   12049 
   12050 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12051 #ifdef WM_DEBUG
   12052 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12053 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12054 #endif
   12055 	}
   12056 
   12057 	return 0;
   12058 }
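/*
 * Checksum example: the image is considered valid when the 16-bit sum of
 * words 0x00 through NVM_SIZE - 1 equals NVM_CHECKSUM.  Provisioning
 * tools typically store, in the last checksummed word, the value
 * NVM_CHECKSUM minus the sum of all preceding words so that the total
 * wraps to exactly NVM_CHECKSUM.  As the code above shows, a mismatch is
 * only reported under WM_DEBUG and is otherwise ignored.
 */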
   12059 
   12060 static void
   12061 wm_nvm_version_invm(struct wm_softc *sc)
   12062 {
   12063 	uint32_t dword;
   12064 
   12065 	/*
    12066 	 * Linux's code to decode the version is very strange, so we don't
    12067 	 * follow that algorithm and instead just use word 61 as the
    12068 	 * document describes.  Perhaps it's not perfect, though...
   12069 	 *
   12070 	 * Example:
   12071 	 *
   12072 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12073 	 */
   12074 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12075 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12076 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12077 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12078 }
   12079 
   12080 static void
   12081 wm_nvm_version(struct wm_softc *sc)
   12082 {
   12083 	uint16_t major, minor, build, patch;
   12084 	uint16_t uid0, uid1;
   12085 	uint16_t nvm_data;
   12086 	uint16_t off;
   12087 	bool check_version = false;
   12088 	bool check_optionrom = false;
   12089 	bool have_build = false;
   12090 
   12091 	/*
   12092 	 * Version format:
   12093 	 *
   12094 	 * XYYZ
   12095 	 * X0YZ
   12096 	 * X0YY
   12097 	 *
   12098 	 * Example:
   12099 	 *
   12100 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12101 	 *	82571	0x50a6	5.10.6?
   12102 	 *	82572	0x506a	5.6.10?
   12103 	 *	82572EI	0x5069	5.6.9?
   12104 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12105 	 *		0x2013	2.1.3?
    12106 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12107 	 */
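	/*
	 * Walking 0x50a2 through the code below (assuming the usual
	 * NVM_MAJOR/NVM_MINOR/NVM_BUILD field split): major = 0x5,
	 * minor = 0x0a, build = 0x2; the BCD-style conversion then turns
	 * minor 0x0a into decimal 10, giving "5.10.2" as in the table above.
	 */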
   12108 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12109 	switch (sc->sc_type) {
   12110 	case WM_T_82571:
   12111 	case WM_T_82572:
   12112 	case WM_T_82574:
   12113 	case WM_T_82583:
   12114 		check_version = true;
   12115 		check_optionrom = true;
   12116 		have_build = true;
   12117 		break;
   12118 	case WM_T_82575:
   12119 	case WM_T_82576:
   12120 	case WM_T_82580:
   12121 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12122 			check_version = true;
   12123 		break;
   12124 	case WM_T_I211:
   12125 		wm_nvm_version_invm(sc);
   12126 		goto printver;
   12127 	case WM_T_I210:
   12128 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12129 			wm_nvm_version_invm(sc);
   12130 			goto printver;
   12131 		}
   12132 		/* FALLTHROUGH */
   12133 	case WM_T_I350:
   12134 	case WM_T_I354:
   12135 		check_version = true;
   12136 		check_optionrom = true;
   12137 		break;
   12138 	default:
   12139 		return;
   12140 	}
   12141 	if (check_version) {
   12142 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12143 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12144 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12145 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12146 			build = nvm_data & NVM_BUILD_MASK;
   12147 			have_build = true;
   12148 		} else
   12149 			minor = nvm_data & 0x00ff;
   12150 
    12151 		/* Convert the BCD-coded minor number to decimal */
   12152 		minor = (minor / 16) * 10 + (minor % 16);
   12153 		sc->sc_nvm_ver_major = major;
   12154 		sc->sc_nvm_ver_minor = minor;
   12155 
   12156 printver:
   12157 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12158 		    sc->sc_nvm_ver_minor);
   12159 		if (have_build) {
   12160 			sc->sc_nvm_ver_build = build;
   12161 			aprint_verbose(".%d", build);
   12162 		}
   12163 	}
   12164 	if (check_optionrom) {
   12165 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12166 		/* Option ROM Version */
   12167 		if ((off != 0x0000) && (off != 0xffff)) {
   12168 			off += NVM_COMBO_VER_OFF;
   12169 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12170 			wm_nvm_read(sc, off, 1, &uid0);
   12171 			if ((uid0 != 0) && (uid0 != 0xffff)
   12172 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12173 				/* 16bits */
   12174 				major = uid0 >> 8;
   12175 				build = (uid0 << 8) | (uid1 >> 8);
   12176 				patch = uid1 & 0x00ff;
   12177 				aprint_verbose(", option ROM Version %d.%d.%d",
   12178 				    major, build, patch);
   12179 			}
   12180 		}
   12181 	}
   12182 
   12183 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12184 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12185 }
   12186 
   12187 /*
   12188  * wm_nvm_read:
   12189  *
   12190  *	Read data from the serial EEPROM.
   12191  */
   12192 static int
   12193 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12194 {
   12195 	int rv;
   12196 
   12197 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12198 		device_xname(sc->sc_dev), __func__));
   12199 
   12200 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12201 		return 1;
   12202 
   12203 	if (wm_nvm_acquire(sc))
   12204 		return 1;
   12205 
   12206 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12207 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12208 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12209 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12210 	else if (sc->sc_type == WM_T_PCH_SPT)
   12211 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12212 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12213 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12214 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12215 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12216 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12217 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12218 	else
   12219 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12220 
   12221 	wm_nvm_release(sc);
   12222 	return rv;
   12223 }
   12224 
   12225 /*
   12226  * Hardware semaphores.
    12227  * Very complex...
   12228  */
   12229 
   12230 static int
   12231 wm_get_null(struct wm_softc *sc)
   12232 {
   12233 
   12234 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12235 		device_xname(sc->sc_dev), __func__));
   12236 	return 0;
   12237 }
   12238 
   12239 static void
   12240 wm_put_null(struct wm_softc *sc)
   12241 {
   12242 
   12243 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12244 		device_xname(sc->sc_dev), __func__));
   12245 	return;
   12246 }
   12247 
   12248 /*
   12249  * Get hardware semaphore.
   12250  * Same as e1000_get_hw_semaphore_generic()
   12251  */
   12252 static int
   12253 wm_get_swsm_semaphore(struct wm_softc *sc)
   12254 {
   12255 	int32_t timeout;
   12256 	uint32_t swsm;
   12257 
   12258 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12259 		device_xname(sc->sc_dev), __func__));
   12260 	KASSERT(sc->sc_nvm_wordsize > 0);
   12261 
   12262 	/* Get the SW semaphore. */
   12263 	timeout = sc->sc_nvm_wordsize + 1;
   12264 	while (timeout) {
   12265 		swsm = CSR_READ(sc, WMREG_SWSM);
   12266 
   12267 		if ((swsm & SWSM_SMBI) == 0)
   12268 			break;
   12269 
   12270 		delay(50);
   12271 		timeout--;
   12272 	}
   12273 
   12274 	if (timeout == 0) {
   12275 		aprint_error_dev(sc->sc_dev,
   12276 		    "could not acquire SWSM SMBI\n");
   12277 		return 1;
   12278 	}
   12279 
   12280 	/* Get the FW semaphore. */
   12281 	timeout = sc->sc_nvm_wordsize + 1;
   12282 	while (timeout) {
   12283 		swsm = CSR_READ(sc, WMREG_SWSM);
   12284 		swsm |= SWSM_SWESMBI;
   12285 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12286 		/* If we managed to set the bit we got the semaphore. */
   12287 		swsm = CSR_READ(sc, WMREG_SWSM);
   12288 		if (swsm & SWSM_SWESMBI)
   12289 			break;
   12290 
   12291 		delay(50);
   12292 		timeout--;
   12293 	}
   12294 
   12295 	if (timeout == 0) {
   12296 		aprint_error_dev(sc->sc_dev,
   12297 		    "could not acquire SWSM SWESMBI\n");
   12298 		/* Release semaphores */
   12299 		wm_put_swsm_semaphore(sc);
   12300 		return 1;
   12301 	}
   12302 	return 0;
   12303 }
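/*
 * The acquisition above is the standard two-stage SWSM handshake: first
 * wait for the hardware to clear SMBI (the software semaphore), then set
 * SWESMBI and read it back.  If the readback shows the bit, software owns
 * the software/firmware semaphore; otherwise firmware holds it and we
 * retry.  Both bits are dropped together in wm_put_swsm_semaphore().
 */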
   12304 
   12305 /*
   12306  * Put hardware semaphore.
   12307  * Same as e1000_put_hw_semaphore_generic()
   12308  */
   12309 static void
   12310 wm_put_swsm_semaphore(struct wm_softc *sc)
   12311 {
   12312 	uint32_t swsm;
   12313 
   12314 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12315 		device_xname(sc->sc_dev), __func__));
   12316 
   12317 	swsm = CSR_READ(sc, WMREG_SWSM);
   12318 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12319 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12320 }
   12321 
   12322 /*
   12323  * Get SW/FW semaphore.
   12324  * Same as e1000_acquire_swfw_sync_82575().
   12325  */
   12326 static int
   12327 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12328 {
   12329 	uint32_t swfw_sync;
   12330 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12331 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12332 	int timeout = 200;
   12333 
   12334 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12335 		device_xname(sc->sc_dev), __func__));
   12336 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12337 
   12338 	for (timeout = 0; timeout < 200; timeout++) {
   12339 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12340 			if (wm_get_swsm_semaphore(sc)) {
   12341 				aprint_error_dev(sc->sc_dev,
   12342 				    "%s: failed to get semaphore\n",
   12343 				    __func__);
   12344 				return 1;
   12345 			}
   12346 		}
   12347 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12348 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12349 			swfw_sync |= swmask;
   12350 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12351 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12352 				wm_put_swsm_semaphore(sc);
   12353 			return 0;
   12354 		}
   12355 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12356 			wm_put_swsm_semaphore(sc);
   12357 		delay(5000);
   12358 	}
   12359 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12360 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12361 	return 1;
   12362 }
   12363 
   12364 static void
   12365 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12366 {
   12367 	uint32_t swfw_sync;
   12368 
   12369 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12370 		device_xname(sc->sc_dev), __func__));
   12371 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12372 
   12373 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12374 		while (wm_get_swsm_semaphore(sc) != 0)
   12375 			continue;
   12376 	}
   12377 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12378 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12379 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12380 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12381 		wm_put_swsm_semaphore(sc);
   12382 }
   12383 
   12384 static int
   12385 wm_get_phy_82575(struct wm_softc *sc)
   12386 {
   12387 
   12388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12389 		device_xname(sc->sc_dev), __func__));
   12390 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12391 }
   12392 
   12393 static void
   12394 wm_put_phy_82575(struct wm_softc *sc)
   12395 {
   12396 
   12397 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12398 		device_xname(sc->sc_dev), __func__));
   12399 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12400 }
   12401 
   12402 static int
   12403 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12404 {
   12405 	uint32_t ext_ctrl;
   12406 	int timeout = 200;
   12407 
   12408 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12409 		device_xname(sc->sc_dev), __func__));
   12410 
   12411 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12412 	for (timeout = 0; timeout < 200; timeout++) {
   12413 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12414 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12415 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12416 
   12417 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12418 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12419 			return 0;
   12420 		delay(5000);
   12421 	}
   12422 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12423 	    device_xname(sc->sc_dev), ext_ctrl);
   12424 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12425 	return 1;
   12426 }
   12427 
   12428 static void
   12429 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12430 {
   12431 	uint32_t ext_ctrl;
   12432 
   12433 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12434 		device_xname(sc->sc_dev), __func__));
   12435 
   12436 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12437 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12438 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12439 
   12440 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12441 }
   12442 
   12443 static int
   12444 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12445 {
   12446 	uint32_t ext_ctrl;
   12447 	int timeout;
   12448 
   12449 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12450 		device_xname(sc->sc_dev), __func__));
   12451 	mutex_enter(sc->sc_ich_phymtx);
   12452 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12453 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12454 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12455 			break;
   12456 		delay(1000);
   12457 	}
   12458 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12459 		printf("%s: SW has already locked the resource\n",
   12460 		    device_xname(sc->sc_dev));
   12461 		goto out;
   12462 	}
   12463 
   12464 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12465 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12466 	for (timeout = 0; timeout < 1000; timeout++) {
   12467 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12468 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12469 			break;
   12470 		delay(1000);
   12471 	}
   12472 	if (timeout >= 1000) {
   12473 		printf("%s: failed to acquire semaphore\n",
   12474 		    device_xname(sc->sc_dev));
   12475 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12476 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12477 		goto out;
   12478 	}
   12479 	return 0;
   12480 
   12481 out:
   12482 	mutex_exit(sc->sc_ich_phymtx);
   12483 	return 1;
   12484 }
   12485 
   12486 static void
   12487 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12488 {
   12489 	uint32_t ext_ctrl;
   12490 
   12491 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12492 		device_xname(sc->sc_dev), __func__));
   12493 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12494 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12495 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12496 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12497 	} else {
   12498 		printf("%s: Semaphore unexpectedly released\n",
   12499 		    device_xname(sc->sc_dev));
   12500 	}
   12501 
   12502 	mutex_exit(sc->sc_ich_phymtx);
   12503 }
   12504 
   12505 static int
   12506 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12507 {
   12508 
   12509 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12510 		device_xname(sc->sc_dev), __func__));
   12511 	mutex_enter(sc->sc_ich_nvmmtx);
   12512 
   12513 	return 0;
   12514 }
   12515 
   12516 static void
   12517 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12518 {
   12519 
   12520 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12521 		device_xname(sc->sc_dev), __func__));
   12522 	mutex_exit(sc->sc_ich_nvmmtx);
   12523 }
   12524 
   12525 static int
   12526 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12527 {
   12528 	int i = 0;
   12529 	uint32_t reg;
   12530 
   12531 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12532 		device_xname(sc->sc_dev), __func__));
   12533 
   12534 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12535 	do {
   12536 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12537 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12538 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12539 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12540 			break;
   12541 		delay(2*1000);
   12542 		i++;
   12543 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12544 
   12545 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12546 		wm_put_hw_semaphore_82573(sc);
   12547 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12548 		    device_xname(sc->sc_dev));
   12549 		return -1;
   12550 	}
   12551 
   12552 	return 0;
   12553 }
   12554 
   12555 static void
   12556 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12557 {
   12558 	uint32_t reg;
   12559 
   12560 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12561 		device_xname(sc->sc_dev), __func__));
   12562 
   12563 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12564 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12565 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12566 }
   12567 
   12568 /*
   12569  * Management mode and power management related subroutines.
   12570  * BMC, AMT, suspend/resume and EEE.
   12571  */
   12572 
   12573 #ifdef WM_WOL
   12574 static int
   12575 wm_check_mng_mode(struct wm_softc *sc)
   12576 {
   12577 	int rv;
   12578 
   12579 	switch (sc->sc_type) {
   12580 	case WM_T_ICH8:
   12581 	case WM_T_ICH9:
   12582 	case WM_T_ICH10:
   12583 	case WM_T_PCH:
   12584 	case WM_T_PCH2:
   12585 	case WM_T_PCH_LPT:
   12586 	case WM_T_PCH_SPT:
   12587 		rv = wm_check_mng_mode_ich8lan(sc);
   12588 		break;
   12589 	case WM_T_82574:
   12590 	case WM_T_82583:
   12591 		rv = wm_check_mng_mode_82574(sc);
   12592 		break;
   12593 	case WM_T_82571:
   12594 	case WM_T_82572:
   12595 	case WM_T_82573:
   12596 	case WM_T_80003:
   12597 		rv = wm_check_mng_mode_generic(sc);
   12598 		break;
   12599 	default:
    12600 		/* nothing to do */
   12601 		rv = 0;
   12602 		break;
   12603 	}
   12604 
   12605 	return rv;
   12606 }
   12607 
   12608 static int
   12609 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12610 {
   12611 	uint32_t fwsm;
   12612 
   12613 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12614 
   12615 	if (((fwsm & FWSM_FW_VALID) != 0)
   12616 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12617 		return 1;
   12618 
   12619 	return 0;
   12620 }
   12621 
   12622 static int
   12623 wm_check_mng_mode_82574(struct wm_softc *sc)
   12624 {
   12625 	uint16_t data;
   12626 
   12627 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12628 
   12629 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12630 		return 1;
   12631 
   12632 	return 0;
   12633 }
   12634 
   12635 static int
   12636 wm_check_mng_mode_generic(struct wm_softc *sc)
   12637 {
   12638 	uint32_t fwsm;
   12639 
   12640 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12641 
   12642 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12643 		return 1;
   12644 
   12645 	return 0;
   12646 }
   12647 #endif /* WM_WOL */
   12648 
   12649 static int
   12650 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12651 {
   12652 	uint32_t manc, fwsm, factps;
   12653 
   12654 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12655 		return 0;
   12656 
   12657 	manc = CSR_READ(sc, WMREG_MANC);
   12658 
   12659 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12660 		device_xname(sc->sc_dev), manc));
   12661 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12662 		return 0;
   12663 
   12664 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12665 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12666 		factps = CSR_READ(sc, WMREG_FACTPS);
   12667 		if (((factps & FACTPS_MNGCG) == 0)
   12668 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12669 			return 1;
    12670 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12671 		uint16_t data;
   12672 
   12673 		factps = CSR_READ(sc, WMREG_FACTPS);
   12674 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12675 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12676 			device_xname(sc->sc_dev), factps, data));
   12677 		if (((factps & FACTPS_MNGCG) == 0)
   12678 		    && ((data & NVM_CFG2_MNGM_MASK)
   12679 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12680 			return 1;
   12681 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12682 	    && ((manc & MANC_ASF_EN) == 0))
   12683 		return 1;
   12684 
   12685 	return 0;
   12686 }
   12687 
   12688 static bool
   12689 wm_phy_resetisblocked(struct wm_softc *sc)
   12690 {
   12691 	bool blocked = false;
   12692 	uint32_t reg;
   12693 	int i = 0;
   12694 
   12695 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12696 		device_xname(sc->sc_dev), __func__));
   12697 
   12698 	switch (sc->sc_type) {
   12699 	case WM_T_ICH8:
   12700 	case WM_T_ICH9:
   12701 	case WM_T_ICH10:
   12702 	case WM_T_PCH:
   12703 	case WM_T_PCH2:
   12704 	case WM_T_PCH_LPT:
   12705 	case WM_T_PCH_SPT:
   12706 		do {
   12707 			reg = CSR_READ(sc, WMREG_FWSM);
   12708 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12709 				blocked = true;
   12710 				delay(10*1000);
   12711 				continue;
   12712 			}
   12713 			blocked = false;
   12714 		} while (blocked && (i++ < 30));
   12715 		return blocked;
   12717 	case WM_T_82571:
   12718 	case WM_T_82572:
   12719 	case WM_T_82573:
   12720 	case WM_T_82574:
   12721 	case WM_T_82583:
   12722 	case WM_T_80003:
   12723 		reg = CSR_READ(sc, WMREG_MANC);
   12724 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12725 			return true;
   12726 		else
   12727 			return false;
   12729 	default:
   12730 		/* no problem */
   12731 		break;
   12732 	}
   12733 
   12734 	return false;
   12735 }
   12736 
   12737 static void
   12738 wm_get_hw_control(struct wm_softc *sc)
   12739 {
   12740 	uint32_t reg;
   12741 
   12742 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12743 		device_xname(sc->sc_dev), __func__));
   12744 
   12745 	if (sc->sc_type == WM_T_82573) {
   12746 		reg = CSR_READ(sc, WMREG_SWSM);
   12747 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12748 	} else if (sc->sc_type >= WM_T_82571) {
   12749 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12750 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12751 	}
   12752 }
   12753 
   12754 static void
   12755 wm_release_hw_control(struct wm_softc *sc)
   12756 {
   12757 	uint32_t reg;
   12758 
   12759 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12760 		device_xname(sc->sc_dev), __func__));
   12761 
   12762 	if (sc->sc_type == WM_T_82573) {
   12763 		reg = CSR_READ(sc, WMREG_SWSM);
   12764 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12765 	} else if (sc->sc_type >= WM_T_82571) {
   12766 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12767 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12768 	}
   12769 }
   12770 
   12771 static void
   12772 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12773 {
   12774 	uint32_t reg;
   12775 
   12776 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12777 		device_xname(sc->sc_dev), __func__));
   12778 
   12779 	if (sc->sc_type < WM_T_PCH2)
   12780 		return;
   12781 
   12782 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12783 
   12784 	if (gate)
   12785 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12786 	else
   12787 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12788 
   12789 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12790 }
   12791 
   12792 static void
   12793 wm_smbustopci(struct wm_softc *sc)
   12794 {
   12795 	uint32_t fwsm, reg;
   12796 	int rv = 0;
   12797 
   12798 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12799 		device_xname(sc->sc_dev), __func__));
   12800 
   12801 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12802 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12803 
   12804 	/* Disable ULP */
   12805 	wm_ulp_disable(sc);
   12806 
   12807 	/* Acquire PHY semaphore */
   12808 	sc->phy.acquire(sc);
   12809 
   12810 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12811 	switch (sc->sc_type) {
   12812 	case WM_T_PCH_LPT:
   12813 	case WM_T_PCH_SPT:
   12814 		if (wm_phy_is_accessible_pchlan(sc))
   12815 			break;
   12816 
   12817 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12818 		reg |= CTRL_EXT_FORCE_SMBUS;
   12819 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12820 #if 0
   12821 		/* XXX Isn't this required??? */
   12822 		CSR_WRITE_FLUSH(sc);
   12823 #endif
   12824 		delay(50 * 1000);
   12825 		/* FALLTHROUGH */
   12826 	case WM_T_PCH2:
   12827 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12828 			break;
   12829 		/* FALLTHROUGH */
   12830 	case WM_T_PCH:
   12831 		if (sc->sc_type == WM_T_PCH)
   12832 			if ((fwsm & FWSM_FW_VALID) != 0)
   12833 				break;
   12834 
   12835 		if (wm_phy_resetisblocked(sc) == true) {
   12836 			printf("XXX reset is blocked(3)\n");
   12837 			break;
   12838 		}
   12839 
   12840 		wm_toggle_lanphypc_pch_lpt(sc);
   12841 
   12842 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12843 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12844 				break;
   12845 
   12846 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12847 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12848 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12849 
   12850 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12851 				break;
   12852 			rv = -1;
   12853 		}
   12854 		break;
   12855 	default:
   12856 		break;
   12857 	}
   12858 
   12859 	/* Release semaphore */
   12860 	sc->phy.release(sc);
   12861 
   12862 	if (rv == 0) {
   12863 		if (wm_phy_resetisblocked(sc)) {
   12864 			printf("XXX reset is blocked(4)\n");
   12865 			goto out;
   12866 		}
   12867 		wm_reset_phy(sc);
   12868 		if (wm_phy_resetisblocked(sc))
   12869 			printf("XXX reset is blocked(4)\n");
   12870 	}
   12871 
   12872 out:
   12873 	/*
   12874 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12875 	 */
   12876 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12877 		delay(10*1000);
   12878 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12879 	}
   12880 }
   12881 
   12882 static void
   12883 wm_init_manageability(struct wm_softc *sc)
   12884 {
   12885 
   12886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12887 		device_xname(sc->sc_dev), __func__));
   12888 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12889 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12890 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12891 
   12892 		/* Disable hardware interception of ARP */
   12893 		manc &= ~MANC_ARP_EN;
   12894 
   12895 		/* Enable receiving management packets to the host */
   12896 		if (sc->sc_type >= WM_T_82571) {
   12897 			manc |= MANC_EN_MNG2HOST;
    12898 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12899 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12900 		}
   12901 
   12902 		CSR_WRITE(sc, WMREG_MANC, manc);
   12903 	}
   12904 }
   12905 
   12906 static void
   12907 wm_release_manageability(struct wm_softc *sc)
   12908 {
   12909 
   12910 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12911 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12912 
   12913 		manc |= MANC_ARP_EN;
   12914 		if (sc->sc_type >= WM_T_82571)
   12915 			manc &= ~MANC_EN_MNG2HOST;
   12916 
   12917 		CSR_WRITE(sc, WMREG_MANC, manc);
   12918 	}
   12919 }
   12920 
   12921 static void
   12922 wm_get_wakeup(struct wm_softc *sc)
   12923 {
   12924 
   12925 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12926 	switch (sc->sc_type) {
   12927 	case WM_T_82573:
   12928 	case WM_T_82583:
   12929 		sc->sc_flags |= WM_F_HAS_AMT;
   12930 		/* FALLTHROUGH */
   12931 	case WM_T_80003:
   12932 	case WM_T_82575:
   12933 	case WM_T_82576:
   12934 	case WM_T_82580:
   12935 	case WM_T_I350:
   12936 	case WM_T_I354:
   12937 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12938 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12939 		/* FALLTHROUGH */
   12940 	case WM_T_82541:
   12941 	case WM_T_82541_2:
   12942 	case WM_T_82547:
   12943 	case WM_T_82547_2:
   12944 	case WM_T_82571:
   12945 	case WM_T_82572:
   12946 	case WM_T_82574:
   12947 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12948 		break;
   12949 	case WM_T_ICH8:
   12950 	case WM_T_ICH9:
   12951 	case WM_T_ICH10:
   12952 	case WM_T_PCH:
   12953 	case WM_T_PCH2:
   12954 	case WM_T_PCH_LPT:
   12955 	case WM_T_PCH_SPT:
   12956 		sc->sc_flags |= WM_F_HAS_AMT;
   12957 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12958 		break;
   12959 	default:
   12960 		break;
   12961 	}
   12962 
   12963 	/* 1: HAS_MANAGE */
   12964 	if (wm_enable_mng_pass_thru(sc) != 0)
   12965 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12966 
   12967 #ifdef WM_DEBUG
   12968 	printf("\n");
   12969 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12970 		printf("HAS_AMT,");
   12971 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12972 		printf("ARC_SUBSYS_VALID,");
   12973 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12974 		printf("ASF_FIRMWARE_PRES,");
   12975 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12976 		printf("HAS_MANAGE,");
   12977 	printf("\n");
   12978 #endif
   12979 	/*
    12980 	 * Note that the WOL flags are set after the EEPROM settings
    12981 	 * have been reset.
   12982 	 */
   12983 }
   12984 
   12985 /*
   12986  * Unconfigure Ultra Low Power mode.
   12987  * Only for I217 and newer (see below).
   12988  */
   12989 static void
   12990 wm_ulp_disable(struct wm_softc *sc)
   12991 {
   12992 	uint32_t reg;
   12993 	int i = 0;
   12994 
   12995 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12996 		device_xname(sc->sc_dev), __func__));
   12997 	/* Exclude old devices */
   12998 	if ((sc->sc_type < WM_T_PCH_LPT)
   12999 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13000 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13001 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13002 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13003 		return;
   13004 
   13005 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13006 		/* Request ME un-configure ULP mode in the PHY */
   13007 		reg = CSR_READ(sc, WMREG_H2ME);
   13008 		reg &= ~H2ME_ULP;
   13009 		reg |= H2ME_ENFORCE_SETTINGS;
   13010 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13011 
   13012 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13013 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13014 			if (i++ == 30) {
   13015 				printf("%s timed out\n", __func__);
   13016 				return;
   13017 			}
   13018 			delay(10 * 1000);
   13019 		}
   13020 		reg = CSR_READ(sc, WMREG_H2ME);
   13021 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13022 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13023 
   13024 		return;
   13025 	}
   13026 
   13027 	/* Acquire semaphore */
   13028 	sc->phy.acquire(sc);
   13029 
   13030 	/* Toggle LANPHYPC */
   13031 	wm_toggle_lanphypc_pch_lpt(sc);
   13032 
   13033 	/* Unforce SMBus mode in PHY */
   13034 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13035 	if (reg == 0x0000 || reg == 0xffff) {
   13036 		uint32_t reg2;
   13037 
   13038 		printf("%s: Force SMBus first.\n", __func__);
   13039 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13040 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13041 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13042 		delay(50 * 1000);
   13043 
   13044 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13045 	}
   13046 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13047 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13048 
   13049 	/* Unforce SMBus mode in MAC */
   13050 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13051 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13052 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13053 
   13054 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13055 	reg |= HV_PM_CTRL_K1_ENA;
   13056 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13057 
   13058 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13059 	reg &= ~(I218_ULP_CONFIG1_IND
   13060 	    | I218_ULP_CONFIG1_STICKY_ULP
   13061 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13062 	    | I218_ULP_CONFIG1_WOL_HOST
   13063 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13064 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13065 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13066 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13067 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
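         	/* Commit the ULP changes by starting the auto ULP configuration */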
   13068 	reg |= I218_ULP_CONFIG1_START;
   13069 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13070 
   13071 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13072 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13073 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13074 
   13075 	/* Release semaphore */
   13076 	sc->phy.release(sc);
   13077 	wm_gmii_reset(sc);
   13078 	delay(50 * 1000);
   13079 }
   13080 
   13081 /* WOL in the newer chipset interfaces (pchlan) */
   13082 static void
   13083 wm_enable_phy_wakeup(struct wm_softc *sc)
   13084 {
   13085 #if 0
   13086 	uint16_t preg;
   13087 
   13088 	/* Copy MAC RARs to PHY RARs */
   13089 
   13090 	/* Copy MAC MTA to PHY MTA */
   13091 
   13092 	/* Configure PHY Rx Control register */
   13093 
   13094 	/* Enable PHY wakeup in MAC register */
   13095 
   13096 	/* Configure and enable PHY wakeup in PHY registers */
   13097 
   13098 	/* Activate PHY wakeup */
   13099 
   13100 	/* XXX */
   13101 #endif
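         	/*
         	 * XXX Not implemented yet.  PCH and newer parts program their
         	 * wakeup state through PHY registers, as outlined above.
         	 */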
   13102 }
   13103 
   13104 /* Power down workaround on D3 */
   13105 static void
   13106 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13107 {
   13108 	uint32_t reg;
   13109 	int i;
   13110 
   13111 	for (i = 0; i < 2; i++) {
   13112 		/* Disable link */
   13113 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13114 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13115 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13116 
   13117 		/*
   13118 		 * Call gig speed drop workaround on Gig disable before
   13119 		 * accessing any PHY registers
   13120 		 */
   13121 		if (sc->sc_type == WM_T_ICH8)
   13122 			wm_gig_downshift_workaround_ich8lan(sc);
   13123 
   13124 		/* Write VR power-down enable */
   13125 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13126 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13127 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13128 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13129 
   13130 		/* Read it back and test */
   13131 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13132 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13133 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13134 			break;
   13135 
   13136 		/* Issue PHY reset and repeat at most one more time */
   13137 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13138 	}
   13139 }
   13140 
   13141 static void
   13142 wm_enable_wakeup(struct wm_softc *sc)
   13143 {
   13144 	uint32_t reg, pmreg;
   13145 	pcireg_t pmode;
   13146 
   13147 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13148 		device_xname(sc->sc_dev), __func__));
   13149 
   13150 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13151 		&pmreg, NULL) == 0)
   13152 		return;
   13153 
   13154 	/* Advertise the wakeup capability */
   13155 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13156 	    | CTRL_SWDPIN(3));
   13157 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13158 
   13159 	/* ICH workaround */
   13160 	switch (sc->sc_type) {
   13161 	case WM_T_ICH8:
   13162 	case WM_T_ICH9:
   13163 	case WM_T_ICH10:
   13164 	case WM_T_PCH:
   13165 	case WM_T_PCH2:
   13166 	case WM_T_PCH_LPT:
   13167 	case WM_T_PCH_SPT:
   13168 		/* Disable gig during WOL */
   13169 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13170 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13171 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13172 		if (sc->sc_type == WM_T_PCH)
   13173 			wm_gmii_reset(sc);
   13174 
   13175 		/* Power down workaround */
   13176 		if (sc->sc_phytype == WMPHY_82577) {
   13177 			struct mii_softc *child;
   13178 
   13179 			/* Assume that the PHY is copper */
   13180 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13181 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13182 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13183 				    (768 << 5) | 25, 0x0444); /* magic num: page 768, reg 25 */
   13184 		}
   13185 		break;
   13186 	default:
   13187 		break;
   13188 	}
   13189 
   13190 	/* Keep the laser running on fiber adapters */
   13191 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13192 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13193 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13194 		reg |= CTRL_EXT_SWDPIN(3);
   13195 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13196 	}
   13197 
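         	/* Always wake on a magic packet */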
   13198 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13199 #if 0	/* for the multicast packet */
   13200 	reg |= WUFC_MC;
   13201 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13202 #endif
   13203 
   13204 	if (sc->sc_type >= WM_T_PCH)
   13205 		wm_enable_phy_wakeup(sc);
   13206 	else {
   13207 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13208 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13209 	}
   13210 
   13211 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13212 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13213 		|| (sc->sc_type == WM_T_PCH2))
   13214 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13215 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13216 
   13217 	/* Request PME */
   13218 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13219 #if 0
   13220 	/* Disable WOL */
   13221 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13222 #else
   13223 	/* For WOL */
   13224 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13225 #endif
   13226 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13227 }
   13228 
   13229 /* LPLU (Low Power Link Up) */
   13230 
   13231 static void
   13232 wm_lplu_d0_disable(struct wm_softc *sc)
   13233 {
   13234 	uint32_t reg;
   13235 
   13236 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13237 		device_xname(sc->sc_dev), __func__));
   13238 
   13239 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13240 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13241 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13242 }
   13243 
   13244 static void
   13245 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13246 {
   13247 	uint32_t reg;
   13248 
   13249 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13250 		device_xname(sc->sc_dev), __func__));
   13251 
   13252 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13253 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13254 	reg |= HV_OEM_BITS_ANEGNOW;
   13255 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13256 }
   13257 
   13258 /* EEE (Energy Efficient Ethernet) */
   13259 
   13260 static void
   13261 wm_set_eee_i350(struct wm_softc *sc)
   13262 {
   13263 	uint32_t ipcnfg, eeer;
   13264 
   13265 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13266 	eeer = CSR_READ(sc, WMREG_EEER);
   13267 
   13268 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13269 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13270 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13271 		    | EEER_LPI_FC);
   13272 	} else {
   13273 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13274 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13275 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13276 		    | EEER_LPI_FC);
   13277 	}
   13278 
   13279 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13280 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13281 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13282 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13283 }
   13284 
   13285 /*
   13286  * Workarounds (mainly PHY related).
   13287  * Most PHY-specific workarounds are implemented in the PHY drivers.
   13288  */
   13289 
   13290 /* Work-around for 82566 Kumeran PCS lock loss */
   13291 static void
   13292 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13293 {
   13294 #if 0
   13295 	int miistatus, active, i;
   13296 	int reg;
   13297 
   13298 	miistatus = sc->sc_mii.mii_media_status;
   13299 
   13300 	/* If the link is not up, do nothing */
   13301 	if ((miistatus & IFM_ACTIVE) == 0)
   13302 		return;
   13303 
   13304 	active = sc->sc_mii.mii_media_active;
   13305 
   13306 	/* Nothing to do if the link is other than 1Gbps */
   13307 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13308 		return;
   13309 
   13310 	for (i = 0; i < 10; i++) {
   13311 		/* read twice */
   13312 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13313 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13314 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13315 			goto out;	/* GOOD! */
   13316 
   13317 		/* Reset the PHY */
   13318 		wm_gmii_reset(sc);
   13319 		delay(5*1000);
   13320 	}
   13321 
   13322 	/* Disable GigE link negotiation */
   13323 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13324 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13325 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13326 
   13327 	/*
   13328 	 * Call gig speed drop workaround on Gig disable before accessing
   13329 	 * any PHY registers.
   13330 	 */
   13331 	wm_gig_downshift_workaround_ich8lan(sc);
   13332 
   13333 out:
   13334 	return;
   13335 #endif
   13336 }
   13337 
   13338 /* WOL from S5 stops working */
   13339 static void
   13340 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13341 {
   13342 	uint16_t kmrn_reg;
   13343 
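         	/*
         	 * Pulse the Kumeran near-end loopback bit.  As in the
         	 * corresponding e1000 workaround, this is needed when dropping
         	 * out of 1Gbps (e.g. on a link cycle), or WOL from S5 breaks.
         	 */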
   13344 	/* Only for igp3 */
   13345 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13346 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13347 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13348 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13349 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13350 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13351 	}
   13352 }
   13353 
   13354 /*
   13355  * Workaround for PCH PHYs.
   13356  * XXX Should this be moved to a new PHY driver?
   13357  */
   13358 static void
   13359 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13360 {
   13361 
   13362 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13363 		device_xname(sc->sc_dev), __func__));
   13364 	KASSERT(sc->sc_type == WM_T_PCH);
   13365 
   13366 	if (sc->sc_phytype == WMPHY_82577)
   13367 		wm_set_mdio_slow_mode_hv(sc);
   13368 
   13369 	/* XXX Not implemented: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13370 
   13371 	/* XXX Not implemented: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13372 
   13373 	/* 82578 */
   13374 	if (sc->sc_phytype == WMPHY_82578) {
   13375 		struct mii_softc *child;
   13376 
   13377 		/*
   13378 		 * Return registers to default by doing a soft reset then
   13379 		 * writing 0x3140 to the control register
   13380 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13381 		 */
   13382 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13383 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13384 			PHY_RESET(child);
   13385 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13386 			    0x3140);
   13387 		}
   13388 	}
   13389 
   13390 	/* Select page 0 */
   13391 	sc->phy.acquire(sc);
   13392 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13393 	sc->phy.release(sc);
   13394 
   13395 	/*
   13396 	 * Configure the K1 Si workaround during phy reset assuming there is
   13397 	 * link so that it disables K1 if link is in 1Gbps.
   13398 	 */
   13399 	wm_k1_gig_workaround_hv(sc, 1);
   13400 }
   13401 
   13402 static void
   13403 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13404 {
   13405 
   13406 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13407 		device_xname(sc->sc_dev), __func__));
   13408 	KASSERT(sc->sc_type == WM_T_PCH2);
   13409 
   13410 	wm_set_mdio_slow_mode_hv(sc);
   13411 }
   13412 
   13413 static int
   13414 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13415 {
   13416 	int k1_enable = sc->sc_nvm_k1_enabled;
   13417 
   13418 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13419 		device_xname(sc->sc_dev), __func__));
   13420 
   13421 	if (sc->phy.acquire(sc) != 0)
   13422 		return -1;
   13423 
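         	/* Keep K1 disabled while the link is up (see the caller's note) */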
   13424 	if (link) {
   13425 		k1_enable = 0;
   13426 
   13427 		/* Link stall fix for link up */
   13428 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13429 	} else {
   13430 		/* Link stall fix for link down */
   13431 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13432 	}
   13433 
   13434 	wm_configure_k1_ich8lan(sc, k1_enable);
   13435 	sc->phy.release(sc);
   13436 
   13437 	return 0;
   13438 }
   13439 
   13440 static void
   13441 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13442 {
   13443 	uint32_t reg;
   13444 
   13445 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13446 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13447 	    reg | HV_KMRN_MDIO_SLOW);
   13448 }
   13449 
   13450 static void
   13451 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13452 {
   13453 	uint32_t ctrl, ctrl_ext, tmp;
   13454 	uint16_t kmrn_reg;
   13455 
   13456 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13457 
   13458 	if (k1_enable)
   13459 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13460 	else
   13461 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13462 
   13463 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13464 
   13465 	delay(20);
   13466 
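         	/*
         	 * Briefly force the MAC speed with SPD_BYPS set so the new K1
         	 * setting takes effect, then restore the original CTRL and
         	 * CTRL_EXT values; this mirrors the equivalent e1000 routine.
         	 */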
   13467 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13468 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13469 
   13470 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13471 	tmp |= CTRL_FRCSPD;
   13472 
   13473 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13474 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13475 	CSR_WRITE_FLUSH(sc);
   13476 	delay(20);
   13477 
   13478 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13479 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13480 	CSR_WRITE_FLUSH(sc);
   13481 	delay(20);
   13482 }
   13483 
   13484 /* Special case - the 82575 requires manual initialization ... */
   13485 static void
   13486 wm_reset_init_script_82575(struct wm_softc *sc)
   13487 {
   13488 	/*
   13489 	 * Remark: this is untested code - we have no board without an EEPROM.
   13490 	 * It is the same setup as in the FreeBSD driver for the i82575.
   13491 	 */
   13492 
   13493 	/* SerDes configuration via SERDESCTRL */
   13494 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13495 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13496 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13497 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13498 
   13499 	/* CCM configuration via CCMCTL register */
   13500 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13501 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13502 
   13503 	/* PCIe lanes configuration */
   13504 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13505 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13506 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13507 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13508 
   13509 	/* PCIe PLL Configuration */
   13510 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13511 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13512 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13513 }
   13514 
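         /*
          * Restore the MDICNFG MDIO destination and shared-MDIO settings from
          * the NVM after a reset, as they are not reloaded automatically;
          * this mirrors the equivalent routine in other e1000-derived drivers.
          */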
   13515 static void
   13516 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13517 {
   13518 	uint32_t reg;
   13519 	uint16_t nvmword;
   13520 	int rv;
   13521 
   13522 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13523 		return;
   13524 
   13525 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13526 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13527 	if (rv != 0) {
   13528 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13529 		    __func__);
   13530 		return;
   13531 	}
   13532 
   13533 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13534 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13535 		reg |= MDICNFG_DEST;
   13536 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13537 		reg |= MDICNFG_COM_MDIO;
   13538 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13539 }
   13540 
   13541 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
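         /* An ID of all zeros or all ones means no PHY responded. */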
   13542 
   13543 static bool
   13544 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13545 {
   13546 	int i;
   13547 	uint32_t reg;
   13548 	uint16_t id1, id2;
   13549 
   13550 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13551 		device_xname(sc->sc_dev), __func__));
   13552 	id1 = id2 = 0xffff;
   13553 	for (i = 0; i < 2; i++) {
   13554 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13555 		if (MII_INVALIDID(id1))
   13556 			continue;
   13557 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13558 		if (MII_INVALIDID(id2))
   13559 			continue;
   13560 		break;
   13561 	}
   13562 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13563 		goto out;
   13564 	}
   13565 
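         	/*
         	 * The PHY ID could not be read at full speed; switch to slow
         	 * MDIO mode and retry (only possible before PCH_LPT).
         	 */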
   13566 	if (sc->sc_type < WM_T_PCH_LPT) {
   13567 		sc->phy.release(sc);
   13568 		wm_set_mdio_slow_mode_hv(sc);
   13569 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13570 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13571 		sc->phy.acquire(sc);
   13572 	}
   13573 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13574 		printf("XXX %s: PHY is not accessible\n", __func__);
   13575 		return false;
   13576 	}
   13577 out:
   13578 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13579 		/* Only unforce SMBus if ME is not active */
   13580 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13581 			/* Unforce SMBus mode in PHY */
   13582 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13583 			    CV_SMB_CTRL);
   13584 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13585 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13586 			    CV_SMB_CTRL, reg);
   13587 
   13588 			/* Unforce SMBus mode in MAC */
   13589 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13590 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13591 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13592 		}
   13593 	}
   13594 	return true;
   13595 }
   13596 
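         /*
          * Toggling the LANPHYPC pin value fully power-cycles the PHY; it is
          * used here to bring the PHY out of a stuck or SMBus-only state.
          */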
   13597 static void
   13598 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13599 {
   13600 	uint32_t reg;
   13601 	int i;
   13602 
   13603 	/* Set PHY Config Counter to 50msec */
   13604 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13605 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13606 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13607 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13608 
   13609 	/* Toggle LANPHYPC */
   13610 	reg = CSR_READ(sc, WMREG_CTRL);
   13611 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13612 	reg &= ~CTRL_LANPHYPC_VALUE;
   13613 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13614 	CSR_WRITE_FLUSH(sc);
   13615 	delay(1000);
   13616 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13617 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13618 	CSR_WRITE_FLUSH(sc);
   13619 
   13620 	if (sc->sc_type < WM_T_PCH_LPT)
   13621 		delay(50 * 1000);
   13622 	else {
   13623 		i = 20;
   13624 
   13625 		do {
   13626 			delay(5 * 1000);
   13627 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13628 		    && i--);
   13629 
   13630 		delay(30 * 1000);
   13631 	}
   13632 }
   13633 
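         /*
          * Set the PCIe LTR (Latency Tolerance Reporting) values for the
          * platform, based on the link state and the Rx buffer size.
          */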
   13634 static int
   13635 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13636 {
   13637 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13638 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13639 	uint32_t rxa;
   13640 	uint16_t scale = 0, lat_enc = 0;
   13641 	int64_t lat_ns, value;
   13642 
   13643 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13644 		device_xname(sc->sc_dev), __func__));
   13645 
   13646 	if (link) {
   13647 		pcireg_t preg;
   13648 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13649 
   13650 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13651 
   13652 		/*
   13653 		 * Determine the maximum latency tolerated by the device.
   13654 		 *
   13655 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13656 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13657 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13658 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13659 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13660 		 */
   13661 		lat_ns = ((int64_t)rxa * 1024 -
   13662 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13663 		if (lat_ns < 0)
   13664 			lat_ns = 0;
   13665 		else {
   13666 			uint32_t status;
   13667 			uint16_t speed;
   13668 
   13669 			status = CSR_READ(sc, WMREG_STATUS);
   13670 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13671 			case STATUS_SPEED_10:
   13672 				speed = 10;
   13673 				break;
   13674 			case STATUS_SPEED_100:
   13675 				speed = 100;
   13676 				break;
   13677 			case STATUS_SPEED_1000:
   13678 				speed = 1000;
   13679 				break;
   13680 			default:
   13681 				printf("%s: Unknown speed (status = %08x)\n",
   13682 				    device_xname(sc->sc_dev), status);
   13683 				return -1;
   13684 			}
   13685 			lat_ns /= speed;
   13686 		}
   13687 		value = lat_ns;
   13688 
   13689 		while (value > LTRV_VALUE) {
   13690 			scale++;
   13691 			value = howmany(value, __BIT(5));
   13692 		}
   13693 		if (scale > LTRV_SCALE_MAX) {
   13694 			printf("%s: Invalid LTR latency scale %d\n",
   13695 			    device_xname(sc->sc_dev), scale);
   13696 			return -1;
   13697 		}
   13698 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
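         		/*
         		 * Example (illustrative values): lat_ns = 50000 yields
         		 * scale = 2 and value = 49 above, so lat_enc encodes
         		 * 49 * 2^(5*2) ns = 50176 ns, the smallest representable
         		 * latency >= lat_ns.
         		 */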
   13699 
   13700 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13701 		    WM_PCI_LTR_CAP_LPT);
   13702 		max_snoop = preg & 0xffff;
   13703 		max_nosnoop = preg >> 16;
   13704 
   13705 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13706 
   13707 		if (lat_enc > max_ltr_enc) {
   13708 			lat_enc = max_ltr_enc;
   13709 		}
   13710 	}
   13711 	/* Snoop and No-Snoop latencies the same */
   13712 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13713 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13714 
   13715 	return 0;
   13716 }
   13717 
   13718 /*
   13719  * I210 Errata 25 and I211 Errata 10
   13720  * Slow System Clock.
   13721  */
   13722 static void
   13723 wm_pll_workaround_i210(struct wm_softc *sc)
   13724 {
   13725 	uint32_t mdicnfg, wuc;
   13726 	uint32_t reg;
   13727 	pcireg_t pcireg;
   13728 	uint32_t pmreg;
   13729 	uint16_t nvmword, tmp_nvmword;
   13730 	int phyval;
   13731 	bool wa_done = false;
   13732 	int i;
   13733 
   13734 	/* Save WUC and MDICNFG registers */
   13735 	wuc = CSR_READ(sc, WMREG_WUC);
   13736 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13737 
   13738 	reg = mdicnfg & ~MDICNFG_DEST;
   13739 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13740 
   13741 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13742 		nvmword = INVM_DEFAULT_AL;
   13743 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13744 
   13745 	/* Get Power Management cap offset */
   13746 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13747 		&pmreg, NULL) == 0)
   13748 		return;
   13749 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13750 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13751 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13752 
   13753 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13754 			break; /* OK */
   13755 		}
   13756 
   13757 		wa_done = true;
   13758 		/* Directly reset the internal PHY */
   13759 		reg = CSR_READ(sc, WMREG_CTRL);
   13760 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13761 
   13762 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13763 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13764 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13765 
   13766 		CSR_WRITE(sc, WMREG_WUC, 0);
   13767 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13768 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13769 
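         		/*
         		 * Bounce the device through D3hot and back to D0 so the
         		 * PLL restarts with the override autoload word in place
         		 * (assumed intent, per the errata named above).
         		 */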
   13770 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13771 		    pmreg + PCI_PMCSR);
   13772 		pcireg |= PCI_PMCSR_STATE_D3;
   13773 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13774 		    pmreg + PCI_PMCSR, pcireg);
   13775 		delay(1000);
   13776 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13777 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13778 		    pmreg + PCI_PMCSR, pcireg);
   13779 
   13780 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13781 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13782 
   13783 		/* Restore WUC register */
   13784 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13785 	}
   13786 
   13787 	/* Restore MDICNFG setting */
   13788 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13789 	if (wa_done)
   13790 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13791 }
   13792