/*	$NetBSD: if_wm.c,v 1.503 2017/03/24 10:02:35 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- Tx multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.503 2017/03/24 10:02:35 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
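
/*
 * Usage sketch for the debug macro above (note the extra parentheses
 * around the printf arguments; with WM_DEBUG undefined this compiles
 * away to nothing):
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link interrupt\n",
 *	    device_xname(sc->sc_dev)));
 */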

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses: one per queue
 * pair, plus one for the link interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
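
/*
 * A minimal sketch of how the power-of-two ring indices above wrap
 * (numbers assume the 82544 ring size; this is not driver code):
 */
#if 0
	int idx = WM_NTXDESC_82544 - 1;		/* 4095, the last slot */

	/* WM_NEXTTX() masks with (ndesc - 1), so 4096 wraps to 0. */
	idx = (idx + 1) & (WM_NTXDESC_82544 - 1);	/* idx == 0 */
#endif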

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
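
/*
 * Worked example for the sizing comment above: a ~9k jumbo frame split
 * across 2k (MCLBYTES) buffers needs ceil(9018 / 2048) = 5 descriptors,
 * so the 256-entry ring holds about 256 / 5 = 51 (~50) full-sized jumbo
 * packets.  WM_PREVRX() relies on the same power-of-two masking as the
 * Tx macros, e.g. WM_PREVRX(0) == 255.
 */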

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
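
/*
 * For instance, with the 16-byte legacy descriptor layout
 * (txq_descsize == sizeof(wiseman_txdesc_t) == 16), descriptor 5 sits
 * at byte offset WM_CDTXOFF(txq, 5) == 80 into the control-data area;
 * WM_CDTXADDR() below adds the ring's DMA base address to that offset.
 */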

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
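
/*
 * A sketch of what the token-pasting macros above produce, e.g. for
 * WM_Q_EVCNT_DEFINE(txq, txdw) (this is not driver code):
 */
#if 0
	/* Members generated inside the queue structure: */
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];	/* 18 bytes;
					   string literals are not pasted */
	struct evcnt txq_ev_txdw;

	/*
	 * WM_Q_EVCNT_ATTACH(txq, txdw, txq, 0, xname, EVCNT_TYPE_INTR)
	 * then snprintf()s "txq00txdw" into the name buffer and hands it
	 * to evcnt_attach_dynamic().
	 */
#endif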

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This pcq(9) queue mediates between them without
	 * blocking.
	 */
	pcq_t *txq_interq;
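
	/*
	 * Usage sketch (abridged from the transmit path): wm_transmit()
	 * enqueues a packet with pcq_put(txq->txq_interq, m), and the
	 * queue's handler later drains it under txq_lock by calling
	 * pcq_get(txq->txq_interq) until it returns NULL.
	 */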

	/*
	 * On NEWQUEUE devices, use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
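
/*
 * A sketch of how the tail-pointer chain above grows (not driver code):
 * after WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head, so the first
 * WM_RXCHAIN_LINK() sets rxq_head and each later call appends to the
 * previous mbuf's m_next in O(1).
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* head = NULL, tailp = &head */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
#endif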

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
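
/*
 * Typical read-modify-write with a flush of posted PCI writes (a sketch,
 * not driver code; WMREG_CTRL and CTRL_SLU come from if_wmreg.h):
 */
#if 0
	uint32_t reg = CSR_READ(sc, WMREG_CTRL);

	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_SLU);
	CSR_WRITE_FLUSH(sc);	/* reading STATUS forces the write out */
#endif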

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
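
/*
 * Worked example: with a 64-bit bus_addr_t of 0x0000000123456780, the
 * _LO/_HI macros above yield lo == 0x23456780 and hi == 0x00000001 for
 * programming the two 32-bit halves of a descriptor base address; with
 * a 32-bit bus_addr_t the high half is simply 0.
 */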

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1329 	  "82580 quad-1000BaseX Ethernet",
   1330 	  WM_T_82580,		WMP_F_FIBER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1333 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1334 	  WM_T_82580,		WMP_F_COPPER },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1337 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1338 	  WM_T_82580,		WMP_F_SERDES },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1341 	  "DH89XXCC 1000BASE-KX Ethernet",
   1342 	  WM_T_82580,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1345 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1346 	  WM_T_82580,		WMP_F_SERDES },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1349 	  "I350 Gigabit Network Connection",
   1350 	  WM_T_I350,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1353 	  "I350 Gigabit Fiber Network Connection",
   1354 	  WM_T_I350,		WMP_F_FIBER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1357 	  "I350 Gigabit Backplane Connection",
   1358 	  WM_T_I350,		WMP_F_SERDES },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1361 	  "I350 Quad Port Gigabit Ethernet",
   1362 	  WM_T_I350,		WMP_F_SERDES },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1365 	  "I350 Gigabit Connection",
   1366 	  WM_T_I350,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1369 	  "I354 Gigabit Ethernet (KX)",
   1370 	  WM_T_I354,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1373 	  "I354 Gigabit Ethernet (SGMII)",
   1374 	  WM_T_I354,		WMP_F_COPPER },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1377 	  "I354 Gigabit Ethernet (2.5G)",
   1378 	  WM_T_I354,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1381 	  "I210-T1 Ethernet Server Adapter",
   1382 	  WM_T_I210,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1385 	  "I210 Ethernet (Copper OEM)",
   1386 	  WM_T_I210,		WMP_F_COPPER },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1389 	  "I210 Ethernet (Copper IT)",
   1390 	  WM_T_I210,		WMP_F_COPPER },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1393 	  "I210 Ethernet (FLASH less)",
   1394 	  WM_T_I210,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1397 	  "I210 Gigabit Ethernet (Fiber)",
   1398 	  WM_T_I210,		WMP_F_FIBER },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1401 	  "I210 Gigabit Ethernet (SERDES)",
   1402 	  WM_T_I210,		WMP_F_SERDES },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1405 	  "I210 Gigabit Ethernet (FLASH less)",
   1406 	  WM_T_I210,		WMP_F_SERDES },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1409 	  "I210 Gigabit Ethernet (SGMII)",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1413 	  "I211 Ethernet (COPPER)",
   1414 	  WM_T_I211,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1416 	  "I217 V Ethernet Connection",
   1417 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1419 	  "I217 LM Ethernet Connection",
   1420 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1422 	  "I218 V Ethernet Connection",
   1423 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1425 	  "I218 V Ethernet Connection",
   1426 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1428 	  "I218 V Ethernet Connection",
   1429 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1431 	  "I218 LM Ethernet Connection",
   1432 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1434 	  "I218 LM Ethernet Connection",
   1435 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1437 	  "I218 LM Ethernet Connection",
   1438 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1439 #if 0
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1441 	  "I219 V Ethernet Connection",
   1442 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1444 	  "I219 V Ethernet Connection",
   1445 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1447 	  "I219 V Ethernet Connection",
   1448 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1450 	  "I219 V Ethernet Connection",
   1451 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1453 	  "I219 LM Ethernet Connection",
   1454 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1456 	  "I219 LM Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1459 	  "I219 LM Ethernet Connection",
   1460 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1462 	  "I219 LM Ethernet Connection",
   1463 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1465 	  "I219 LM Ethernet Connection",
   1466 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1467 #endif
   1468 	{ 0,			0,
   1469 	  NULL,
   1470 	  0,			0 },
   1471 };
   1472 
   1473 /*
   1474  * Register read/write functions.
   1475  * Other than CSR_{READ|WRITE}().
   1476  */
   1477 
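         /*
          * Chips with an I/O BAR provide an indirect register window:
          * a register offset written at byte 0 of the BAR (IOADDR)
          * selects the register, and byte 4 (IODATA) then accesses its
          * contents.  wm_io_read() and wm_io_write() below implement
          * that sequence.
          */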
   1478 #if 0 /* Not currently used */
   1479 static inline uint32_t
   1480 wm_io_read(struct wm_softc *sc, int reg)
   1481 {
   1482 
   1483 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1484 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1485 }
   1486 #endif
   1487 
   1488 static inline void
   1489 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1490 {
   1491 
   1492 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1493 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1494 }
   1495 
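         /*
          * Write an 8-bit value to an indirect 8-bit controller register
          * on 82575 and newer chips (e.g. through SCTL): the sub-register
          * offset and the data are packed into a single register write,
          * then we poll for the READY bit, warning if it never appears.
          */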
   1496 static inline void
   1497 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1498     uint32_t data)
   1499 {
   1500 	uint32_t regval;
   1501 	int i;
   1502 
   1503 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1504 
   1505 	CSR_WRITE(sc, reg, regval);
   1506 
   1507 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1508 		delay(5);
   1509 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1510 			break;
   1511 	}
   1512 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1513 		aprint_error("%s: WARNING:"
   1514 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1515 		    device_xname(sc->sc_dev), reg);
   1516 	}
   1517 }
   1518 
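         /*
          * Load a DMA address into a two-word (little-endian) descriptor
          * address field.  On ports where bus_addr_t is 32 bits wide the
          * high word is always zero; with a 64-bit bus_addr_t, e.g.
          * v = 0x123456789 stores wa_low = 0x23456789 and wa_high = 0x1.
          */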
   1519 static inline void
   1520 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1521 {
   1522 	wa->wa_low = htole32(v & 0xffffffffU);
   1523 	if (sizeof(bus_addr_t) == 8)
   1524 		wa->wa_high = htole32((uint64_t) v >> 32);
   1525 	else
   1526 		wa->wa_high = 0;
   1527 }
   1528 
   1529 /*
   1530  * Descriptor sync/init functions.
   1531  */
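         /*
          * wm_cdtxsync() syncs descriptors [start, start + num) of a Tx
          * ring, in two pieces when the range wraps: e.g. on a
          * 256-descriptor ring, start = 250 and num = 10 sync entries
          * 250-255 and then 0-3.
          */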
   1532 static inline void
   1533 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1534 {
   1535 	struct wm_softc *sc = txq->txq_sc;
   1536 
   1537 	/* If it will wrap around, sync to the end of the ring. */
   1538 	if ((start + num) > WM_NTXDESC(txq)) {
   1539 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1540 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1541 		    (WM_NTXDESC(txq) - start), ops);
   1542 		num -= (WM_NTXDESC(txq) - start);
   1543 		start = 0;
   1544 	}
   1545 
   1546 	/* Now sync whatever is left. */
   1547 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1548 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1549 }
   1550 
   1551 static inline void
   1552 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1553 {
   1554 	struct wm_softc *sc = rxq->rxq_sc;
   1555 
   1556 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1557 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1558 }
   1559 
   1560 static inline void
   1561 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1562 {
   1563 	struct wm_softc *sc = rxq->rxq_sc;
   1564 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1565 	struct mbuf *m = rxs->rxs_mbuf;
   1566 
   1567 	/*
   1568 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1569 	 * so that the payload after the Ethernet header is aligned
   1570 	 * to a 4-byte boundary.
    1571 	 *
   1572 	 * XXX BRAINDAMAGE ALERT!
   1573 	 * The stupid chip uses the same size for every buffer, which
   1574 	 * is set in the Receive Control register.  We are using the 2K
   1575 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1576 	 * reason, we can't "scoot" packets longer than the standard
   1577 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1578 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1579 	 * the upper layer copy the headers.
   1580 	 */
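         	/*
         	 * E.g. with the standard 14-byte Ethernet header, scooting
         	 * the buffer forward 2 bytes puts the IP header at offset
         	 * 16, i.e. on a 4-byte boundary.
         	 */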
   1581 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1582 
   1583 	if (sc->sc_type == WM_T_82574) {
   1584 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1585 		rxd->erx_data.erxd_addr =
   1586 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1587 		rxd->erx_data.erxd_dd = 0;
   1588 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1589 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1590 
   1591 		rxd->nqrx_data.nrxd_paddr =
   1592 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1593 		/* Currently, split header is not supported. */
   1594 		rxd->nqrx_data.nrxd_haddr = 0;
   1595 	} else {
   1596 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1597 
   1598 		wm_set_dma_addr(&rxd->wrx_addr,
   1599 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1600 		rxd->wrx_len = 0;
   1601 		rxd->wrx_cksum = 0;
   1602 		rxd->wrx_status = 0;
   1603 		rxd->wrx_errors = 0;
   1604 		rxd->wrx_special = 0;
   1605 	}
   1606 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1607 
   1608 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1609 }
   1610 
   1611 /*
   1612  * Device driver interface functions and commonly used functions.
   1613  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1614  */
   1615 
   1616 /* Lookup supported device table */
   1617 static const struct wm_product *
   1618 wm_lookup(const struct pci_attach_args *pa)
   1619 {
   1620 	const struct wm_product *wmp;
   1621 
   1622 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1623 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1624 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1625 			return wmp;
   1626 	}
   1627 	return NULL;
   1628 }
   1629 
   1630 /* The match function (ca_match) */
   1631 static int
   1632 wm_match(device_t parent, cfdata_t cf, void *aux)
   1633 {
   1634 	struct pci_attach_args *pa = aux;
   1635 
   1636 	if (wm_lookup(pa) != NULL)
   1637 		return 1;
   1638 
   1639 	return 0;
   1640 }
   1641 
   1642 /* The attach function (ca_attach) */
   1643 static void
   1644 wm_attach(device_t parent, device_t self, void *aux)
   1645 {
   1646 	struct wm_softc *sc = device_private(self);
   1647 	struct pci_attach_args *pa = aux;
   1648 	prop_dictionary_t dict;
   1649 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1650 	pci_chipset_tag_t pc = pa->pa_pc;
   1651 	int counts[PCI_INTR_TYPE_SIZE];
   1652 	pci_intr_type_t max_type;
   1653 	const char *eetype, *xname;
   1654 	bus_space_tag_t memt;
   1655 	bus_space_handle_t memh;
   1656 	bus_size_t memsize;
   1657 	int memh_valid;
   1658 	int i, error;
   1659 	const struct wm_product *wmp;
   1660 	prop_data_t ea;
   1661 	prop_number_t pn;
   1662 	uint8_t enaddr[ETHER_ADDR_LEN];
   1663 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1664 	pcireg_t preg, memtype;
   1665 	uint16_t eeprom_data, apme_mask;
   1666 	bool force_clear_smbi;
   1667 	uint32_t link_mode;
   1668 	uint32_t reg;
   1669 
   1670 	sc->sc_dev = self;
   1671 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1672 	sc->sc_core_stopping = false;
   1673 
   1674 	wmp = wm_lookup(pa);
   1675 #ifdef DIAGNOSTIC
   1676 	if (wmp == NULL) {
   1677 		printf("\n");
   1678 		panic("wm_attach: impossible");
   1679 	}
   1680 #endif
   1681 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1682 
   1683 	sc->sc_pc = pa->pa_pc;
   1684 	sc->sc_pcitag = pa->pa_tag;
   1685 
   1686 	if (pci_dma64_available(pa))
   1687 		sc->sc_dmat = pa->pa_dmat64;
   1688 	else
   1689 		sc->sc_dmat = pa->pa_dmat;
   1690 
   1691 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1692 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1693 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1694 
   1695 	sc->sc_type = wmp->wmp_type;
   1696 
   1697 	/* Set default function pointers */
   1698 	sc->phy.acquire = wm_get_null;
   1699 	sc->phy.release = wm_put_null;
   1700 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1701 
   1702 	if (sc->sc_type < WM_T_82543) {
   1703 		if (sc->sc_rev < 2) {
   1704 			aprint_error_dev(sc->sc_dev,
   1705 			    "i82542 must be at least rev. 2\n");
   1706 			return;
   1707 		}
   1708 		if (sc->sc_rev < 3)
   1709 			sc->sc_type = WM_T_82542_2_0;
   1710 	}
   1711 
   1712 	/*
   1713 	 * Disable MSI for Errata:
   1714 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1715 	 *
   1716 	 *  82544: Errata 25
   1717 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1718 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1719 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1720 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1721 	 *
   1722 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1723 	 *
   1724 	 *  82571 & 82572: Errata 63
   1725 	 */
   1726 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1727 	    || (sc->sc_type == WM_T_82572))
   1728 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1729 
   1730 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1731 	    || (sc->sc_type == WM_T_82580)
   1732 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1733 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1734 		sc->sc_flags |= WM_F_NEWQUEUE;
   1735 
   1736 	/* Set device properties (mactype) */
   1737 	dict = device_properties(sc->sc_dev);
   1738 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1739 
   1740 	/*
    1741 	 * Map the device.  All devices support memory-mapped access,
   1742 	 * and it is really required for normal operation.
   1743 	 */
   1744 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1745 	switch (memtype) {
   1746 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1747 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1748 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1749 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1750 		break;
   1751 	default:
   1752 		memh_valid = 0;
   1753 		break;
   1754 	}
   1755 
   1756 	if (memh_valid) {
   1757 		sc->sc_st = memt;
   1758 		sc->sc_sh = memh;
   1759 		sc->sc_ss = memsize;
   1760 	} else {
   1761 		aprint_error_dev(sc->sc_dev,
   1762 		    "unable to map device registers\n");
   1763 		return;
   1764 	}
   1765 
   1766 	/*
   1767 	 * In addition, i82544 and later support I/O mapped indirect
   1768 	 * register access.  It is not desirable (nor supported in
   1769 	 * this driver) to use it for normal operation, though it is
   1770 	 * required to work around bugs in some chip versions.
   1771 	 */
   1772 	if (sc->sc_type >= WM_T_82544) {
   1773 		/* First we have to find the I/O BAR. */
   1774 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1775 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1776 			if (memtype == PCI_MAPREG_TYPE_IO)
   1777 				break;
   1778 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1779 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1780 				i += 4;	/* skip high bits, too */
   1781 		}
   1782 		if (i < PCI_MAPREG_END) {
   1783 			/*
    1784 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1785 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1786 			 * That's no problem, because the newer chips don't
    1787 			 * have this bug.
    1788 			 *
    1789 			 * The i8254x apparently doesn't respond when the
    1790 			 * I/O BAR is 0, which looks somewhat as if it hasn't
    1791 			 * been configured.
   1792 			 */
   1793 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1794 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1795 				aprint_error_dev(sc->sc_dev,
   1796 				    "WARNING: I/O BAR at zero.\n");
   1797 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1798 					0, &sc->sc_iot, &sc->sc_ioh,
   1799 					NULL, &sc->sc_ios) == 0) {
   1800 				sc->sc_flags |= WM_F_IOH_VALID;
   1801 			} else {
   1802 				aprint_error_dev(sc->sc_dev,
   1803 				    "WARNING: unable to map I/O space\n");
   1804 			}
   1805 		}
   1806 
   1807 	}
   1808 
   1809 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1810 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1811 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1812 	if (sc->sc_type < WM_T_82542_2_1)
   1813 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1814 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1815 
   1816 	/* power up chip */
   1817 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1818 	    NULL)) && error != EOPNOTSUPP) {
   1819 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1820 		return;
   1821 	}
   1822 
   1823 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1824 
   1825 	/* Allocation settings */
   1826 	max_type = PCI_INTR_TYPE_MSIX;
   1827 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1828 	counts[PCI_INTR_TYPE_MSI] = 1;
   1829 	counts[PCI_INTR_TYPE_INTX] = 1;
   1830 
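         	/*
         	 * The allocation below falls back in stages: try MSI-X
         	 * first; if its setup fails, release it and retry with MSI;
         	 * if that fails too, retry once more with a classic INTx
         	 * line.
         	 */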
   1831 alloc_retry:
   1832 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1833 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1834 		return;
   1835 	}
   1836 
   1837 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1838 		error = wm_setup_msix(sc);
   1839 		if (error) {
   1840 			pci_intr_release(pc, sc->sc_intrs,
   1841 			    counts[PCI_INTR_TYPE_MSIX]);
   1842 
   1843 			/* Setup for MSI: Disable MSI-X */
   1844 			max_type = PCI_INTR_TYPE_MSI;
   1845 			counts[PCI_INTR_TYPE_MSI] = 1;
   1846 			counts[PCI_INTR_TYPE_INTX] = 1;
   1847 			goto alloc_retry;
   1848 		}
    1849 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1850 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1851 		error = wm_setup_legacy(sc);
   1852 		if (error) {
   1853 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1854 			    counts[PCI_INTR_TYPE_MSI]);
   1855 
   1856 			/* The next try is for INTx: Disable MSI */
   1857 			max_type = PCI_INTR_TYPE_INTX;
   1858 			counts[PCI_INTR_TYPE_INTX] = 1;
   1859 			goto alloc_retry;
   1860 		}
   1861 	} else {
   1862 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1863 		error = wm_setup_legacy(sc);
   1864 		if (error) {
   1865 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1866 			    counts[PCI_INTR_TYPE_INTX]);
   1867 			return;
   1868 		}
   1869 	}
   1870 
   1871 	/*
   1872 	 * Check the function ID (unit number of the chip).
   1873 	 */
   1874 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1875 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1876 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1877 	    || (sc->sc_type == WM_T_82580)
   1878 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1879 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1880 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1881 	else
   1882 		sc->sc_funcid = 0;
   1883 
   1884 	/*
   1885 	 * Determine a few things about the bus we're connected to.
   1886 	 */
   1887 	if (sc->sc_type < WM_T_82543) {
   1888 		/* We don't really know the bus characteristics here. */
   1889 		sc->sc_bus_speed = 33;
   1890 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1891 		/*
    1892 		 * CSA (Communication Streaming Architecture) is about as
    1893 		 * fast as a 32-bit 66MHz PCI bus.
   1894 		 */
   1895 		sc->sc_flags |= WM_F_CSA;
   1896 		sc->sc_bus_speed = 66;
   1897 		aprint_verbose_dev(sc->sc_dev,
   1898 		    "Communication Streaming Architecture\n");
   1899 		if (sc->sc_type == WM_T_82547) {
   1900 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1901 			callout_setfunc(&sc->sc_txfifo_ch,
   1902 					wm_82547_txfifo_stall, sc);
   1903 			aprint_verbose_dev(sc->sc_dev,
   1904 			    "using 82547 Tx FIFO stall work-around\n");
   1905 		}
   1906 	} else if (sc->sc_type >= WM_T_82571) {
   1907 		sc->sc_flags |= WM_F_PCIE;
   1908 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1909 		    && (sc->sc_type != WM_T_ICH10)
   1910 		    && (sc->sc_type != WM_T_PCH)
   1911 		    && (sc->sc_type != WM_T_PCH2)
   1912 		    && (sc->sc_type != WM_T_PCH_LPT)
   1913 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1914 			/* ICH* and PCH* have no PCIe capability registers */
   1915 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1916 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1917 				NULL) == 0)
   1918 				aprint_error_dev(sc->sc_dev,
   1919 				    "unable to find PCIe capability\n");
   1920 		}
   1921 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1922 	} else {
   1923 		reg = CSR_READ(sc, WMREG_STATUS);
   1924 		if (reg & STATUS_BUS64)
   1925 			sc->sc_flags |= WM_F_BUS64;
   1926 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1927 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1928 
   1929 			sc->sc_flags |= WM_F_PCIX;
   1930 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1931 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1932 				aprint_error_dev(sc->sc_dev,
   1933 				    "unable to find PCIX capability\n");
   1934 			else if (sc->sc_type != WM_T_82545_3 &&
   1935 				 sc->sc_type != WM_T_82546_3) {
   1936 				/*
   1937 				 * Work around a problem caused by the BIOS
   1938 				 * setting the max memory read byte count
   1939 				 * incorrectly.
   1940 				 */
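         				/*
         				 * MMRBC is encoded as 512 << n bytes,
         				 * so e.g. bytecnt = 3 (4096 bytes) with
         				 * maxb = 2 (2048 bytes) is clamped down
         				 * to 2048 below.
         				 */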
   1941 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1942 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1943 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1944 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1945 
   1946 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1947 				    PCIX_CMD_BYTECNT_SHIFT;
   1948 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1949 				    PCIX_STATUS_MAXB_SHIFT;
   1950 				if (bytecnt > maxb) {
   1951 					aprint_verbose_dev(sc->sc_dev,
   1952 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1953 					    512 << bytecnt, 512 << maxb);
   1954 					pcix_cmd = (pcix_cmd &
   1955 					    ~PCIX_CMD_BYTECNT_MASK) |
   1956 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1957 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1958 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1959 					    pcix_cmd);
   1960 				}
   1961 			}
   1962 		}
   1963 		/*
   1964 		 * The quad port adapter is special; it has a PCIX-PCIX
   1965 		 * bridge on the board, and can run the secondary bus at
   1966 		 * a higher speed.
   1967 		 */
   1968 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1969 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1970 								      : 66;
   1971 		} else if (sc->sc_flags & WM_F_PCIX) {
   1972 			switch (reg & STATUS_PCIXSPD_MASK) {
   1973 			case STATUS_PCIXSPD_50_66:
   1974 				sc->sc_bus_speed = 66;
   1975 				break;
   1976 			case STATUS_PCIXSPD_66_100:
   1977 				sc->sc_bus_speed = 100;
   1978 				break;
   1979 			case STATUS_PCIXSPD_100_133:
   1980 				sc->sc_bus_speed = 133;
   1981 				break;
   1982 			default:
   1983 				aprint_error_dev(sc->sc_dev,
   1984 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1985 				    reg & STATUS_PCIXSPD_MASK);
   1986 				sc->sc_bus_speed = 66;
   1987 				break;
   1988 			}
   1989 		} else
   1990 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1991 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1992 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1993 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1994 	}
   1995 
   1996 	/* clear interesting stat counters */
   1997 	CSR_READ(sc, WMREG_COLC);
   1998 	CSR_READ(sc, WMREG_RXERRC);
   1999 
   2000 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2001 	    || (sc->sc_type >= WM_T_ICH8))
   2002 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2003 	if (sc->sc_type >= WM_T_ICH8)
   2004 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2005 
   2006 	/* Set PHY, NVM mutex related stuff */
   2007 	switch (sc->sc_type) {
   2008 	case WM_T_82542_2_0:
   2009 	case WM_T_82542_2_1:
   2010 	case WM_T_82543:
   2011 	case WM_T_82544:
   2012 		/* Microwire */
   2013 		sc->sc_nvm_wordsize = 64;
   2014 		sc->sc_nvm_addrbits = 6;
   2015 		break;
   2016 	case WM_T_82540:
   2017 	case WM_T_82545:
   2018 	case WM_T_82545_3:
   2019 	case WM_T_82546:
   2020 	case WM_T_82546_3:
   2021 		/* Microwire */
   2022 		reg = CSR_READ(sc, WMREG_EECD);
   2023 		if (reg & EECD_EE_SIZE) {
   2024 			sc->sc_nvm_wordsize = 256;
   2025 			sc->sc_nvm_addrbits = 8;
   2026 		} else {
   2027 			sc->sc_nvm_wordsize = 64;
   2028 			sc->sc_nvm_addrbits = 6;
   2029 		}
   2030 		sc->sc_flags |= WM_F_LOCK_EECD;
   2031 		break;
   2032 	case WM_T_82541:
   2033 	case WM_T_82541_2:
   2034 	case WM_T_82547:
   2035 	case WM_T_82547_2:
   2036 		sc->sc_flags |= WM_F_LOCK_EECD;
   2037 		reg = CSR_READ(sc, WMREG_EECD);
   2038 		if (reg & EECD_EE_TYPE) {
   2039 			/* SPI */
   2040 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2041 			wm_nvm_set_addrbits_size_eecd(sc);
   2042 		} else {
   2043 			/* Microwire */
   2044 			if ((reg & EECD_EE_ABITS) != 0) {
   2045 				sc->sc_nvm_wordsize = 256;
   2046 				sc->sc_nvm_addrbits = 8;
   2047 			} else {
   2048 				sc->sc_nvm_wordsize = 64;
   2049 				sc->sc_nvm_addrbits = 6;
   2050 			}
   2051 		}
   2052 		break;
   2053 	case WM_T_82571:
   2054 	case WM_T_82572:
   2055 		/* SPI */
   2056 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2057 		wm_nvm_set_addrbits_size_eecd(sc);
   2058 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2059 		sc->phy.acquire = wm_get_swsm_semaphore;
   2060 		sc->phy.release = wm_put_swsm_semaphore;
   2061 		break;
   2062 	case WM_T_82573:
   2063 	case WM_T_82574:
   2064 	case WM_T_82583:
   2065 		if (sc->sc_type == WM_T_82573) {
   2066 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2067 			sc->phy.acquire = wm_get_swsm_semaphore;
   2068 			sc->phy.release = wm_put_swsm_semaphore;
   2069 		} else {
   2070 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2071 			/* Both PHY and NVM use the same semaphore. */
    2072 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2073 			sc->phy.release = wm_put_swfwhw_semaphore;
   2076 		}
   2077 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2078 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2079 			sc->sc_nvm_wordsize = 2048;
   2080 		} else {
   2081 			/* SPI */
   2082 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2083 			wm_nvm_set_addrbits_size_eecd(sc);
   2084 		}
   2085 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2086 		break;
   2087 	case WM_T_82575:
   2088 	case WM_T_82576:
   2089 	case WM_T_82580:
   2090 	case WM_T_I350:
   2091 	case WM_T_I354:
   2092 	case WM_T_80003:
   2093 		/* SPI */
   2094 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2095 		wm_nvm_set_addrbits_size_eecd(sc);
   2096 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2097 		    | WM_F_LOCK_SWSM;
   2098 		sc->phy.acquire = wm_get_phy_82575;
   2099 		sc->phy.release = wm_put_phy_82575;
   2100 		break;
   2101 	case WM_T_ICH8:
   2102 	case WM_T_ICH9:
   2103 	case WM_T_ICH10:
   2104 	case WM_T_PCH:
   2105 	case WM_T_PCH2:
   2106 	case WM_T_PCH_LPT:
   2107 		/* FLASH */
   2108 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2109 		sc->sc_nvm_wordsize = 2048;
   2110 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2111 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2112 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2113 			aprint_error_dev(sc->sc_dev,
   2114 			    "can't map FLASH registers\n");
   2115 			goto out;
   2116 		}
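         		/*
         		 * GFPREG describes the flash region holding the NVM:
         		 * the field masked by ICH_GFPREG_BASE_MASK is the first
         		 * sector, and the same field at bit 16 is the last
         		 * sector, both in ICH_FLASH_SECTOR_SIZE units.  The
         		 * region holds two NVM banks, so the per-bank size in
         		 * 16-bit words is (last + 1 - first) sectors times the
         		 * sector size, divided by 2 banks and 2 bytes per word.
         		 */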
   2117 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2118 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2119 		    ICH_FLASH_SECTOR_SIZE;
   2120 		sc->sc_ich8_flash_bank_size =
   2121 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2122 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2123 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2124 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2125 		sc->sc_flashreg_offset = 0;
   2126 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2127 		sc->phy.release = wm_put_swflag_ich8lan;
   2128 		break;
   2129 	case WM_T_PCH_SPT:
   2130 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2131 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2132 		sc->sc_flasht = sc->sc_st;
   2133 		sc->sc_flashh = sc->sc_sh;
   2134 		sc->sc_ich8_flash_base = 0;
   2135 		sc->sc_nvm_wordsize =
   2136 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2137 			* NVM_SIZE_MULTIPLIER;
    2138 		/* It is the size in bytes; we want words */
   2139 		sc->sc_nvm_wordsize /= 2;
   2140 		/* assume 2 banks */
   2141 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2142 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2143 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2144 		sc->phy.release = wm_put_swflag_ich8lan;
   2145 		break;
   2146 	case WM_T_I210:
   2147 	case WM_T_I211:
   2148 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2149 			wm_nvm_set_addrbits_size_eecd(sc);
   2150 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2151 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2152 		} else {
   2153 			sc->sc_nvm_wordsize = INVM_SIZE;
   2154 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2155 		}
   2156 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2157 		sc->phy.acquire = wm_get_phy_82575;
   2158 		sc->phy.release = wm_put_phy_82575;
   2159 		break;
   2160 	default:
   2161 		break;
   2162 	}
   2163 
   2164 	/* Reset the chip to a known state. */
   2165 	wm_reset(sc);
   2166 
   2167 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2168 	switch (sc->sc_type) {
   2169 	case WM_T_82571:
   2170 	case WM_T_82572:
   2171 		reg = CSR_READ(sc, WMREG_SWSM2);
   2172 		if ((reg & SWSM2_LOCK) == 0) {
   2173 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2174 			force_clear_smbi = true;
   2175 		} else
   2176 			force_clear_smbi = false;
   2177 		break;
   2178 	case WM_T_82573:
   2179 	case WM_T_82574:
   2180 	case WM_T_82583:
   2181 		force_clear_smbi = true;
   2182 		break;
   2183 	default:
   2184 		force_clear_smbi = false;
   2185 		break;
   2186 	}
   2187 	if (force_clear_smbi) {
   2188 		reg = CSR_READ(sc, WMREG_SWSM);
   2189 		if ((reg & SWSM_SMBI) != 0)
   2190 			aprint_error_dev(sc->sc_dev,
   2191 			    "Please update the Bootagent\n");
   2192 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2193 	}
   2194 
   2195 	/*
    2196 	 * Defer printing the EEPROM type until after verifying the checksum.
   2197 	 * This allows the EEPROM type to be printed correctly in the case
   2198 	 * that no EEPROM is attached.
   2199 	 */
   2200 	/*
   2201 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2202 	 * this for later, so we can fail future reads from the EEPROM.
   2203 	 */
   2204 	if (wm_nvm_validate_checksum(sc)) {
   2205 		/*
    2206 		 * Read it again, because some PCIe parts fail the
    2207 		 * first check due to the link being in a sleep state.
   2208 		 */
   2209 		if (wm_nvm_validate_checksum(sc))
   2210 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2211 	}
   2212 
   2213 	/* Set device properties (macflags) */
   2214 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2215 
   2216 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2217 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2218 	else {
   2219 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2220 		    sc->sc_nvm_wordsize);
   2221 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2222 			aprint_verbose("iNVM");
   2223 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2224 			aprint_verbose("FLASH(HW)");
   2225 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2226 			aprint_verbose("FLASH");
   2227 		else {
   2228 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2229 				eetype = "SPI";
   2230 			else
   2231 				eetype = "MicroWire";
   2232 			aprint_verbose("(%d address bits) %s EEPROM",
   2233 			    sc->sc_nvm_addrbits, eetype);
   2234 		}
   2235 	}
   2236 	wm_nvm_version(sc);
   2237 	aprint_verbose("\n");
   2238 
   2239 	/* Check for I21[01] PLL workaround */
   2240 	if (sc->sc_type == WM_T_I210)
   2241 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2242 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2243 		/* NVM image release 3.25 has a workaround */
   2244 		if ((sc->sc_nvm_ver_major < 3)
   2245 		    || ((sc->sc_nvm_ver_major == 3)
   2246 			&& (sc->sc_nvm_ver_minor < 25))) {
   2247 			aprint_verbose_dev(sc->sc_dev,
   2248 			    "ROM image version %d.%d is older than 3.25\n",
   2249 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2250 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2251 		}
   2252 	}
   2253 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2254 		wm_pll_workaround_i210(sc);
   2255 
   2256 	wm_get_wakeup(sc);
   2257 
   2258 	/* Non-AMT based hardware can now take control from firmware */
   2259 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2260 		wm_get_hw_control(sc);
   2261 
   2262 	/*
    2263 	 * Read the Ethernet address from the EEPROM, unless it is
    2264 	 * found first in the device properties.
   2265 	 */
   2266 	ea = prop_dictionary_get(dict, "mac-address");
   2267 	if (ea != NULL) {
   2268 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2269 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2270 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2271 	} else {
   2272 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2273 			aprint_error_dev(sc->sc_dev,
   2274 			    "unable to read Ethernet address\n");
   2275 			goto out;
   2276 		}
   2277 	}
   2278 
   2279 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2280 	    ether_sprintf(enaddr));
   2281 
   2282 	/*
   2283 	 * Read the config info from the EEPROM, and set up various
   2284 	 * bits in the control registers based on their contents.
   2285 	 */
   2286 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2287 	if (pn != NULL) {
   2288 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2289 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2290 	} else {
   2291 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2292 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2293 			goto out;
   2294 		}
   2295 	}
   2296 
   2297 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2298 	if (pn != NULL) {
   2299 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2300 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2301 	} else {
   2302 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2303 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2304 			goto out;
   2305 		}
   2306 	}
   2307 
   2308 	/* check for WM_F_WOL */
   2309 	switch (sc->sc_type) {
   2310 	case WM_T_82542_2_0:
   2311 	case WM_T_82542_2_1:
   2312 	case WM_T_82543:
   2313 		/* dummy? */
   2314 		eeprom_data = 0;
   2315 		apme_mask = NVM_CFG3_APME;
   2316 		break;
   2317 	case WM_T_82544:
   2318 		apme_mask = NVM_CFG2_82544_APM_EN;
   2319 		eeprom_data = cfg2;
   2320 		break;
   2321 	case WM_T_82546:
   2322 	case WM_T_82546_3:
   2323 	case WM_T_82571:
   2324 	case WM_T_82572:
   2325 	case WM_T_82573:
   2326 	case WM_T_82574:
   2327 	case WM_T_82583:
   2328 	case WM_T_80003:
   2329 	default:
   2330 		apme_mask = NVM_CFG3_APME;
   2331 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2332 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2333 		break;
   2334 	case WM_T_82575:
   2335 	case WM_T_82576:
   2336 	case WM_T_82580:
   2337 	case WM_T_I350:
   2338 	case WM_T_I354: /* XXX ok? */
   2339 	case WM_T_ICH8:
   2340 	case WM_T_ICH9:
   2341 	case WM_T_ICH10:
   2342 	case WM_T_PCH:
   2343 	case WM_T_PCH2:
   2344 	case WM_T_PCH_LPT:
   2345 	case WM_T_PCH_SPT:
   2346 		/* XXX The funcid should be checked on some devices */
   2347 		apme_mask = WUC_APME;
   2348 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2349 		break;
   2350 	}
   2351 
   2352 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2353 	if ((eeprom_data & apme_mask) != 0)
   2354 		sc->sc_flags |= WM_F_WOL;
   2355 #ifdef WM_DEBUG
   2356 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2357 		printf("WOL\n");
   2358 #endif
   2359 
   2360 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2361 		/* Check NVM for autonegotiation */
   2362 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2363 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2364 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2365 		}
   2366 	}
   2367 
   2368 	/*
    2369 	 * XXX need special handling for some multiple-port cards
    2370 	 * to disable a particular port.
   2371 	 */
   2372 
   2373 	if (sc->sc_type >= WM_T_82544) {
   2374 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2375 		if (pn != NULL) {
   2376 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2377 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2378 		} else {
   2379 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2380 				aprint_error_dev(sc->sc_dev,
   2381 				    "unable to read SWDPIN\n");
   2382 				goto out;
   2383 			}
   2384 		}
   2385 	}
   2386 
   2387 	if (cfg1 & NVM_CFG1_ILOS)
   2388 		sc->sc_ctrl |= CTRL_ILOS;
   2389 
   2390 	/*
   2391 	 * XXX
    2392 	 * This code isn't correct because pins 2 and 3 are located at
    2393 	 * different positions on newer chips. Check all the datasheets.
    2394 	 *
    2395 	 * Until this is resolved, apply it only to chips up to the 82580.
   2396 	 */
   2397 	if (sc->sc_type <= WM_T_82580) {
   2398 		if (sc->sc_type >= WM_T_82544) {
   2399 			sc->sc_ctrl |=
   2400 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2401 			    CTRL_SWDPIO_SHIFT;
   2402 			sc->sc_ctrl |=
   2403 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2404 			    CTRL_SWDPINS_SHIFT;
   2405 		} else {
   2406 			sc->sc_ctrl |=
   2407 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2408 			    CTRL_SWDPIO_SHIFT;
   2409 		}
   2410 	}
   2411 
   2412 	/* XXX For other than 82580? */
   2413 	if (sc->sc_type == WM_T_82580) {
   2414 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2415 		if (nvmword & __BIT(13))
   2416 			sc->sc_ctrl |= CTRL_ILOS;
   2417 	}
   2418 
   2419 #if 0
   2420 	if (sc->sc_type >= WM_T_82544) {
   2421 		if (cfg1 & NVM_CFG1_IPS0)
   2422 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2423 		if (cfg1 & NVM_CFG1_IPS1)
   2424 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2425 		sc->sc_ctrl_ext |=
   2426 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2427 		    CTRL_EXT_SWDPIO_SHIFT;
   2428 		sc->sc_ctrl_ext |=
   2429 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2430 		    CTRL_EXT_SWDPINS_SHIFT;
   2431 	} else {
   2432 		sc->sc_ctrl_ext |=
   2433 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2434 		    CTRL_EXT_SWDPIO_SHIFT;
   2435 	}
   2436 #endif
   2437 
   2438 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2439 #if 0
   2440 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2441 #endif
   2442 
   2443 	if (sc->sc_type == WM_T_PCH) {
   2444 		uint16_t val;
   2445 
   2446 		/* Save the NVM K1 bit setting */
   2447 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2448 
   2449 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2450 			sc->sc_nvm_k1_enabled = 1;
   2451 		else
   2452 			sc->sc_nvm_k1_enabled = 0;
   2453 	}
   2454 
   2455 	/*
    2456 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2457 	 * media structures accordingly.
   2458 	 */
   2459 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2460 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2461 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2462 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2463 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2464 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2465 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2466 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2467 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2468 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2469 	    || (sc->sc_type == WM_T_I211)) {
   2470 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2471 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2472 		switch (link_mode) {
   2473 		case CTRL_EXT_LINK_MODE_1000KX:
   2474 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2475 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2476 			break;
   2477 		case CTRL_EXT_LINK_MODE_SGMII:
   2478 			if (wm_sgmii_uses_mdio(sc)) {
   2479 				aprint_verbose_dev(sc->sc_dev,
   2480 				    "SGMII(MDIO)\n");
   2481 				sc->sc_flags |= WM_F_SGMII;
   2482 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2483 				break;
   2484 			}
   2485 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2486 			/*FALLTHROUGH*/
   2487 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2488 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2489 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2490 				if (link_mode
   2491 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2492 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2493 					sc->sc_flags |= WM_F_SGMII;
   2494 				} else {
   2495 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2496 					aprint_verbose_dev(sc->sc_dev,
   2497 					    "SERDES\n");
   2498 				}
   2499 				break;
   2500 			}
   2501 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2502 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2503 
   2504 			/* Change current link mode setting */
   2505 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2506 			switch (sc->sc_mediatype) {
   2507 			case WM_MEDIATYPE_COPPER:
   2508 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2509 				break;
   2510 			case WM_MEDIATYPE_SERDES:
   2511 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2512 				break;
   2513 			default:
   2514 				break;
   2515 			}
   2516 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2517 			break;
   2518 		case CTRL_EXT_LINK_MODE_GMII:
   2519 		default:
   2520 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2521 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2522 			break;
   2523 		}
   2524 
    2525 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2526 			reg |= CTRL_EXT_I2C_ENA;
    2527 		else
    2528 			reg &= ~CTRL_EXT_I2C_ENA;
   2530 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2531 
   2532 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2533 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2534 		else
   2535 			wm_tbi_mediainit(sc);
   2536 	} else if (sc->sc_type < WM_T_82543 ||
   2537 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2538 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2539 			aprint_error_dev(sc->sc_dev,
   2540 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2541 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2542 		}
   2543 		wm_tbi_mediainit(sc);
   2544 	} else {
   2545 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2546 			aprint_error_dev(sc->sc_dev,
   2547 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2548 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2549 		}
   2550 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2551 	}
   2552 
   2553 	ifp = &sc->sc_ethercom.ec_if;
   2554 	xname = device_xname(sc->sc_dev);
   2555 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2556 	ifp->if_softc = sc;
   2557 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2558 #ifdef WM_MPSAFE
   2559 	ifp->if_extflags = IFEF_START_MPSAFE;
   2560 #endif
   2561 	ifp->if_ioctl = wm_ioctl;
   2562 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2563 		ifp->if_start = wm_nq_start;
   2564 		/*
    2565 		 * When the number of CPUs is one and the controller can use
    2566 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2567 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2568 		 * the other for link status changes.
   2569 		 * In this situation, wm_nq_transmit() is disadvantageous
   2570 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2571 		 */
   2572 		if (wm_is_using_multiqueue(sc))
   2573 			ifp->if_transmit = wm_nq_transmit;
   2574 	} else {
   2575 		ifp->if_start = wm_start;
   2576 		/*
    2577 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2578 		 */
   2579 		if (wm_is_using_multiqueue(sc))
   2580 			ifp->if_transmit = wm_transmit;
   2581 	}
   2582 	ifp->if_watchdog = wm_watchdog;
   2583 	ifp->if_init = wm_init;
   2584 	ifp->if_stop = wm_stop;
   2585 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2586 	IFQ_SET_READY(&ifp->if_snd);
   2587 
   2588 	/* Check for jumbo frame */
   2589 	switch (sc->sc_type) {
   2590 	case WM_T_82573:
   2591 		/* XXX limited to 9234 if ASPM is disabled */
   2592 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2593 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2594 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2595 		break;
   2596 	case WM_T_82571:
   2597 	case WM_T_82572:
   2598 	case WM_T_82574:
   2599 	case WM_T_82575:
   2600 	case WM_T_82576:
   2601 	case WM_T_82580:
   2602 	case WM_T_I350:
    2603 	case WM_T_I354: /* XXX ok? */
   2604 	case WM_T_I210:
   2605 	case WM_T_I211:
   2606 	case WM_T_80003:
   2607 	case WM_T_ICH9:
   2608 	case WM_T_ICH10:
   2609 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2610 	case WM_T_PCH_LPT:
   2611 	case WM_T_PCH_SPT:
   2612 		/* XXX limited to 9234 */
   2613 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2614 		break;
   2615 	case WM_T_PCH:
   2616 		/* XXX limited to 4096 */
   2617 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2618 		break;
   2619 	case WM_T_82542_2_0:
   2620 	case WM_T_82542_2_1:
   2621 	case WM_T_82583:
   2622 	case WM_T_ICH8:
   2623 		/* No support for jumbo frame */
   2624 		break;
   2625 	default:
   2626 		/* ETHER_MAX_LEN_JUMBO */
   2627 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2628 		break;
   2629 	}
   2630 
    2631 	/* If we're an i82543 or greater, we can support VLANs. */
   2632 	if (sc->sc_type >= WM_T_82543)
   2633 		sc->sc_ethercom.ec_capabilities |=
   2634 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2635 
   2636 	/*
    2637 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2638 	 * on i82543 and later.
   2639 	 */
   2640 	if (sc->sc_type >= WM_T_82543) {
   2641 		ifp->if_capabilities |=
   2642 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2643 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2644 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2645 		    IFCAP_CSUM_TCPv6_Tx |
   2646 		    IFCAP_CSUM_UDPv6_Tx;
   2647 	}
   2648 
   2649 	/*
    2650 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2651 	 *
   2652 	 *	82541GI (8086:1076) ... no
   2653 	 *	82572EI (8086:10b9) ... yes
   2654 	 */
   2655 	if (sc->sc_type >= WM_T_82571) {
   2656 		ifp->if_capabilities |=
   2657 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2658 	}
   2659 
   2660 	/*
    2661 	 * If we're an i82544 or greater (except i82547), we can do
   2662 	 * TCP segmentation offload.
   2663 	 */
   2664 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2665 		ifp->if_capabilities |= IFCAP_TSOv4;
   2666 	}
   2667 
   2668 	if (sc->sc_type >= WM_T_82571) {
   2669 		ifp->if_capabilities |= IFCAP_TSOv6;
   2670 	}
   2671 
   2672 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2673 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2674 
   2675 #ifdef WM_MPSAFE
   2676 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2677 #else
   2678 	sc->sc_core_lock = NULL;
   2679 #endif
   2680 
   2681 	/* Attach the interface. */
   2682 	if_initialize(ifp);
   2683 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2684 	ether_ifattach(ifp, enaddr);
   2685 	if_register(ifp);
   2686 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2687 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2688 			  RND_FLAG_DEFAULT);
   2689 
   2690 #ifdef WM_EVENT_COUNTERS
   2691 	/* Attach event counters. */
   2692 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2693 	    NULL, xname, "linkintr");
   2694 
   2695 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2696 	    NULL, xname, "tx_xoff");
   2697 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2698 	    NULL, xname, "tx_xon");
   2699 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2700 	    NULL, xname, "rx_xoff");
   2701 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2702 	    NULL, xname, "rx_xon");
   2703 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2704 	    NULL, xname, "rx_macctl");
   2705 #endif /* WM_EVENT_COUNTERS */
   2706 
   2707 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2708 		pmf_class_network_register(self, ifp);
   2709 	else
   2710 		aprint_error_dev(self, "couldn't establish power handler\n");
   2711 
   2712 	sc->sc_flags |= WM_F_ATTACHED;
   2713  out:
   2714 	return;
   2715 }
   2716 
   2717 /* The detach function (ca_detach) */
   2718 static int
   2719 wm_detach(device_t self, int flags __unused)
   2720 {
   2721 	struct wm_softc *sc = device_private(self);
   2722 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2723 	int i;
   2724 
   2725 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2726 		return 0;
   2727 
   2728 	/* Stop the interface. Callouts are stopped in it. */
   2729 	wm_stop(ifp, 1);
   2730 
   2731 	pmf_device_deregister(self);
   2732 
   2733 #ifdef WM_EVENT_COUNTERS
   2734 	evcnt_detach(&sc->sc_ev_linkintr);
   2735 
   2736 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2737 	evcnt_detach(&sc->sc_ev_tx_xon);
   2738 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2739 	evcnt_detach(&sc->sc_ev_rx_xon);
   2740 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2741 #endif /* WM_EVENT_COUNTERS */
   2742 
   2743 	/* Tell the firmware about the release */
   2744 	WM_CORE_LOCK(sc);
   2745 	wm_release_manageability(sc);
   2746 	wm_release_hw_control(sc);
   2747 	wm_enable_wakeup(sc);
   2748 	WM_CORE_UNLOCK(sc);
   2749 
   2750 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2751 
   2752 	/* Delete all remaining media. */
   2753 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2754 
   2755 	ether_ifdetach(ifp);
   2756 	if_detach(ifp);
   2757 	if_percpuq_destroy(sc->sc_ipq);
   2758 
   2759 	/* Unload RX dmamaps and free mbufs */
   2760 	for (i = 0; i < sc->sc_nqueues; i++) {
   2761 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2762 		mutex_enter(rxq->rxq_lock);
   2763 		wm_rxdrain(rxq);
   2764 		mutex_exit(rxq->rxq_lock);
   2765 	}
   2766 	/* Must unlock here */
   2767 
   2768 	/* Disestablish the interrupt handler */
   2769 	for (i = 0; i < sc->sc_nintrs; i++) {
   2770 		if (sc->sc_ihs[i] != NULL) {
   2771 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2772 			sc->sc_ihs[i] = NULL;
   2773 		}
   2774 	}
   2775 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2776 
   2777 	wm_free_txrx_queues(sc);
   2778 
   2779 	/* Unmap the registers */
   2780 	if (sc->sc_ss) {
   2781 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2782 		sc->sc_ss = 0;
   2783 	}
   2784 	if (sc->sc_ios) {
   2785 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2786 		sc->sc_ios = 0;
   2787 	}
   2788 	if (sc->sc_flashs) {
   2789 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2790 		sc->sc_flashs = 0;
   2791 	}
   2792 
   2793 	if (sc->sc_core_lock)
   2794 		mutex_obj_free(sc->sc_core_lock);
   2795 	if (sc->sc_ich_phymtx)
   2796 		mutex_obj_free(sc->sc_ich_phymtx);
   2797 	if (sc->sc_ich_nvmmtx)
   2798 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2799 
   2800 	return 0;
   2801 }
   2802 
   2803 static bool
   2804 wm_suspend(device_t self, const pmf_qual_t *qual)
   2805 {
   2806 	struct wm_softc *sc = device_private(self);
   2807 
   2808 	wm_release_manageability(sc);
   2809 	wm_release_hw_control(sc);
   2810 	wm_enable_wakeup(sc);
   2811 
   2812 	return true;
   2813 }
   2814 
   2815 static bool
   2816 wm_resume(device_t self, const pmf_qual_t *qual)
   2817 {
   2818 	struct wm_softc *sc = device_private(self);
   2819 
   2820 	wm_init_manageability(sc);
   2821 
   2822 	return true;
   2823 }
   2824 
   2825 /*
   2826  * wm_watchdog:		[ifnet interface function]
   2827  *
   2828  *	Watchdog timer handler.
   2829  */
   2830 static void
   2831 wm_watchdog(struct ifnet *ifp)
   2832 {
   2833 	int qid;
   2834 	struct wm_softc *sc = ifp->if_softc;
   2835 
   2836 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2837 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2838 
   2839 		wm_watchdog_txq(ifp, txq);
   2840 	}
   2841 
   2842 	/* Reset the interface. */
   2843 	(void) wm_init(ifp);
   2844 
   2845 	/*
   2846 	 * Some upper layer processing still calls ifp->if_start()
   2847 	 * directly, e.g. ALTQ or single-CPU systems.
   2848 	 */
   2849 	/* Try to get more packets going. */
   2850 	ifp->if_start(ifp);
   2851 }
   2852 
   2853 static void
   2854 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2855 {
   2856 	struct wm_softc *sc = ifp->if_softc;
   2857 
   2858 	/*
   2859 	 * Since we're using delayed interrupts, sweep up
   2860 	 * before we report an error.
   2861 	 */
   2862 	mutex_enter(txq->txq_lock);
   2863 	wm_txeof(sc, txq);
   2864 	mutex_exit(txq->txq_lock);
   2865 
   2866 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2867 #ifdef WM_DEBUG
   2868 		int i, j;
   2869 		struct wm_txsoft *txs;
   2870 #endif
   2871 		log(LOG_ERR,
   2872 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2873 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2874 		    txq->txq_next);
   2875 		ifp->if_oerrors++;
   2876 #ifdef WM_DEBUG
   2877 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2878 		    i = WM_NEXTTXS(txq, i)) {
   2879 		    txs = &txq->txq_soft[i];
   2880 		    printf("txs %d tx %d -> %d\n",
   2881 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2882 		    for (j = txs->txs_firstdesc; ;
   2883 			j = WM_NEXTTX(txq, j)) {
   2884 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2885 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2886 			printf("\t %#08x%08x\n",
   2887 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2888 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2889 			if (j == txs->txs_lastdesc)
   2890 				break;
   2891 			}
   2892 		}
   2893 #endif
   2894 	}
   2895 }
   2896 
   2897 /*
   2898  * wm_tick:
   2899  *
   2900  *	One second timer, used to check link status, sweep up
   2901  *	completed transmit jobs, etc.
   2902  */
   2903 static void
   2904 wm_tick(void *arg)
   2905 {
   2906 	struct wm_softc *sc = arg;
   2907 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2908 #ifndef WM_MPSAFE
   2909 	int s = splnet();
   2910 #endif
   2911 
   2912 	WM_CORE_LOCK(sc);
   2913 
   2914 	if (sc->sc_core_stopping)
   2915 		goto out;
   2916 
   2917 	if (sc->sc_type >= WM_T_82542_2_1) {
   2918 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2919 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2920 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2921 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2922 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2923 	}
   2924 
   2925 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2926 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2927 	    + CSR_READ(sc, WMREG_CRCERRS)
   2928 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2929 	    + CSR_READ(sc, WMREG_SYMERRC)
   2930 	    + CSR_READ(sc, WMREG_RXERRC)
   2931 	    + CSR_READ(sc, WMREG_SEC)
   2932 	    + CSR_READ(sc, WMREG_CEXTERR)
   2933 	    + CSR_READ(sc, WMREG_RLEC);
   2934 	/*
   2935 	 * WMREG_RNBC is incremented when there are no available buffers
   2936 	 * in host memory. It does not count dropped packets, because the
   2937 	 * ethernet controller can still receive packets in that case as
   2938 	 * long as there is space in the PHY's FIFO.
   2939 	 *
   2940 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
   2941 	 * instead of if_iqdrops.
   2942 	 */
   2943 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2944 
   2945 	if (sc->sc_flags & WM_F_HAS_MII)
   2946 		mii_tick(&sc->sc_mii);
   2947 	else if ((sc->sc_type >= WM_T_82575)
   2948 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2949 		wm_serdes_tick(sc);
   2950 	else
   2951 		wm_tbi_tick(sc);
   2952 
   2953 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2954 out:
   2955 	WM_CORE_UNLOCK(sc);
   2956 #ifndef WM_MPSAFE
   2957 	splx(s);
   2958 #endif
   2959 }
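
        /*
         * Note on the self-rearming callout above: wm_tick() reschedules
         * itself with callout_reset() while the core lock is held, so the
         * tick stops either when wm_stop() halts the callout or when the
         * sc_core_stopping check skips the reset and lets the callout
         * simply expire.
         */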
   2960 
   2961 static int
   2962 wm_ifflags_cb(struct ethercom *ec)
   2963 {
   2964 	struct ifnet *ifp = &ec->ec_if;
   2965 	struct wm_softc *sc = ifp->if_softc;
   2966 	int rc = 0;
   2967 
   2968 	WM_CORE_LOCK(sc);
   2969 
   2970 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2971 	sc->sc_if_flags = ifp->if_flags;
   2972 
   2973 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2974 		rc = ENETRESET;
   2975 		goto out;
   2976 	}
   2977 
   2978 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2979 		wm_set_filter(sc);
   2980 
   2981 	wm_set_vlan(sc);
   2982 
   2983 out:
   2984 	WM_CORE_UNLOCK(sc);
   2985 
   2986 	return rc;
   2987 }
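
        /*
         * Note on the flags handling above: the XOR is the usual diff idiom,
         * so a set bit in "change" marks a flag that toggled since the last
         * call. Anything outside IFF_CANTCHANGE | IFF_DEBUG forces a full
         * reinit via ENETRESET, while a pure PROMISC/ALLMULTI toggle only
         * reloads the receive filter.
         */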
   2988 
   2989 /*
   2990  * wm_ioctl:		[ifnet interface function]
   2991  *
   2992  *	Handle control requests from the operator.
   2993  */
   2994 static int
   2995 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2996 {
   2997 	struct wm_softc *sc = ifp->if_softc;
   2998 	struct ifreq *ifr = (struct ifreq *) data;
   2999 	struct ifaddr *ifa = (struct ifaddr *)data;
   3000 	struct sockaddr_dl *sdl;
   3001 	int s, error;
   3002 
   3003 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3004 		device_xname(sc->sc_dev), __func__));
   3005 
   3006 #ifndef WM_MPSAFE
   3007 	s = splnet();
   3008 #endif
   3009 	switch (cmd) {
   3010 	case SIOCSIFMEDIA:
   3011 	case SIOCGIFMEDIA:
   3012 		WM_CORE_LOCK(sc);
   3013 		/* Flow control requires full-duplex mode. */
   3014 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3015 		    (ifr->ifr_media & IFM_FDX) == 0)
   3016 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3017 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3018 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3019 				/* We can do both TXPAUSE and RXPAUSE. */
   3020 				ifr->ifr_media |=
   3021 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3022 			}
   3023 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3024 		}
   3025 		WM_CORE_UNLOCK(sc);
   3026 #ifdef WM_MPSAFE
   3027 		s = splnet();
   3028 #endif
   3029 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3030 #ifdef WM_MPSAFE
   3031 		splx(s);
   3032 #endif
   3033 		break;
   3034 	case SIOCINITIFADDR:
   3035 		WM_CORE_LOCK(sc);
   3036 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3037 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3038 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3039 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3040 			/* unicast address is first multicast entry */
   3041 			wm_set_filter(sc);
   3042 			error = 0;
   3043 			WM_CORE_UNLOCK(sc);
   3044 			break;
   3045 		}
   3046 		WM_CORE_UNLOCK(sc);
   3047 		/*FALLTHROUGH*/
   3048 	default:
   3049 #ifdef WM_MPSAFE
   3050 		s = splnet();
   3051 #endif
   3052 		/* It may call wm_start, so unlock here */
   3053 		error = ether_ioctl(ifp, cmd, data);
   3054 #ifdef WM_MPSAFE
   3055 		splx(s);
   3056 #endif
   3057 		if (error != ENETRESET)
   3058 			break;
   3059 
   3060 		error = 0;
   3061 
   3062 		if (cmd == SIOCSIFCAP) {
   3063 			error = (*ifp->if_init)(ifp);
   3064 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3065 			;
   3066 		else if (ifp->if_flags & IFF_RUNNING) {
   3067 			/*
   3068 			 * Multicast list has changed; set the hardware filter
   3069 			 * accordingly.
   3070 			 */
   3071 			WM_CORE_LOCK(sc);
   3072 			wm_set_filter(sc);
   3073 			WM_CORE_UNLOCK(sc);
   3074 		}
   3075 		break;
   3076 	}
   3077 
   3078 #ifndef WM_MPSAFE
   3079 	splx(s);
   3080 #endif
   3081 	return error;
   3082 }
   3083 
   3084 /* MAC address related */
   3085 
   3086 /*
   3087  * Get the offset of the MAC address and return it.
   3088  * If an error occurs, offset 0 is used.
   3089  */
   3090 static uint16_t
   3091 wm_check_alt_mac_addr(struct wm_softc *sc)
   3092 {
   3093 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3094 	uint16_t offset = NVM_OFF_MACADDR;
   3095 
   3096 	/* Try to read alternative MAC address pointer */
   3097 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3098 		return 0;
   3099 
   3100 	/* Check whether the pointer is valid. */
   3101 	if ((offset == 0x0000) || (offset == 0xffff))
   3102 		return 0;
   3103 
   3104 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3105 	/*
   3106 	 * Check whether the alternative MAC address is valid.
   3107 	 * Some cards have a non-0xffff pointer but don't actually
   3108 	 * use an alternative MAC address.
   3109 	 *
   3110 	 * The actual check is whether the multicast (group) bit is set.
   3111 	 */
   3112 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3113 		if (((myea[0] & 0xff) & 0x01) == 0)
   3114 			return offset; /* Found */
   3115 
   3116 	/* Not found */
   3117 	return 0;
   3118 }
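
        /*
         * NVM layout sketch for the lookup above (illustrative only, not
         * taken from a specific datasheet): the word at
         * NVM_OFF_ALT_MAC_ADDR_PTR holds a word offset, and the
         * per-function address lives at that offset plus a function
         * displacement:
         *
         *	offset = nvm[NVM_OFF_ALT_MAC_ADDR_PTR];    e.g. 0x0037
         *	offset += NVM_OFF_MACADDR_82571(funcid);
         *	nvm[offset], nvm[offset+1], nvm[offset+2]  three 16-bit words
         *
         * A pointer of 0x0000 or 0xffff means "no alternative address".
         */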
   3119 
   3120 static int
   3121 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3122 {
   3123 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3124 	uint16_t offset = NVM_OFF_MACADDR;
   3125 	int do_invert = 0;
   3126 
   3127 	switch (sc->sc_type) {
   3128 	case WM_T_82580:
   3129 	case WM_T_I350:
   3130 	case WM_T_I354:
   3131 		/* EEPROM Top Level Partitioning */
   3132 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3133 		break;
   3134 	case WM_T_82571:
   3135 	case WM_T_82575:
   3136 	case WM_T_82576:
   3137 	case WM_T_80003:
   3138 	case WM_T_I210:
   3139 	case WM_T_I211:
   3140 		offset = wm_check_alt_mac_addr(sc);
   3141 		if (offset == 0)
   3142 			if ((sc->sc_funcid & 0x01) == 1)
   3143 				do_invert = 1;
   3144 		break;
   3145 	default:
   3146 		if ((sc->sc_funcid & 0x01) == 1)
   3147 			do_invert = 1;
   3148 		break;
   3149 	}
   3150 
   3151 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3152 		goto bad;
   3153 
   3154 	enaddr[0] = myea[0] & 0xff;
   3155 	enaddr[1] = myea[0] >> 8;
   3156 	enaddr[2] = myea[1] & 0xff;
   3157 	enaddr[3] = myea[1] >> 8;
   3158 	enaddr[4] = myea[2] & 0xff;
   3159 	enaddr[5] = myea[2] >> 8;
   3160 
   3161 	/*
   3162 	 * Toggle the LSB of the MAC address on the second port
   3163 	 * of some dual port cards.
   3164 	 */
   3165 	if (do_invert != 0)
   3166 		enaddr[5] ^= 1;
   3167 
   3168 	return 0;
   3169 
   3170  bad:
   3171 	return -1;
   3172 }
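
        /*
         * Worked example of the unpacking above: NVM words hold the address
         * as little-endian byte pairs, so myea[] = { 0x1100, 0x3322, 0x5544 }
         * yields 00:11:22:33:44:55, and on the second port of an affected
         * dual-port card the LSB toggle turns the final 0x55 into 0x54.
         */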
   3173 
   3174 /*
   3175  * wm_set_ral:
   3176  *
   3177  *	Set an entry in the receive address list.
   3178  */
   3179 static void
   3180 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3181 {
   3182 	uint32_t ral_lo, ral_hi;
   3183 
   3184 	if (enaddr != NULL) {
   3185 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3186 		    (enaddr[3] << 24);
   3187 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3188 		ral_hi |= RAL_AV;
   3189 	} else {
   3190 		ral_lo = 0;
   3191 		ral_hi = 0;
   3192 	}
   3193 
   3194 	if (sc->sc_type >= WM_T_82544) {
   3195 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3196 		    ral_lo);
   3197 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3198 		    ral_hi);
   3199 	} else {
   3200 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3201 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3202 	}
   3203 }
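
        /*
         * Register packing sketch for wm_set_ral() above: for the address
         * 00:11:22:33:44:55, ral_lo = 0x33221100 and ral_hi = 0x5544 | RAL_AV,
         * i.e. bytes 0-3 packed little-endian into the low register and
         * bytes 4-5 plus the Address Valid bit into the high one.
         */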
   3204 
   3205 /*
   3206  * wm_mchash:
   3207  *
   3208  *	Compute the hash of the multicast address for the 4096-bit
   3209  *	multicast filter.
   3210  */
   3211 static uint32_t
   3212 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3213 {
   3214 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3215 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3216 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3217 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3218 	uint32_t hash;
   3219 
   3220 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3221 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3222 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3223 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3224 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3225 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3226 		return (hash & 0x3ff);
   3227 	}
   3228 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3229 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3230 
   3231 	return (hash & 0xfff);
   3232 }
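
        /*
         * Worked example for wm_mchash() above: with sc_mchash_type == 0 on
         * a non-ICH/PCH chip and an address ending in enaddr[4] = 0xa5,
         * enaddr[5] = 0x18, the hash is (0xa5 >> 4) | (0x18 << 4) = 0x18a
         * (within the 12-bit mask), so wm_set_filter() below sets bit 0x0a
         * of MTA word 0x0c.
         */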
   3233 
   3234 /*
   3235  * wm_set_filter:
   3236  *
   3237  *	Set up the receive filter.
   3238  */
   3239 static void
   3240 wm_set_filter(struct wm_softc *sc)
   3241 {
   3242 	struct ethercom *ec = &sc->sc_ethercom;
   3243 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3244 	struct ether_multi *enm;
   3245 	struct ether_multistep step;
   3246 	bus_addr_t mta_reg;
   3247 	uint32_t hash, reg, bit;
   3248 	int i, size, ralmax;
   3249 
   3250 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3251 		device_xname(sc->sc_dev), __func__));
   3252 
   3253 	if (sc->sc_type >= WM_T_82544)
   3254 		mta_reg = WMREG_CORDOVA_MTA;
   3255 	else
   3256 		mta_reg = WMREG_MTA;
   3257 
   3258 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3259 
   3260 	if (ifp->if_flags & IFF_BROADCAST)
   3261 		sc->sc_rctl |= RCTL_BAM;
   3262 	if (ifp->if_flags & IFF_PROMISC) {
   3263 		sc->sc_rctl |= RCTL_UPE;
   3264 		goto allmulti;
   3265 	}
   3266 
   3267 	/*
   3268 	 * Set the station address in the first RAL slot, and
   3269 	 * clear the remaining slots.
   3270 	 */
   3271 	if (sc->sc_type == WM_T_ICH8)
   3272 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3273 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3274 	    || (sc->sc_type == WM_T_PCH))
   3275 		size = WM_RAL_TABSIZE_ICH8;
   3276 	else if (sc->sc_type == WM_T_PCH2)
   3277 		size = WM_RAL_TABSIZE_PCH2;
   3278 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3279 		size = WM_RAL_TABSIZE_PCH_LPT;
   3280 	else if (sc->sc_type == WM_T_82575)
   3281 		size = WM_RAL_TABSIZE_82575;
   3282 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3283 		size = WM_RAL_TABSIZE_82576;
   3284 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3285 		size = WM_RAL_TABSIZE_I350;
   3286 	else
   3287 		size = WM_RAL_TABSIZE;
   3288 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3289 
   3290 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3291 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3292 		switch (i) {
   3293 		case 0:
   3294 			/* We can use all entries */
   3295 			ralmax = size;
   3296 			break;
   3297 		case 1:
   3298 			/* Only RAR[0] */
   3299 			ralmax = 1;
   3300 			break;
   3301 		default:
   3302 			/* available SHRA + RAR[0] */
   3303 			ralmax = i + 1;
   3304 		}
   3305 	} else
   3306 		ralmax = size;
   3307 	for (i = 1; i < size; i++) {
   3308 		if (i < ralmax)
   3309 			wm_set_ral(sc, NULL, i);
   3310 	}
   3311 
   3312 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3313 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3314 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3315 	    || (sc->sc_type == WM_T_PCH_SPT))
   3316 		size = WM_ICH8_MC_TABSIZE;
   3317 	else
   3318 		size = WM_MC_TABSIZE;
   3319 	/* Clear out the multicast table. */
   3320 	for (i = 0; i < size; i++)
   3321 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3322 
   3323 	ETHER_LOCK(ec);
   3324 	ETHER_FIRST_MULTI(step, ec, enm);
   3325 	while (enm != NULL) {
   3326 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3327 			ETHER_UNLOCK(ec);
   3328 			/*
   3329 			 * We must listen to a range of multicast addresses.
   3330 			 * For now, just accept all multicasts, rather than
   3331 			 * trying to set only those filter bits needed to match
   3332 			 * the range.  (At this time, the only use of address
   3333 			 * ranges is for IP multicast routing, for which the
   3334 			 * range is big enough to require all bits set.)
   3335 			 */
   3336 			goto allmulti;
   3337 		}
   3338 
   3339 		hash = wm_mchash(sc, enm->enm_addrlo);
   3340 
   3341 		reg = (hash >> 5);
   3342 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3343 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3344 		    || (sc->sc_type == WM_T_PCH2)
   3345 		    || (sc->sc_type == WM_T_PCH_LPT)
   3346 		    || (sc->sc_type == WM_T_PCH_SPT))
   3347 			reg &= 0x1f;
   3348 		else
   3349 			reg &= 0x7f;
   3350 		bit = hash & 0x1f;
   3351 
   3352 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3353 		hash |= 1U << bit;
   3354 
   3355 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3356 			/*
   3357 			 * 82544 Errata 9: Certain register cannot be written
   3358 			 * with particular alignments in PCI-X bus operation
   3359 			 * (FCAH, MTA and VFTA).
   3360 			 */
   3361 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3362 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3363 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3364 		} else
   3365 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3366 
   3367 		ETHER_NEXT_MULTI(step, enm);
   3368 	}
   3369 	ETHER_UNLOCK(ec);
   3370 
   3371 	ifp->if_flags &= ~IFF_ALLMULTI;
   3372 	goto setit;
   3373 
   3374  allmulti:
   3375 	ifp->if_flags |= IFF_ALLMULTI;
   3376 	sc->sc_rctl |= RCTL_MPE;
   3377 
   3378  setit:
   3379 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3380 }
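
        /*
         * MTA indexing note for the filter above: the 12-bit hash (10-bit
         * on ICH/PCH) splits into a 32-bit-word index (hash >> 5, masked to
         * the table size) and a bit index (hash & 0x1f), giving a 4096-bit
         * (1024-bit on ICH/PCH) imperfect multicast filter. The 82544
         * read-back of the neighbouring word only satisfies Errata 9's
         * write-alignment rule.
         */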
   3381 
   3382 /* Reset and init related */
   3383 
   3384 static void
   3385 wm_set_vlan(struct wm_softc *sc)
   3386 {
   3387 
   3388 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3389 		device_xname(sc->sc_dev), __func__));
   3390 
   3391 	/* Deal with VLAN enables. */
   3392 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3393 		sc->sc_ctrl |= CTRL_VME;
   3394 	else
   3395 		sc->sc_ctrl &= ~CTRL_VME;
   3396 
   3397 	/* Write the control registers. */
   3398 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3399 }
   3400 
   3401 static void
   3402 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3403 {
   3404 	uint32_t gcr;
   3405 	pcireg_t ctrl2;
   3406 
   3407 	gcr = CSR_READ(sc, WMREG_GCR);
   3408 
   3409 	/* Only take action if the timeout value is at its default of 0 */
   3410 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3411 		goto out;
   3412 
   3413 	if ((gcr & GCR_CAP_VER2) == 0) {
   3414 		gcr |= GCR_CMPL_TMOUT_10MS;
   3415 		goto out;
   3416 	}
   3417 
   3418 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3419 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3420 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3421 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3422 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3423 
   3424 out:
   3425 	/* Disable completion timeout resend */
   3426 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3427 
   3428 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3429 }
   3430 
   3431 void
   3432 wm_get_auto_rd_done(struct wm_softc *sc)
   3433 {
   3434 	int i;
   3435 
   3436 	/* wait for eeprom to reload */
   3437 	switch (sc->sc_type) {
   3438 	case WM_T_82571:
   3439 	case WM_T_82572:
   3440 	case WM_T_82573:
   3441 	case WM_T_82574:
   3442 	case WM_T_82583:
   3443 	case WM_T_82575:
   3444 	case WM_T_82576:
   3445 	case WM_T_82580:
   3446 	case WM_T_I350:
   3447 	case WM_T_I354:
   3448 	case WM_T_I210:
   3449 	case WM_T_I211:
   3450 	case WM_T_80003:
   3451 	case WM_T_ICH8:
   3452 	case WM_T_ICH9:
   3453 		for (i = 0; i < 10; i++) {
   3454 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3455 				break;
   3456 			delay(1000);
   3457 		}
   3458 		if (i == 10) {
   3459 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3460 			    "complete\n", device_xname(sc->sc_dev));
   3461 		}
   3462 		break;
   3463 	default:
   3464 		break;
   3465 	}
   3466 }
   3467 
   3468 void
   3469 wm_lan_init_done(struct wm_softc *sc)
   3470 {
   3471 	uint32_t reg = 0;
   3472 	int i;
   3473 
   3474 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3475 		device_xname(sc->sc_dev), __func__));
   3476 
   3477 	/* Wait for eeprom to reload */
   3478 	switch (sc->sc_type) {
   3479 	case WM_T_ICH10:
   3480 	case WM_T_PCH:
   3481 	case WM_T_PCH2:
   3482 	case WM_T_PCH_LPT:
   3483 	case WM_T_PCH_SPT:
   3484 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3485 			reg = CSR_READ(sc, WMREG_STATUS);
   3486 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3487 				break;
   3488 			delay(100);
   3489 		}
   3490 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3491 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3492 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3493 		}
   3494 		break;
   3495 	default:
   3496 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3497 		    __func__);
   3498 		break;
   3499 	}
   3500 
   3501 	reg &= ~STATUS_LAN_INIT_DONE;
   3502 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3503 }
   3504 
   3505 void
   3506 wm_get_cfg_done(struct wm_softc *sc)
   3507 {
   3508 	int mask;
   3509 	uint32_t reg;
   3510 	int i;
   3511 
   3512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3513 		device_xname(sc->sc_dev), __func__));
   3514 
   3515 	/* Wait for eeprom to reload */
   3516 	switch (sc->sc_type) {
   3517 	case WM_T_82542_2_0:
   3518 	case WM_T_82542_2_1:
   3519 		/* null */
   3520 		break;
   3521 	case WM_T_82543:
   3522 	case WM_T_82544:
   3523 	case WM_T_82540:
   3524 	case WM_T_82545:
   3525 	case WM_T_82545_3:
   3526 	case WM_T_82546:
   3527 	case WM_T_82546_3:
   3528 	case WM_T_82541:
   3529 	case WM_T_82541_2:
   3530 	case WM_T_82547:
   3531 	case WM_T_82547_2:
   3532 	case WM_T_82573:
   3533 	case WM_T_82574:
   3534 	case WM_T_82583:
   3535 		/* generic */
   3536 		delay(10*1000);
   3537 		break;
   3538 	case WM_T_80003:
   3539 	case WM_T_82571:
   3540 	case WM_T_82572:
   3541 	case WM_T_82575:
   3542 	case WM_T_82576:
   3543 	case WM_T_82580:
   3544 	case WM_T_I350:
   3545 	case WM_T_I354:
   3546 	case WM_T_I210:
   3547 	case WM_T_I211:
   3548 		if (sc->sc_type == WM_T_82571) {
   3549 			/* Only 82571 shares port 0 */
   3550 			mask = EEMNGCTL_CFGDONE_0;
   3551 		} else
   3552 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3553 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3554 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3555 				break;
   3556 			delay(1000);
   3557 		}
   3558 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3559 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3560 				device_xname(sc->sc_dev), __func__));
   3561 		}
   3562 		break;
   3563 	case WM_T_ICH8:
   3564 	case WM_T_ICH9:
   3565 	case WM_T_ICH10:
   3566 	case WM_T_PCH:
   3567 	case WM_T_PCH2:
   3568 	case WM_T_PCH_LPT:
   3569 	case WM_T_PCH_SPT:
   3570 		delay(10*1000);
   3571 		if (sc->sc_type >= WM_T_ICH10)
   3572 			wm_lan_init_done(sc);
   3573 		else
   3574 			wm_get_auto_rd_done(sc);
   3575 
   3576 		reg = CSR_READ(sc, WMREG_STATUS);
   3577 		if ((reg & STATUS_PHYRA) != 0)
   3578 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3579 		break;
   3580 	default:
   3581 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3582 		    __func__);
   3583 		break;
   3584 	}
   3585 }
   3586 
   3587 /* Init hardware bits */
   3588 void
   3589 wm_initialize_hardware_bits(struct wm_softc *sc)
   3590 {
   3591 	uint32_t tarc0, tarc1, reg;
   3592 
   3593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3594 		device_xname(sc->sc_dev), __func__));
   3595 
   3596 	/* For 82571 variant, 80003 and ICHs */
   3597 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3598 	    || (sc->sc_type >= WM_T_80003)) {
   3599 
   3600 		/* Transmit Descriptor Control 0 */
   3601 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3602 		reg |= TXDCTL_COUNT_DESC;
   3603 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3604 
   3605 		/* Transmit Descriptor Control 1 */
   3606 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3607 		reg |= TXDCTL_COUNT_DESC;
   3608 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3609 
   3610 		/* TARC0 */
   3611 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3612 		switch (sc->sc_type) {
   3613 		case WM_T_82571:
   3614 		case WM_T_82572:
   3615 		case WM_T_82573:
   3616 		case WM_T_82574:
   3617 		case WM_T_82583:
   3618 		case WM_T_80003:
   3619 			/* Clear bits 30..27 */
   3620 			tarc0 &= ~__BITS(30, 27);
   3621 			break;
   3622 		default:
   3623 			break;
   3624 		}
   3625 
   3626 		switch (sc->sc_type) {
   3627 		case WM_T_82571:
   3628 		case WM_T_82572:
   3629 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3630 
   3631 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3632 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3633 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3634 			/* 8257[12] Errata No.7 */
   3635 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3636 
   3637 			/* TARC1 bit 28 */
   3638 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3639 				tarc1 &= ~__BIT(28);
   3640 			else
   3641 				tarc1 |= __BIT(28);
   3642 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3643 
   3644 			/*
   3645 			 * 8257[12] Errata No.13
   3646 			 * Disable Dynamic Clock Gating.
   3647 			 */
   3648 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3649 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3650 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3651 			break;
   3652 		case WM_T_82573:
   3653 		case WM_T_82574:
   3654 		case WM_T_82583:
   3655 			if ((sc->sc_type == WM_T_82574)
   3656 			    || (sc->sc_type == WM_T_82583))
   3657 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3658 
   3659 			/* Extended Device Control */
   3660 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3661 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3662 			reg |= __BIT(22);	/* Set bit 22 */
   3663 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3664 
   3665 			/* Device Control */
   3666 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3667 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3668 
   3669 			/* PCIe Control Register */
   3670 			/*
   3671 			 * 82573 Errata (unknown).
   3672 			 *
   3673 			 * 82574 Errata 25 and 82583 Errata 12
   3674 			 * "Dropped Rx Packets":
   3675 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3676 			 */
   3677 			reg = CSR_READ(sc, WMREG_GCR);
   3678 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3679 			CSR_WRITE(sc, WMREG_GCR, reg);
   3680 
   3681 			if ((sc->sc_type == WM_T_82574)
   3682 			    || (sc->sc_type == WM_T_82583)) {
   3683 				/*
   3684 				 * Document says this bit must be set for
   3685 				 * proper operation.
   3686 				 */
   3687 				reg = CSR_READ(sc, WMREG_GCR);
   3688 				reg |= __BIT(22);
   3689 				CSR_WRITE(sc, WMREG_GCR, reg);
   3690 
   3691 				/*
   3692 				 * Apply a workaround for the hardware
   3693 				 * erratum documented in the errata docs.
   3694 				 * It fixes an issue where error-prone or
   3695 				 * unreliable PCIe completions occur,
   3696 				 * particularly with ASPM enabled. Without
   3697 				 * the fix, the issue can cause Tx timeouts.
   3698 				 */
   3699 				reg = CSR_READ(sc, WMREG_GCR2);
   3700 				reg |= __BIT(0);
   3701 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3702 			}
   3703 			break;
   3704 		case WM_T_80003:
   3705 			/* TARC0 */
   3706 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3707 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3708 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3709 
   3710 			/* TARC1 bit 28 */
   3711 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3712 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3713 				tarc1 &= ~__BIT(28);
   3714 			else
   3715 				tarc1 |= __BIT(28);
   3716 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3717 			break;
   3718 		case WM_T_ICH8:
   3719 		case WM_T_ICH9:
   3720 		case WM_T_ICH10:
   3721 		case WM_T_PCH:
   3722 		case WM_T_PCH2:
   3723 		case WM_T_PCH_LPT:
   3724 		case WM_T_PCH_SPT:
   3725 			/* TARC0 */
   3726 			if ((sc->sc_type == WM_T_ICH8)
   3727 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3728 				/* Set TARC0 bits 29 and 28 */
   3729 				tarc0 |= __BITS(29, 28);
   3730 			}
   3731 			/* Set TARC0 bits 23,24,26,27 */
   3732 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3733 
   3734 			/* CTRL_EXT */
   3735 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3736 			reg |= __BIT(22);	/* Set bit 22 */
   3737 			/*
   3738 			 * Enable PHY low-power state when MAC is at D3
   3739 			 * w/o WoL
   3740 			 */
   3741 			if (sc->sc_type >= WM_T_PCH)
   3742 				reg |= CTRL_EXT_PHYPDEN;
   3743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3744 
   3745 			/* TARC1 */
   3746 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3747 			/* bit 28 */
   3748 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3749 				tarc1 &= ~__BIT(28);
   3750 			else
   3751 				tarc1 |= __BIT(28);
   3752 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3753 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3754 
   3755 			/* Device Status */
   3756 			if (sc->sc_type == WM_T_ICH8) {
   3757 				reg = CSR_READ(sc, WMREG_STATUS);
   3758 				reg &= ~__BIT(31);
   3759 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3760 
   3761 			}
   3762 
   3763 			/* IOSFPC */
   3764 			if (sc->sc_type == WM_T_PCH_SPT) {
   3765 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3766 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3767 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3768 			}
   3769 			/*
   3770 			 * Work-around descriptor data corruption issue during
   3771 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3772 			 * capability.
   3773 			 */
   3774 			reg = CSR_READ(sc, WMREG_RFCTL);
   3775 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3776 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3777 			break;
   3778 		default:
   3779 			break;
   3780 		}
   3781 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3782 
   3783 		switch (sc->sc_type) {
   3784 		/*
   3785 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3786 		 * Avoid RSS Hash Value bug.
   3787 		 */
   3788 		case WM_T_82571:
   3789 		case WM_T_82572:
   3790 		case WM_T_82573:
   3791 		case WM_T_80003:
   3792 		case WM_T_ICH8:
   3793 			reg = CSR_READ(sc, WMREG_RFCTL);
   3794 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3795 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3796 			break;
   3797 		case WM_T_82574:
   3798 			/* Use extended Rx descriptors. */
   3799 			reg = CSR_READ(sc, WMREG_RFCTL);
   3800 			reg |= WMREG_RFCTL_EXSTEN;
   3801 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3802 			break;
   3803 		default:
   3804 			break;
   3805 		}
   3806 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3807 		/*
   3808 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3809 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3810 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3811 		 * Correctly by the Device"
   3812 		 *
   3813 		 * I354(C2000) Errata AVR53:
   3814 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3815 		 * Hang"
   3816 		 */
   3817 		reg = CSR_READ(sc, WMREG_RFCTL);
   3818 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3819 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3820 	}
   3821 }
   3822 
   3823 static uint32_t
   3824 wm_rxpbs_adjust_82580(uint32_t val)
   3825 {
   3826 	uint32_t rv = 0;
   3827 
   3828 	if (val < __arraycount(wm_82580_rxpbs_table))
   3829 		rv = wm_82580_rxpbs_table[val];
   3830 
   3831 	return rv;
   3832 }
   3833 
   3834 /*
   3835  * wm_reset_phy:
   3836  *
   3837  *	generic PHY reset function.
   3838  *	Same as e1000_phy_hw_reset_generic()
   3839  */
   3840 static void
   3841 wm_reset_phy(struct wm_softc *sc)
   3842 {
   3843 	uint32_t reg;
   3844 
   3845 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3846 		device_xname(sc->sc_dev), __func__));
   3847 	if (wm_phy_resetisblocked(sc))
   3848 		return;
   3849 
   3850 	sc->phy.acquire(sc);
   3851 
   3852 	reg = CSR_READ(sc, WMREG_CTRL);
   3853 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3854 	CSR_WRITE_FLUSH(sc);
   3855 
   3856 	delay(sc->phy.reset_delay_us);
   3857 
   3858 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3859 	CSR_WRITE_FLUSH(sc);
   3860 
   3861 	delay(150);
   3862 
   3863 	sc->phy.release(sc);
   3864 
   3865 	wm_get_cfg_done(sc);
   3866 }
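
        /*
         * The sequence above is the generic reset pulse: assert
         * CTRL_PHY_RESET, flush posted writes, hold it for the PHY-specific
         * reset_delay_us, deassert, then wait 150us before the PHY may be
         * touched again; wm_get_cfg_done() finally waits for the PHY to
         * reload its configuration from NVM.
         */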
   3867 
   3868 static void
   3869 wm_flush_desc_rings(struct wm_softc *sc)
   3870 {
   3871 	pcireg_t preg;
   3872 	uint32_t reg;
   3873 	int nexttx;
   3874 
   3875 	/* First, disable MULR fix in FEXTNVM11 */
   3876 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3877 	reg |= FEXTNVM11_DIS_MULRFIX;
   3878 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3879 
   3880 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3881 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3882 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3883 		struct wm_txqueue *txq;
   3884 		wiseman_txdesc_t *txd;
   3885 
   3886 		/* TX */
   3887 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3888 		    device_xname(sc->sc_dev), preg, reg);
   3889 		reg = CSR_READ(sc, WMREG_TCTL);
   3890 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3891 
   3892 		txq = &sc->sc_queue[0].wmq_txq;
   3893 		nexttx = txq->txq_next;
   3894 		txd = &txq->txq_descs[nexttx];
   3895 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3896 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3897 		txd->wtx_fields.wtxu_status = 0;
   3898 		txd->wtx_fields.wtxu_options = 0;
   3899 		txd->wtx_fields.wtxu_vlan = 0;
   3900 
   3901 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3902 			BUS_SPACE_BARRIER_WRITE);
   3903 
   3904 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3905 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3906 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3907 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3908 		delay(250);
   3909 	}
   3910 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3911 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3912 		uint32_t rctl;
   3913 
   3914 		/* RX */
   3915 		printf("%s: Need RX flush (reg = %08x)\n",
   3916 		    device_xname(sc->sc_dev), preg);
   3917 		rctl = CSR_READ(sc, WMREG_RCTL);
   3918 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3919 		CSR_WRITE_FLUSH(sc);
   3920 		delay(150);
   3921 
   3922 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3923 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3924 		reg &= 0xffffc000;
   3925 		/*
   3926 		 * update thresholds: prefetch threshold to 31, host threshold
   3927 		 * to 1 and make sure the granularity is "descriptors" and not
   3928 		 * "cache lines"
   3929 		 */
   3930 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3931 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3932 
   3933 		/*
   3934 		 * momentarily enable the RX ring for the changes to take
   3935 		 * effect
   3936 		 */
   3937 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3938 		CSR_WRITE_FLUSH(sc);
   3939 		delay(150);
   3940 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3941 	}
   3942 }
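
        /*
         * Note on the flush above (a reading of the intent, mirroring the
         * equivalent e1000e workaround): the single 512-byte IFCS descriptor
         * carries no useful data; bumping TDT past it is what forces the DMA
         * engine to advance beyond a stuck descriptor, and the subsequent RX
         * threshold rewrite plus the brief RCTL_EN toggle flushes the
         * receive side the same way.
         */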
   3943 
   3944 /*
   3945  * wm_reset:
   3946  *
   3947  *	Reset the i82542 chip.
   3948  */
   3949 static void
   3950 wm_reset(struct wm_softc *sc)
   3951 {
   3952 	int phy_reset = 0;
   3953 	int i, error = 0;
   3954 	uint32_t reg;
   3955 
   3956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3957 		device_xname(sc->sc_dev), __func__));
   3958 	KASSERT(sc->sc_type != 0);
   3959 
   3960 	/*
   3961 	 * Allocate on-chip memory according to the MTU size.
   3962 	 * The Packet Buffer Allocation register must be written
   3963 	 * before the chip is reset.
   3964 	 */
   3965 	switch (sc->sc_type) {
   3966 	case WM_T_82547:
   3967 	case WM_T_82547_2:
   3968 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3969 		    PBA_22K : PBA_30K;
   3970 		for (i = 0; i < sc->sc_nqueues; i++) {
   3971 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3972 			txq->txq_fifo_head = 0;
   3973 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3974 			txq->txq_fifo_size =
   3975 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3976 			txq->txq_fifo_stall = 0;
   3977 		}
   3978 		break;
   3979 	case WM_T_82571:
   3980 	case WM_T_82572:
   3981 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3982 	case WM_T_80003:
   3983 		sc->sc_pba = PBA_32K;
   3984 		break;
   3985 	case WM_T_82573:
   3986 		sc->sc_pba = PBA_12K;
   3987 		break;
   3988 	case WM_T_82574:
   3989 	case WM_T_82583:
   3990 		sc->sc_pba = PBA_20K;
   3991 		break;
   3992 	case WM_T_82576:
   3993 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3994 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3995 		break;
   3996 	case WM_T_82580:
   3997 	case WM_T_I350:
   3998 	case WM_T_I354:
   3999 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4000 		break;
   4001 	case WM_T_I210:
   4002 	case WM_T_I211:
   4003 		sc->sc_pba = PBA_34K;
   4004 		break;
   4005 	case WM_T_ICH8:
   4006 		/* Workaround for a bit corruption issue in FIFO memory */
   4007 		sc->sc_pba = PBA_8K;
   4008 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4009 		break;
   4010 	case WM_T_ICH9:
   4011 	case WM_T_ICH10:
   4012 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4013 		    PBA_14K : PBA_10K;
   4014 		break;
   4015 	case WM_T_PCH:
   4016 	case WM_T_PCH2:
   4017 	case WM_T_PCH_LPT:
   4018 	case WM_T_PCH_SPT:
   4019 		sc->sc_pba = PBA_26K;
   4020 		break;
   4021 	default:
   4022 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4023 		    PBA_40K : PBA_48K;
   4024 		break;
   4025 	}
   4026 	/*
   4027 	 * Only old or non-multiqueue devices have the PBA register
   4028 	 * XXX Need special handling for 82575.
   4029 	 */
   4030 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4031 	    || (sc->sc_type == WM_T_82575))
   4032 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4033 
   4034 	/* Prevent the PCI-E bus from sticking */
   4035 	if (sc->sc_flags & WM_F_PCIE) {
   4036 		int timeout = 800;
   4037 
   4038 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4039 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4040 
   4041 		while (timeout--) {
   4042 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4043 			    == 0)
   4044 				break;
   4045 			delay(100);
   4046 		}
   4047 	}
   4048 
   4049 	/* Set the completion timeout for interface */
   4050 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4051 	    || (sc->sc_type == WM_T_82580)
   4052 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4053 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4054 		wm_set_pcie_completion_timeout(sc);
   4055 
   4056 	/* Clear interrupt */
   4057 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4058 	if (wm_is_using_msix(sc)) {
   4059 		if (sc->sc_type != WM_T_82574) {
   4060 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4061 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4062 		} else {
   4063 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4064 		}
   4065 	}
   4066 
   4067 	/* Stop the transmit and receive processes. */
   4068 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4069 	sc->sc_rctl &= ~RCTL_EN;
   4070 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4071 	CSR_WRITE_FLUSH(sc);
   4072 
   4073 	/* XXX set_tbi_sbp_82543() */
   4074 
   4075 	delay(10*1000);
   4076 
   4077 	/* Must acquire the MDIO ownership before MAC reset */
   4078 	switch (sc->sc_type) {
   4079 	case WM_T_82573:
   4080 	case WM_T_82574:
   4081 	case WM_T_82583:
   4082 		error = wm_get_hw_semaphore_82573(sc);
   4083 		break;
   4084 	default:
   4085 		break;
   4086 	}
   4087 
   4088 	/*
   4089 	 * 82541 Errata 29? & 82547 Errata 28?
   4090 	 * See also the description of the PHY_RST bit in the CTRL register
   4091 	 * in 8254x_GBe_SDM.pdf.
   4092 	 */
   4093 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4094 		CSR_WRITE(sc, WMREG_CTRL,
   4095 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4096 		CSR_WRITE_FLUSH(sc);
   4097 		delay(5000);
   4098 	}
   4099 
   4100 	switch (sc->sc_type) {
   4101 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4102 	case WM_T_82541:
   4103 	case WM_T_82541_2:
   4104 	case WM_T_82547:
   4105 	case WM_T_82547_2:
   4106 		/*
   4107 		 * On some chipsets, a reset through a memory-mapped write
   4108 		 * cycle can cause the chip to reset before completing the
   4109 		 * write cycle.  This causes major headaches that can be
   4110 		 * avoided by issuing the reset via indirect register writes
   4111 		 * through I/O space.
   4112 		 *
   4113 		 * So, if we successfully mapped the I/O BAR at attach time,
   4114 		 * use that.  Otherwise, try our luck with a memory-mapped
   4115 		 * reset.
   4116 		 */
   4117 		if (sc->sc_flags & WM_F_IOH_VALID)
   4118 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4119 		else
   4120 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4121 		break;
   4122 	case WM_T_82545_3:
   4123 	case WM_T_82546_3:
   4124 		/* Use the shadow control register on these chips. */
   4125 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4126 		break;
   4127 	case WM_T_80003:
   4128 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4129 		sc->phy.acquire(sc);
   4130 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4131 		sc->phy.release(sc);
   4132 		break;
   4133 	case WM_T_ICH8:
   4134 	case WM_T_ICH9:
   4135 	case WM_T_ICH10:
   4136 	case WM_T_PCH:
   4137 	case WM_T_PCH2:
   4138 	case WM_T_PCH_LPT:
   4139 	case WM_T_PCH_SPT:
   4140 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4141 		if (wm_phy_resetisblocked(sc) == false) {
   4142 			/*
   4143 			 * Gate automatic PHY configuration by hardware on
   4144 			 * non-managed 82579
   4145 			 */
   4146 			if ((sc->sc_type == WM_T_PCH2)
   4147 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4148 				== 0))
   4149 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4150 
   4151 			reg |= CTRL_PHY_RESET;
   4152 			phy_reset = 1;
   4153 		} else
   4154 			printf("XXX reset is blocked!!!\n");
   4155 		sc->phy.acquire(sc);
   4156 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4157 		/* Don't insert a completion barrier during reset */
   4158 		delay(20*1000);
   4159 		mutex_exit(sc->sc_ich_phymtx);
   4160 		break;
   4161 	case WM_T_82580:
   4162 	case WM_T_I350:
   4163 	case WM_T_I354:
   4164 	case WM_T_I210:
   4165 	case WM_T_I211:
   4166 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4167 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4168 			CSR_WRITE_FLUSH(sc);
   4169 		delay(5000);
   4170 		break;
   4171 	case WM_T_82542_2_0:
   4172 	case WM_T_82542_2_1:
   4173 	case WM_T_82543:
   4174 	case WM_T_82540:
   4175 	case WM_T_82545:
   4176 	case WM_T_82546:
   4177 	case WM_T_82571:
   4178 	case WM_T_82572:
   4179 	case WM_T_82573:
   4180 	case WM_T_82574:
   4181 	case WM_T_82575:
   4182 	case WM_T_82576:
   4183 	case WM_T_82583:
   4184 	default:
   4185 		/* Everything else can safely use the documented method. */
   4186 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4187 		break;
   4188 	}
   4189 
   4190 	/* Must release the MDIO ownership after MAC reset */
   4191 	switch (sc->sc_type) {
   4192 	case WM_T_82573:
   4193 	case WM_T_82574:
   4194 	case WM_T_82583:
   4195 		if (error == 0)
   4196 			wm_put_hw_semaphore_82573(sc);
   4197 		break;
   4198 	default:
   4199 		break;
   4200 	}
   4201 
   4202 	if (phy_reset != 0)
   4203 		wm_get_cfg_done(sc);
   4204 
   4205 	/* reload EEPROM */
   4206 	switch (sc->sc_type) {
   4207 	case WM_T_82542_2_0:
   4208 	case WM_T_82542_2_1:
   4209 	case WM_T_82543:
   4210 	case WM_T_82544:
   4211 		delay(10);
   4212 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4213 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4214 		CSR_WRITE_FLUSH(sc);
   4215 		delay(2000);
   4216 		break;
   4217 	case WM_T_82540:
   4218 	case WM_T_82545:
   4219 	case WM_T_82545_3:
   4220 	case WM_T_82546:
   4221 	case WM_T_82546_3:
   4222 		delay(5*1000);
   4223 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4224 		break;
   4225 	case WM_T_82541:
   4226 	case WM_T_82541_2:
   4227 	case WM_T_82547:
   4228 	case WM_T_82547_2:
   4229 		delay(20000);
   4230 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4231 		break;
   4232 	case WM_T_82571:
   4233 	case WM_T_82572:
   4234 	case WM_T_82573:
   4235 	case WM_T_82574:
   4236 	case WM_T_82583:
   4237 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4238 			delay(10);
   4239 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4240 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4241 			CSR_WRITE_FLUSH(sc);
   4242 		}
   4243 		/* check EECD_EE_AUTORD */
   4244 		wm_get_auto_rd_done(sc);
   4245 		/*
   4246 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4247 		 * is set.
   4248 		 */
   4249 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4250 		    || (sc->sc_type == WM_T_82583))
   4251 			delay(25*1000);
   4252 		break;
   4253 	case WM_T_82575:
   4254 	case WM_T_82576:
   4255 	case WM_T_82580:
   4256 	case WM_T_I350:
   4257 	case WM_T_I354:
   4258 	case WM_T_I210:
   4259 	case WM_T_I211:
   4260 	case WM_T_80003:
   4261 		/* check EECD_EE_AUTORD */
   4262 		wm_get_auto_rd_done(sc);
   4263 		break;
   4264 	case WM_T_ICH8:
   4265 	case WM_T_ICH9:
   4266 	case WM_T_ICH10:
   4267 	case WM_T_PCH:
   4268 	case WM_T_PCH2:
   4269 	case WM_T_PCH_LPT:
   4270 	case WM_T_PCH_SPT:
   4271 		break;
   4272 	default:
   4273 		panic("%s: unknown type\n", __func__);
   4274 	}
   4275 
   4276 	/* Check whether EEPROM is present or not */
   4277 	switch (sc->sc_type) {
   4278 	case WM_T_82575:
   4279 	case WM_T_82576:
   4280 	case WM_T_82580:
   4281 	case WM_T_I350:
   4282 	case WM_T_I354:
   4283 	case WM_T_ICH8:
   4284 	case WM_T_ICH9:
   4285 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4286 			/* Not found */
   4287 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4288 			if (sc->sc_type == WM_T_82575)
   4289 				wm_reset_init_script_82575(sc);
   4290 		}
   4291 		break;
   4292 	default:
   4293 		break;
   4294 	}
   4295 
   4296 	if ((sc->sc_type == WM_T_82580)
   4297 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4298 		/* clear global device reset status bit */
   4299 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4300 	}
   4301 
   4302 	/* Clear any pending interrupt events. */
   4303 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4304 	reg = CSR_READ(sc, WMREG_ICR);
   4305 	if (wm_is_using_msix(sc)) {
   4306 		if (sc->sc_type != WM_T_82574) {
   4307 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4308 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4309 		} else
   4310 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4311 	}
   4312 
   4313 	/* reload sc_ctrl */
   4314 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4315 
   4316 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4317 		wm_set_eee_i350(sc);
   4318 
   4319 	/* Clear the host wakeup bit after lcd reset */
   4320 	if (sc->sc_type >= WM_T_PCH) {
   4321 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4322 		    BM_PORT_GEN_CFG);
   4323 		reg &= ~BM_WUC_HOST_WU_BIT;
   4324 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4325 		    BM_PORT_GEN_CFG, reg);
   4326 	}
   4327 
   4328 	/*
   4329 	 * For PCH, this write will make sure that any noise will be detected
   4330 	 * as a CRC error and be dropped rather than show up as a bad packet
   4331 	 * to the DMA engine
   4332 	 */
   4333 	if (sc->sc_type == WM_T_PCH)
   4334 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4335 
   4336 	if (sc->sc_type >= WM_T_82544)
   4337 		CSR_WRITE(sc, WMREG_WUC, 0);
   4338 
   4339 	wm_reset_mdicnfg_82580(sc);
   4340 
   4341 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4342 		wm_pll_workaround_i210(sc);
   4343 }
   4344 
   4345 /*
   4346  * wm_add_rxbuf:
   4347  *
   4348  *	Add a receive buffer to the indicated descriptor.
   4349  */
   4350 static int
   4351 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4352 {
   4353 	struct wm_softc *sc = rxq->rxq_sc;
   4354 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4355 	struct mbuf *m;
   4356 	int error;
   4357 
   4358 	KASSERT(mutex_owned(rxq->rxq_lock));
   4359 
   4360 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4361 	if (m == NULL)
   4362 		return ENOBUFS;
   4363 
   4364 	MCLGET(m, M_DONTWAIT);
   4365 	if ((m->m_flags & M_EXT) == 0) {
   4366 		m_freem(m);
   4367 		return ENOBUFS;
   4368 	}
   4369 
   4370 	if (rxs->rxs_mbuf != NULL)
   4371 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4372 
   4373 	rxs->rxs_mbuf = m;
   4374 
   4375 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4376 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4377 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4378 	if (error) {
   4379 		/* XXX XXX XXX */
   4380 		aprint_error_dev(sc->sc_dev,
   4381 		    "unable to load rx DMA map %d, error = %d\n",
   4382 		    idx, error);
   4383 		panic("wm_add_rxbuf");
   4384 	}
   4385 
   4386 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4387 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4388 
   4389 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4390 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4391 			wm_init_rxdesc(rxq, idx);
   4392 	} else
   4393 		wm_init_rxdesc(rxq, idx);
   4394 
   4395 	return 0;
   4396 }
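
        /*
         * The MGETHDR/MCLGET pair above is the standard mbuf-cluster
         * allocation pattern: the header allocation can succeed while the
         * cluster attach fails, which is why M_EXT is checked and the mbuf
         * freed separately. ENOBUFS is treated as transient by the callers,
         * which can recycle the old mbuf and retry later.
         */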
   4397 
   4398 /*
   4399  * wm_rxdrain:
   4400  *
   4401  *	Drain the receive queue.
   4402  */
   4403 static void
   4404 wm_rxdrain(struct wm_rxqueue *rxq)
   4405 {
   4406 	struct wm_softc *sc = rxq->rxq_sc;
   4407 	struct wm_rxsoft *rxs;
   4408 	int i;
   4409 
   4410 	KASSERT(mutex_owned(rxq->rxq_lock));
   4411 
   4412 	for (i = 0; i < WM_NRXDESC; i++) {
   4413 		rxs = &rxq->rxq_soft[i];
   4414 		if (rxs->rxs_mbuf != NULL) {
   4415 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4416 			m_freem(rxs->rxs_mbuf);
   4417 			rxs->rxs_mbuf = NULL;
   4418 		}
   4419 	}
   4420 }
   4421 
   4422 
   4423 /*
   4424  * XXX copy from FreeBSD's sys/net/rss_config.c
   4425  */
   4426 /*
   4427  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4428  * effectiveness may be limited by algorithm choice and available entropy
   4429  * during the boot.
   4430  *
   4431  * XXXRW: And that we don't randomize it yet!
   4432  *
   4433  * This is the default Microsoft RSS specification key which is also
   4434  * the Chelsio T5 firmware default key.
   4435  */
   4436 #define RSS_KEYSIZE 40
   4437 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4438 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4439 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4440 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4441 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4442 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4443 };
   4444 
   4445 /*
   4446  * Caller must pass an array of size sizeof(rss_key).
   4447  *
   4448  * XXX
   4449  * As if_ixgbe may use this function, it should not be an
   4450  * if_wm-specific function.
   4451  */
   4452 static void
   4453 wm_rss_getkey(uint8_t *key)
   4454 {
   4455 
   4456 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4457 }
   4458 
   4459 /*
   4460  * Setup registers for RSS.
   4461  *
   4462  * XXX not yet VMDq support
   4463  */
   4464 static void
   4465 wm_init_rss(struct wm_softc *sc)
   4466 {
   4467 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4468 	int i;
   4469 
   4470 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4471 
   4472 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4473 		int qid, reta_ent;
   4474 
   4475 		qid  = i % sc->sc_nqueues;
   4476 		switch (sc->sc_type) {
   4477 		case WM_T_82574:
   4478 			reta_ent = __SHIFTIN(qid,
   4479 			    RETA_ENT_QINDEX_MASK_82574);
   4480 			break;
   4481 		case WM_T_82575:
   4482 			reta_ent = __SHIFTIN(qid,
   4483 			    RETA_ENT_QINDEX1_MASK_82575);
   4484 			break;
   4485 		default:
   4486 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4487 			break;
   4488 		}
   4489 
   4490 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4491 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4492 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4493 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4494 	}
   4495 
   4496 	wm_rss_getkey((uint8_t *)rss_key);
   4497 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4498 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4499 
   4500 	if (sc->sc_type == WM_T_82574)
   4501 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4502 	else
   4503 		mrqc = MRQC_ENABLE_RSS_MQ;
   4504 
   4505 	/*
   4506 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4507 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4508 	 */
   4509 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4510 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4511 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4512 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4513 
   4514 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4515 }
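
        /*
         * Worked example for the RETA fill above: with sc_nqueues == 4,
         * entry i of the redirection table gets queue (i % 4), spreading
         * flows 0,1,2,3,0,1,... across the RX queues according to the
         * Toeplitz hash of the MRQC-selected IPv4/IPv6 fields, keyed with
         * wm_rss_key.
         */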
   4516 
   4517 /*
   4518  * Adjust the TX and RX queue numbers which the system actually uses.
   4519  *
   4520  * The numbers are affected by the following parameters:
   4521  *     - The number of hardware queues
   4522  *     - The number of MSI-X vectors (= "nvectors" argument)
   4523  *     - ncpu
   4524  */
   4525 static void
   4526 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4527 {
   4528 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4529 
   4530 	if (nvectors < 2) {
   4531 		sc->sc_nqueues = 1;
   4532 		return;
   4533 	}
   4534 
   4535 	switch (sc->sc_type) {
   4536 	case WM_T_82572:
   4537 		hw_ntxqueues = 2;
   4538 		hw_nrxqueues = 2;
   4539 		break;
   4540 	case WM_T_82574:
   4541 		hw_ntxqueues = 2;
   4542 		hw_nrxqueues = 2;
   4543 		break;
   4544 	case WM_T_82575:
   4545 		hw_ntxqueues = 4;
   4546 		hw_nrxqueues = 4;
   4547 		break;
   4548 	case WM_T_82576:
   4549 		hw_ntxqueues = 16;
   4550 		hw_nrxqueues = 16;
   4551 		break;
   4552 	case WM_T_82580:
   4553 	case WM_T_I350:
   4554 	case WM_T_I354:
   4555 		hw_ntxqueues = 8;
   4556 		hw_nrxqueues = 8;
   4557 		break;
   4558 	case WM_T_I210:
   4559 		hw_ntxqueues = 4;
   4560 		hw_nrxqueues = 4;
   4561 		break;
   4562 	case WM_T_I211:
   4563 		hw_ntxqueues = 2;
   4564 		hw_nrxqueues = 2;
   4565 		break;
   4566 		/*
   4567 		 * As the ethernet controllers below do not support MSI-X,
   4568 		 * this driver does not use multiqueue for them:
   4569 		 *     - WM_T_80003
   4570 		 *     - WM_T_ICH8
   4571 		 *     - WM_T_ICH9
   4572 		 *     - WM_T_ICH10
   4573 		 *     - WM_T_PCH
   4574 		 *     - WM_T_PCH2
   4575 		 *     - WM_T_PCH_LPT
   4576 		 */
   4577 	default:
   4578 		hw_ntxqueues = 1;
   4579 		hw_nrxqueues = 1;
   4580 		break;
   4581 	}
   4582 
   4583 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4584 
   4585 	/*
    4586 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    4587 	 * limit the number of queues actually used.
   4588 	 */
   4589 	if (nvectors < hw_nqueues + 1) {
   4590 		sc->sc_nqueues = nvectors - 1;
   4591 	} else {
   4592 		sc->sc_nqueues = hw_nqueues;
   4593 	}
   4594 
   4595 	/*
    4596 	 * Since more queues than CPUs cannot improve scaling, we limit
    4597 	 * the number of queues actually used.
   4598 	 */
   4599 	if (ncpu < sc->sc_nqueues)
   4600 		sc->sc_nqueues = ncpu;
   4601 }
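
         /*
          * Illustrative sketch (not part of the driver): the logic above
          * reduces to clamping the queue count by both the MSI-X vector
          * budget (one vector is reserved for the link interrupt) and the
          * CPU count.  The hypothetical helper below condenses it into one
          * computation.
          */
         #if 0
         static int
         example_adjust_qnum(int hw_nqueues, int nvectors, int ncpu)
         {
         	int n = hw_nqueues;

         	if (nvectors < 2)
         		return 1;		/* single (INTx/MSI) interrupt */
         	if (n > nvectors - 1)
         		n = nvectors - 1;	/* reserve one vector for LINK */
         	if (n > ncpu)
         		n = ncpu;		/* more queues than CPUs don't scale */
         	return n;
         }
         #endif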
   4602 
   4603 static inline bool
   4604 wm_is_using_msix(struct wm_softc *sc)
   4605 {
   4606 
   4607 	return (sc->sc_nintrs > 1);
   4608 }
   4609 
   4610 static inline bool
   4611 wm_is_using_multiqueue(struct wm_softc *sc)
   4612 {
   4613 
   4614 	return (sc->sc_nqueues > 1);
   4615 }
   4616 
   4617 static int
   4618 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4619 {
   4620 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4621 	wmq->wmq_id = qidx;
   4622 	wmq->wmq_intr_idx = intr_idx;
   4623 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4624 #ifdef WM_MPSAFE
   4625 	    | SOFTINT_MPSAFE
   4626 #endif
   4627 	    , wm_handle_queue, wmq);
   4628 	if (wmq->wmq_si != NULL)
   4629 		return 0;
   4630 
   4631 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4632 	    wmq->wmq_id);
   4633 
   4634 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4635 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4636 	return ENOMEM;
   4637 }
   4638 
   4639 /*
   4640  * Both single interrupt MSI and INTx can use this function.
   4641  */
   4642 static int
   4643 wm_setup_legacy(struct wm_softc *sc)
   4644 {
   4645 	pci_chipset_tag_t pc = sc->sc_pc;
   4646 	const char *intrstr = NULL;
   4647 	char intrbuf[PCI_INTRSTR_LEN];
   4648 	int error;
   4649 
   4650 	error = wm_alloc_txrx_queues(sc);
   4651 	if (error) {
   4652 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4653 		    error);
   4654 		return ENOMEM;
   4655 	}
   4656 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4657 	    sizeof(intrbuf));
   4658 #ifdef WM_MPSAFE
   4659 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4660 #endif
   4661 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4662 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4663 	if (sc->sc_ihs[0] == NULL) {
   4664 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4665 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4666 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4667 		return ENOMEM;
   4668 	}
   4669 
   4670 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4671 	sc->sc_nintrs = 1;
   4672 
   4673 	return wm_softint_establish(sc, 0, 0);
   4674 }
   4675 
   4676 static int
   4677 wm_setup_msix(struct wm_softc *sc)
   4678 {
   4679 	void *vih;
   4680 	kcpuset_t *affinity;
   4681 	int qidx, error, intr_idx, txrx_established;
   4682 	pci_chipset_tag_t pc = sc->sc_pc;
   4683 	const char *intrstr = NULL;
   4684 	char intrbuf[PCI_INTRSTR_LEN];
   4685 	char intr_xname[INTRDEVNAMEBUF];
   4686 
   4687 	if (sc->sc_nqueues < ncpu) {
   4688 		/*
    4689 		 * To avoid conflicts with other devices' interrupts, the
    4690 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   4691 		 */
   4692 		sc->sc_affinity_offset = 1;
   4693 	} else {
   4694 		/*
    4695 		 * In this case, this device uses all CPUs, so for readability
    4696 		 * we align the affinity cpu_index with the MSI-X vector number.
   4697 		 */
   4698 		sc->sc_affinity_offset = 0;
   4699 	}
   4700 
   4701 	error = wm_alloc_txrx_queues(sc);
   4702 	if (error) {
   4703 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4704 		    error);
   4705 		return ENOMEM;
   4706 	}
   4707 
   4708 	kcpuset_create(&affinity, false);
   4709 	intr_idx = 0;
   4710 
   4711 	/*
   4712 	 * TX and RX
   4713 	 */
   4714 	txrx_established = 0;
   4715 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4716 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4717 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4718 
   4719 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4720 		    sizeof(intrbuf));
   4721 #ifdef WM_MPSAFE
   4722 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4723 		    PCI_INTR_MPSAFE, true);
   4724 #endif
   4725 		memset(intr_xname, 0, sizeof(intr_xname));
   4726 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4727 		    device_xname(sc->sc_dev), qidx);
   4728 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4729 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4730 		if (vih == NULL) {
   4731 			aprint_error_dev(sc->sc_dev,
   4732 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4733 			    intrstr ? " at " : "",
   4734 			    intrstr ? intrstr : "");
   4735 
   4736 			goto fail;
   4737 		}
   4738 		kcpuset_zero(affinity);
   4739 		/* Round-robin affinity */
   4740 		kcpuset_set(affinity, affinity_to);
   4741 		error = interrupt_distribute(vih, affinity, NULL);
   4742 		if (error == 0) {
   4743 			aprint_normal_dev(sc->sc_dev,
   4744 			    "for TX and RX interrupting at %s affinity to %u\n",
   4745 			    intrstr, affinity_to);
   4746 		} else {
   4747 			aprint_normal_dev(sc->sc_dev,
   4748 			    "for TX and RX interrupting at %s\n", intrstr);
   4749 		}
   4750 		sc->sc_ihs[intr_idx] = vih;
   4751 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4752 			goto fail;
   4753 		txrx_established++;
   4754 		intr_idx++;
   4755 	}
   4756 
   4757 	/*
   4758 	 * LINK
   4759 	 */
   4760 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4761 	    sizeof(intrbuf));
   4762 #ifdef WM_MPSAFE
   4763 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4764 #endif
   4765 	memset(intr_xname, 0, sizeof(intr_xname));
   4766 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4767 	    device_xname(sc->sc_dev));
   4768 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4769 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4770 	if (vih == NULL) {
   4771 		aprint_error_dev(sc->sc_dev,
   4772 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4773 		    intrstr ? " at " : "",
   4774 		    intrstr ? intrstr : "");
   4775 
   4776 		goto fail;
   4777 	}
    4778 	/* Keep the default affinity for the LINK interrupt */
   4779 	aprint_normal_dev(sc->sc_dev,
   4780 	    "for LINK interrupting at %s\n", intrstr);
   4781 	sc->sc_ihs[intr_idx] = vih;
   4782 	sc->sc_link_intr_idx = intr_idx;
   4783 
   4784 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4785 	kcpuset_destroy(affinity);
   4786 	return 0;
   4787 
   4788  fail:
   4789 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4790 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4791 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4792 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4793 	}
   4794 
   4795 	kcpuset_destroy(affinity);
   4796 	return ENOMEM;
   4797 }
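
         /*
          * Illustrative sketch (not part of the driver): with 4 queues,
          * ncpu = 8 and sc_affinity_offset = 1, the round-robin computation
          * above pins TXRX0..TXRX3 to CPU1..CPU4 and leaves CPU0 for other
          * devices' interrupts.  The helper below is hypothetical; the driver
          * computes the same value inline.
          */
         #if 0
         static int
         example_affinity_cpu(int affinity_offset, int intr_idx, int ncpu)
         {

         	return (affinity_offset + intr_idx) % ncpu;
         }
         #endif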
   4798 
   4799 static void
   4800 wm_turnon(struct wm_softc *sc)
   4801 {
   4802 	int i;
   4803 
   4804 	KASSERT(WM_CORE_LOCKED(sc));
   4805 
   4806 	/*
   4807 	 * must unset stopping flags in ascending order.
   4808 	 */
    4809 	for (i = 0; i < sc->sc_nqueues; i++) {
   4810 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4811 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4812 
   4813 		mutex_enter(txq->txq_lock);
   4814 		txq->txq_stopping = false;
   4815 		mutex_exit(txq->txq_lock);
   4816 
   4817 		mutex_enter(rxq->rxq_lock);
   4818 		rxq->rxq_stopping = false;
   4819 		mutex_exit(rxq->rxq_lock);
   4820 	}
   4821 
   4822 	sc->sc_core_stopping = false;
   4823 }
   4824 
   4825 static void
   4826 wm_turnoff(struct wm_softc *sc)
   4827 {
   4828 	int i;
   4829 
   4830 	KASSERT(WM_CORE_LOCKED(sc));
   4831 
   4832 	sc->sc_core_stopping = true;
   4833 
   4834 	/*
   4835 	 * must set stopping flags in ascending order.
   4836 	 */
    4837 	for (i = 0; i < sc->sc_nqueues; i++) {
   4838 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4839 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4840 
   4841 		mutex_enter(rxq->rxq_lock);
   4842 		rxq->rxq_stopping = true;
   4843 		mutex_exit(rxq->rxq_lock);
   4844 
   4845 		mutex_enter(txq->txq_lock);
   4846 		txq->txq_stopping = true;
   4847 		mutex_exit(txq->txq_lock);
   4848 	}
   4849 }
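
         /*
          * Illustrative sketch (not part of the driver): consumers of the
          * stopping flags check them under the corresponding queue lock and
          * bail out early, as wm_82547_txfifo_stall() does below.  The
          * hypothetical handler shows the pattern, assuming all queue work
          * is done with txq_lock held.
          */
         #if 0
         static void
         example_queue_handler(struct wm_txqueue *txq)
         {

         	mutex_enter(txq->txq_lock);
         	if (txq->txq_stopping) {
         		/* wm_turnoff() has run; do no further queue work. */
         		mutex_exit(txq->txq_lock);
         		return;
         	}
         	/* ... process the queue ... */
         	mutex_exit(txq->txq_lock);
         }
         #endif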
   4850 
   4851 /*
    4852  * Write the interrupt interval value to the ITR or EITR register.
   4853  */
   4854 static void
   4855 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4856 {
   4857 
   4858 	if (!wmq->wmq_set_itr)
   4859 		return;
   4860 
   4861 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4862 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4863 
   4864 		/*
    4865 		 * The 82575 doesn't have the CNT_INGR field, so
    4866 		 * overwrite the counter field in software.
   4867 		 */
   4868 		if (sc->sc_type == WM_T_82575)
   4869 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4870 		else
   4871 			eitr |= EITR_CNT_INGR;
   4872 
   4873 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4874 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   4875 		/*
    4876 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4877 		 * the multiqueue function with MSI-X.
   4878 		 */
   4879 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4880 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4881 	} else {
   4882 		KASSERT(wmq->wmq_id == 0);
   4883 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4884 	}
   4885 
   4886 	wmq->wmq_set_itr = false;
   4887 }
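
         /*
          * Illustrative sketch (not part of the driver): converting a target
          * interrupt rate N (interrupts/sec) into the register values
          * described by the comments in wm_init_locked().  The helpers are
          * hypothetical and ignore the 1/4-rate behavior at 10/100Mbit link
          * speeds noted there.
          */
         #if 0
         static uint32_t
         example_legacy_itr(uint32_t ints_per_sec)
         {

         	/*
         	 * The legacy ITR counts in 256ns units, so the value is
         	 * 1,000,000,000 / (N * 256); e.g. N = 2604 gives 1500, the
         	 * sc_itr_init default for the old controllers.
         	 */
         	return 1000000000U / (ints_per_sec * 256U);
         }

         static uint32_t
         example_newqueue_eitr(uint32_t ints_per_sec)
         {

         	/* NEWQUEUE EITR (except for the 82575) uses 1,000,000 / N. */
         	return 1000000U / ints_per_sec;
         }
         #endif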
   4888 
   4889 /*
   4890  * TODO
    4891  * The dynamic ITR calculation below is almost the same as Linux igb's,
    4892  * but it does not fit wm(4) well, so we keep AIM disabled until we find
    4893  * an appropriate ITR calculation.
   4894  */
   4895 /*
    4896  * Calculate the interrupt interval value to be written to the register
    4897  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   4898  */
   4899 static void
   4900 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4901 {
   4902 #ifdef NOTYET
   4903 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4904 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4905 	uint32_t avg_size = 0;
   4906 	uint32_t new_itr;
   4907 
   4908 	if (rxq->rxq_packets)
   4909 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4910 	if (txq->txq_packets)
   4911 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4912 
   4913 	if (avg_size == 0) {
   4914 		new_itr = 450; /* restore default value */
   4915 		goto out;
   4916 	}
   4917 
   4918 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4919 	avg_size += 24;
   4920 
   4921 	/* Don't starve jumbo frames */
   4922 	avg_size = min(avg_size, 3000);
   4923 
   4924 	/* Give a little boost to mid-size frames */
   4925 	if ((avg_size > 300) && (avg_size < 1200))
   4926 		new_itr = avg_size / 3;
   4927 	else
   4928 		new_itr = avg_size / 2;
   4929 
   4930 out:
   4931 	/*
    4932 	 * The 82574 and 82575 use EITR differently from the other NEWQUEUE
    4933 	 * controllers. See the sc->sc_itr_init setting in wm_init_locked().
   4934 	 */
   4935 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4936 		new_itr *= 4;
   4937 
   4938 	if (new_itr != wmq->wmq_itr) {
   4939 		wmq->wmq_itr = new_itr;
   4940 		wmq->wmq_set_itr = true;
   4941 	} else
   4942 		wmq->wmq_set_itr = false;
   4943 
   4944 	rxq->rxq_packets = 0;
   4945 	rxq->rxq_bytes = 0;
   4946 	txq->txq_packets = 0;
   4947 	txq->txq_bytes = 0;
   4948 #endif
   4949 }
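
         /*
          * Illustrative sketch (not part of the driver): the core of the
          * disabled AIM heuristic above, as a hypothetical pure function.
          * For a 1500-byte average packet, avg_size becomes 1524, which is
          * above the 300..1200 mid-size boost range, so new_itr = 762
          * (before the *4 scaling applied to non-82575 NEWQUEUE chips).
          */
         #if 0
         static uint32_t
         example_aim_itr(uint32_t avg_size)
         {

         	avg_size += 24;			/* CRC, preamble and gap */
         	if (avg_size > 3000)
         		avg_size = 3000;	/* don't starve jumbo frames */
         	if ((avg_size > 300) && (avg_size < 1200))
         		return avg_size / 3;	/* boost mid-size frames */
         	return avg_size / 2;
         }
         #endif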
   4950 
   4951 /*
   4952  * wm_init:		[ifnet interface function]
   4953  *
   4954  *	Initialize the interface.
   4955  */
   4956 static int
   4957 wm_init(struct ifnet *ifp)
   4958 {
   4959 	struct wm_softc *sc = ifp->if_softc;
   4960 	int ret;
   4961 
   4962 	WM_CORE_LOCK(sc);
   4963 	ret = wm_init_locked(ifp);
   4964 	WM_CORE_UNLOCK(sc);
   4965 
   4966 	return ret;
   4967 }
   4968 
   4969 static int
   4970 wm_init_locked(struct ifnet *ifp)
   4971 {
   4972 	struct wm_softc *sc = ifp->if_softc;
   4973 	int i, j, trynum, error = 0;
   4974 	uint32_t reg;
   4975 
   4976 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4977 		device_xname(sc->sc_dev), __func__));
   4978 	KASSERT(WM_CORE_LOCKED(sc));
   4979 
   4980 	/*
    4981 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4982 	 * There is a small but measurable benefit to avoiding the adjustment
   4983 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4984 	 * on such platforms.  One possibility is that the DMA itself is
   4985 	 * slightly more efficient if the front of the entire packet (instead
   4986 	 * of the front of the headers) is aligned.
   4987 	 *
   4988 	 * Note we must always set align_tweak to 0 if we are using
   4989 	 * jumbo frames.
   4990 	 */
   4991 #ifdef __NO_STRICT_ALIGNMENT
   4992 	sc->sc_align_tweak = 0;
   4993 #else
   4994 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4995 		sc->sc_align_tweak = 0;
   4996 	else
   4997 		sc->sc_align_tweak = 2;
   4998 #endif /* __NO_STRICT_ALIGNMENT */
   4999 
   5000 	/* Cancel any pending I/O. */
   5001 	wm_stop_locked(ifp, 0);
   5002 
   5003 	/* update statistics before reset */
   5004 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5005 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5006 
   5007 	/* PCH_SPT hardware workaround */
   5008 	if (sc->sc_type == WM_T_PCH_SPT)
   5009 		wm_flush_desc_rings(sc);
   5010 
   5011 	/* Reset the chip to a known state. */
   5012 	wm_reset(sc);
   5013 
   5014 	/* AMT based hardware can now take control from firmware */
   5015 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5016 		wm_get_hw_control(sc);
   5017 
   5018 	/* Init hardware bits */
   5019 	wm_initialize_hardware_bits(sc);
   5020 
   5021 	/* Reset the PHY. */
   5022 	if (sc->sc_flags & WM_F_HAS_MII)
   5023 		wm_gmii_reset(sc);
   5024 
   5025 	/* Calculate (E)ITR value */
   5026 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5027 		/*
    5028 		 * For NEWQUEUE's EITR (except for the 82575).
    5029 		 * The 82575's EITR should be set to the same throttling
    5030 		 * value as the old controllers' ITR because the interrupts/sec
    5031 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5032 		 *
    5033 		 * The 82574's EITR should be set to the same value as the ITR.
    5034 		 *
    5035 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5036 		 * contrast to the ITR throttling value.
   5037 		 */
   5038 		sc->sc_itr_init = 450;
   5039 	} else if (sc->sc_type >= WM_T_82543) {
   5040 		/*
   5041 		 * Set up the interrupt throttling register (units of 256ns)
   5042 		 * Note that a footnote in Intel's documentation says this
   5043 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5044 		 * or 10Mbit mode.  Empirically, it appears to be the case
    5045 		 * that this is also true for the 1024ns units of the other
   5046 		 * interrupt-related timer registers -- so, really, we ought
   5047 		 * to divide this value by 4 when the link speed is low.
   5048 		 *
   5049 		 * XXX implement this division at link speed change!
   5050 		 */
   5051 
   5052 		/*
   5053 		 * For N interrupts/sec, set this value to:
   5054 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5055 		 * absolute and packet timer values to this value
   5056 		 * divided by 4 to get "simple timer" behavior.
   5057 		 */
   5058 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5059 	}
   5060 
   5061 	error = wm_init_txrx_queues(sc);
   5062 	if (error)
   5063 		goto out;
   5064 
   5065 	/*
   5066 	 * Clear out the VLAN table -- we don't use it (yet).
   5067 	 */
   5068 	CSR_WRITE(sc, WMREG_VET, 0);
   5069 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5070 		trynum = 10; /* Due to hw errata */
   5071 	else
   5072 		trynum = 1;
   5073 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5074 		for (j = 0; j < trynum; j++)
   5075 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5076 
   5077 	/*
   5078 	 * Set up flow-control parameters.
   5079 	 *
   5080 	 * XXX Values could probably stand some tuning.
   5081 	 */
   5082 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5083 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5084 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5085 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5086 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5087 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5088 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5089 	}
   5090 
   5091 	sc->sc_fcrtl = FCRTL_DFLT;
   5092 	if (sc->sc_type < WM_T_82543) {
   5093 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5094 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5095 	} else {
   5096 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5097 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5098 	}
   5099 
   5100 	if (sc->sc_type == WM_T_80003)
   5101 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5102 	else
   5103 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5104 
   5105 	/* Writes the control register. */
   5106 	wm_set_vlan(sc);
   5107 
   5108 	if (sc->sc_flags & WM_F_HAS_MII) {
   5109 		int val;
   5110 
   5111 		switch (sc->sc_type) {
   5112 		case WM_T_80003:
   5113 		case WM_T_ICH8:
   5114 		case WM_T_ICH9:
   5115 		case WM_T_ICH10:
   5116 		case WM_T_PCH:
   5117 		case WM_T_PCH2:
   5118 		case WM_T_PCH_LPT:
   5119 		case WM_T_PCH_SPT:
   5120 			/*
   5121 			 * Set the mac to wait the maximum time between each
   5122 			 * iteration and increase the max iterations when
   5123 			 * polling the phy; this fixes erroneous timeouts at
   5124 			 * 10Mbps.
   5125 			 */
   5126 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5127 			    0xFFFF);
   5128 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5129 			val |= 0x3F;
   5130 			wm_kmrn_writereg(sc,
   5131 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5132 			break;
   5133 		default:
   5134 			break;
   5135 		}
   5136 
   5137 		if (sc->sc_type == WM_T_80003) {
   5138 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5139 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5140 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5141 
   5142 			/* Bypass RX and TX FIFO's */
   5143 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5144 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5145 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5146 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5147 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5148 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5149 		}
   5150 	}
   5151 #if 0
   5152 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5153 #endif
   5154 
   5155 	/* Set up checksum offload parameters. */
   5156 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5157 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5158 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5159 		reg |= RXCSUM_IPOFL;
   5160 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5161 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5162 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5163 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5164 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5165 
   5166 	/* Set registers about MSI-X */
   5167 	if (wm_is_using_msix(sc)) {
   5168 		uint32_t ivar;
   5169 		struct wm_queue *wmq;
   5170 		int qid, qintr_idx;
   5171 
   5172 		if (sc->sc_type == WM_T_82575) {
   5173 			/* Interrupt control */
   5174 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5175 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5176 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5177 
   5178 			/* TX and RX */
   5179 			for (i = 0; i < sc->sc_nqueues; i++) {
   5180 				wmq = &sc->sc_queue[i];
   5181 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5182 				    EITR_TX_QUEUE(wmq->wmq_id)
   5183 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5184 			}
   5185 			/* Link status */
   5186 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5187 			    EITR_OTHER);
   5188 		} else if (sc->sc_type == WM_T_82574) {
   5189 			/* Interrupt control */
   5190 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5191 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5192 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5193 
   5194 			/*
    5195 			 * Work around an issue with spurious interrupts
    5196 			 * in MSI-X mode.
    5197 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5198 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5199 			 */
   5200 			reg = CSR_READ(sc, WMREG_RFCTL);
   5201 			reg |= WMREG_RFCTL_ACKDIS;
   5202 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5203 
   5204 			ivar = 0;
   5205 			/* TX and RX */
   5206 			for (i = 0; i < sc->sc_nqueues; i++) {
   5207 				wmq = &sc->sc_queue[i];
   5208 				qid = wmq->wmq_id;
   5209 				qintr_idx = wmq->wmq_intr_idx;
   5210 
   5211 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5212 				    IVAR_TX_MASK_Q_82574(qid));
   5213 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5214 				    IVAR_RX_MASK_Q_82574(qid));
   5215 			}
   5216 			/* Link status */
   5217 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5218 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5219 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5220 		} else {
   5221 			/* Interrupt control */
   5222 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5223 			    | GPIE_EIAME | GPIE_PBA);
   5224 
   5225 			switch (sc->sc_type) {
   5226 			case WM_T_82580:
   5227 			case WM_T_I350:
   5228 			case WM_T_I354:
   5229 			case WM_T_I210:
   5230 			case WM_T_I211:
   5231 				/* TX and RX */
   5232 				for (i = 0; i < sc->sc_nqueues; i++) {
   5233 					wmq = &sc->sc_queue[i];
   5234 					qid = wmq->wmq_id;
   5235 					qintr_idx = wmq->wmq_intr_idx;
   5236 
   5237 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5238 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5239 					ivar |= __SHIFTIN((qintr_idx
   5240 						| IVAR_VALID),
   5241 					    IVAR_TX_MASK_Q(qid));
   5242 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5243 					ivar |= __SHIFTIN((qintr_idx
   5244 						| IVAR_VALID),
   5245 					    IVAR_RX_MASK_Q(qid));
   5246 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5247 				}
   5248 				break;
   5249 			case WM_T_82576:
   5250 				/* TX and RX */
   5251 				for (i = 0; i < sc->sc_nqueues; i++) {
   5252 					wmq = &sc->sc_queue[i];
   5253 					qid = wmq->wmq_id;
   5254 					qintr_idx = wmq->wmq_intr_idx;
   5255 
   5256 					ivar = CSR_READ(sc,
   5257 					    WMREG_IVAR_Q_82576(qid));
   5258 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5259 					ivar |= __SHIFTIN((qintr_idx
   5260 						| IVAR_VALID),
   5261 					    IVAR_TX_MASK_Q_82576(qid));
   5262 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5263 					ivar |= __SHIFTIN((qintr_idx
   5264 						| IVAR_VALID),
   5265 					    IVAR_RX_MASK_Q_82576(qid));
   5266 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5267 					    ivar);
   5268 				}
   5269 				break;
   5270 			default:
   5271 				break;
   5272 			}
   5273 
   5274 			/* Link status */
   5275 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5276 			    IVAR_MISC_OTHER);
   5277 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5278 		}
   5279 
   5280 		if (wm_is_using_multiqueue(sc)) {
   5281 			wm_init_rss(sc);
   5282 
   5283 			/*
    5284 			 * NOTE: Receive Full-Packet Checksum Offload
    5285 			 * is mutually exclusive with Multiqueue. However,
    5286 			 * this is not the same as TCP/IP checksums, which
    5287 			 * still work.
   5288 			*/
   5289 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5290 			reg |= RXCSUM_PCSD;
   5291 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5292 		}
   5293 	}
   5294 
   5295 	/* Set up the interrupt registers. */
   5296 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5297 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5298 	    ICR_RXO | ICR_RXT0;
   5299 	if (wm_is_using_msix(sc)) {
   5300 		uint32_t mask;
   5301 		struct wm_queue *wmq;
   5302 
   5303 		switch (sc->sc_type) {
   5304 		case WM_T_82574:
   5305 			mask = 0;
   5306 			for (i = 0; i < sc->sc_nqueues; i++) {
   5307 				wmq = &sc->sc_queue[i];
   5308 				mask |= ICR_TXQ(wmq->wmq_id);
   5309 				mask |= ICR_RXQ(wmq->wmq_id);
   5310 			}
   5311 			mask |= ICR_OTHER;
   5312 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5313 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5314 			break;
   5315 		default:
   5316 			if (sc->sc_type == WM_T_82575) {
   5317 				mask = 0;
   5318 				for (i = 0; i < sc->sc_nqueues; i++) {
   5319 					wmq = &sc->sc_queue[i];
   5320 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5321 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5322 				}
   5323 				mask |= EITR_OTHER;
   5324 			} else {
   5325 				mask = 0;
   5326 				for (i = 0; i < sc->sc_nqueues; i++) {
   5327 					wmq = &sc->sc_queue[i];
   5328 					mask |= 1 << wmq->wmq_intr_idx;
   5329 				}
   5330 				mask |= 1 << sc->sc_link_intr_idx;
   5331 			}
   5332 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5333 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5334 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5335 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5336 			break;
   5337 		}
   5338 	} else
   5339 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5340 
   5341 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5342 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5343 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5344 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5345 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5346 		reg |= KABGTXD_BGSQLBIAS;
   5347 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5348 	}
   5349 
   5350 	/* Set up the inter-packet gap. */
   5351 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5352 
   5353 	if (sc->sc_type >= WM_T_82543) {
   5354 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5355 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5356 			wm_itrs_writereg(sc, wmq);
   5357 		}
   5358 		/*
    5359 		 * Link interrupts occur much less frequently than TX
    5360 		 * and RX interrupts, so we don't tune the
    5361 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5362 		 * FreeBSD's if_igb does.
   5363 		 */
   5364 	}
   5365 
   5366 	/* Set the VLAN ethernetype. */
   5367 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5368 
   5369 	/*
   5370 	 * Set up the transmit control register; we start out with
    5371 	 * a collision distance suitable for FDX, but update it when
   5372 	 * we resolve the media type.
   5373 	 */
   5374 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5375 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5376 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5377 	if (sc->sc_type >= WM_T_82571)
   5378 		sc->sc_tctl |= TCTL_MULR;
   5379 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5380 
   5381 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5382 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5383 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5384 	}
   5385 
   5386 	if (sc->sc_type == WM_T_80003) {
   5387 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5388 		reg &= ~TCTL_EXT_GCEX_MASK;
   5389 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5390 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5391 	}
   5392 
   5393 	/* Set the media. */
   5394 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5395 		goto out;
   5396 
   5397 	/* Configure for OS presence */
   5398 	wm_init_manageability(sc);
   5399 
   5400 	/*
   5401 	 * Set up the receive control register; we actually program
   5402 	 * the register when we set the receive filter.  Use multicast
   5403 	 * address offset type 0.
   5404 	 *
   5405 	 * Only the i82544 has the ability to strip the incoming
   5406 	 * CRC, so we don't enable that feature.
   5407 	 */
   5408 	sc->sc_mchash_type = 0;
   5409 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5410 	    | RCTL_MO(sc->sc_mchash_type);
   5411 
   5412 	/*
    5413 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5414 	 */
   5415 	if (sc->sc_type == WM_T_82574)
   5416 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5417 
   5418 	/*
    5419 	 * The I350 has a bug where it always strips the CRC whether
    5420 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   5421 	 */
   5422 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5423 	    || (sc->sc_type == WM_T_I210))
   5424 		sc->sc_rctl |= RCTL_SECRC;
   5425 
   5426 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5427 	    && (ifp->if_mtu > ETHERMTU)) {
   5428 		sc->sc_rctl |= RCTL_LPE;
   5429 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5430 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5431 	}
   5432 
   5433 	if (MCLBYTES == 2048) {
   5434 		sc->sc_rctl |= RCTL_2k;
   5435 	} else {
   5436 		if (sc->sc_type >= WM_T_82543) {
   5437 			switch (MCLBYTES) {
   5438 			case 4096:
   5439 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5440 				break;
   5441 			case 8192:
   5442 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5443 				break;
   5444 			case 16384:
   5445 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5446 				break;
   5447 			default:
   5448 				panic("wm_init: MCLBYTES %d unsupported",
   5449 				    MCLBYTES);
   5450 				break;
   5451 			}
   5452 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5453 	}
   5454 
   5455 	/* Set the receive filter. */
   5456 	wm_set_filter(sc);
   5457 
   5458 	/* Enable ECC */
   5459 	switch (sc->sc_type) {
   5460 	case WM_T_82571:
   5461 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5462 		reg |= PBA_ECC_CORR_EN;
   5463 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5464 		break;
   5465 	case WM_T_PCH_LPT:
   5466 	case WM_T_PCH_SPT:
   5467 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5468 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5469 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5470 
   5471 		sc->sc_ctrl |= CTRL_MEHE;
   5472 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5473 		break;
   5474 	default:
   5475 		break;
   5476 	}
   5477 
    5478 	/* On the 82575 and later, set RDT only if RX is enabled */
   5479 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5480 		int qidx;
   5481 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5482 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5483 			for (i = 0; i < WM_NRXDESC; i++) {
   5484 				mutex_enter(rxq->rxq_lock);
   5485 				wm_init_rxdesc(rxq, i);
   5486 				mutex_exit(rxq->rxq_lock);
   5487 
   5488 			}
   5489 		}
   5490 	}
   5491 
   5492 	wm_turnon(sc);
   5493 
   5494 	/* Start the one second link check clock. */
   5495 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5496 
   5497 	/* ...all done! */
   5498 	ifp->if_flags |= IFF_RUNNING;
   5499 	ifp->if_flags &= ~IFF_OACTIVE;
   5500 
   5501  out:
   5502 	sc->sc_if_flags = ifp->if_flags;
   5503 	if (error)
   5504 		log(LOG_ERR, "%s: interface not running\n",
   5505 		    device_xname(sc->sc_dev));
   5506 	return error;
   5507 }
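
         /*
          * Illustrative sketch (not part of the driver): the MCLBYTES switch
          * in wm_init_locked() selects the RCTL receive buffer size encoding.
          * The helper below is hypothetical and assumes the RCTL_* definitions
          * used above; the driver itself panics on unsupported sizes.
          */
         #if 0
         static uint32_t
         example_rctl_bufsize(int mclbytes)
         {

         	switch (mclbytes) {
         	case 2048:
         		return RCTL_2k;
         	case 4096:
         		return RCTL_BSEX | RCTL_BSEX_4k;
         	case 8192:
         		return RCTL_BSEX | RCTL_BSEX_8k;
         	case 16384:
         		return RCTL_BSEX | RCTL_BSEX_16k;
         	default:
         		return RCTL_2k;	/* unsupported; see wm_init_locked() */
         	}
         }
         #endif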
   5508 
   5509 /*
   5510  * wm_stop:		[ifnet interface function]
   5511  *
   5512  *	Stop transmission on the interface.
   5513  */
   5514 static void
   5515 wm_stop(struct ifnet *ifp, int disable)
   5516 {
   5517 	struct wm_softc *sc = ifp->if_softc;
   5518 
   5519 	WM_CORE_LOCK(sc);
   5520 	wm_stop_locked(ifp, disable);
   5521 	WM_CORE_UNLOCK(sc);
   5522 }
   5523 
   5524 static void
   5525 wm_stop_locked(struct ifnet *ifp, int disable)
   5526 {
   5527 	struct wm_softc *sc = ifp->if_softc;
   5528 	struct wm_txsoft *txs;
   5529 	int i, qidx;
   5530 
   5531 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5532 		device_xname(sc->sc_dev), __func__));
   5533 	KASSERT(WM_CORE_LOCKED(sc));
   5534 
   5535 	wm_turnoff(sc);
   5536 
   5537 	/* Stop the one second clock. */
   5538 	callout_stop(&sc->sc_tick_ch);
   5539 
   5540 	/* Stop the 82547 Tx FIFO stall check timer. */
   5541 	if (sc->sc_type == WM_T_82547)
   5542 		callout_stop(&sc->sc_txfifo_ch);
   5543 
   5544 	if (sc->sc_flags & WM_F_HAS_MII) {
   5545 		/* Down the MII. */
   5546 		mii_down(&sc->sc_mii);
   5547 	} else {
   5548 #if 0
   5549 		/* Should we clear PHY's status properly? */
   5550 		wm_reset(sc);
   5551 #endif
   5552 	}
   5553 
   5554 	/* Stop the transmit and receive processes. */
   5555 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5556 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5557 	sc->sc_rctl &= ~RCTL_EN;
   5558 
   5559 	/*
   5560 	 * Clear the interrupt mask to ensure the device cannot assert its
   5561 	 * interrupt line.
   5562 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5563 	 * service any currently pending or shared interrupt.
   5564 	 */
   5565 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5566 	sc->sc_icr = 0;
   5567 	if (wm_is_using_msix(sc)) {
   5568 		if (sc->sc_type != WM_T_82574) {
   5569 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5570 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5571 		} else
   5572 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5573 	}
   5574 
   5575 	/* Release any queued transmit buffers. */
   5576 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5577 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5578 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5579 		mutex_enter(txq->txq_lock);
   5580 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5581 			txs = &txq->txq_soft[i];
   5582 			if (txs->txs_mbuf != NULL) {
   5583 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5584 				m_freem(txs->txs_mbuf);
   5585 				txs->txs_mbuf = NULL;
   5586 			}
   5587 		}
   5588 		mutex_exit(txq->txq_lock);
   5589 	}
   5590 
   5591 	/* Mark the interface as down and cancel the watchdog timer. */
   5592 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5593 	ifp->if_timer = 0;
   5594 
   5595 	if (disable) {
   5596 		for (i = 0; i < sc->sc_nqueues; i++) {
   5597 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5598 			mutex_enter(rxq->rxq_lock);
   5599 			wm_rxdrain(rxq);
   5600 			mutex_exit(rxq->rxq_lock);
   5601 		}
   5602 	}
   5603 
   5604 #if 0 /* notyet */
   5605 	if (sc->sc_type >= WM_T_82544)
   5606 		CSR_WRITE(sc, WMREG_WUC, 0);
   5607 #endif
   5608 }
   5609 
   5610 static void
   5611 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5612 {
   5613 	struct mbuf *m;
   5614 	int i;
   5615 
   5616 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5617 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5618 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5619 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5620 		    m->m_data, m->m_len, m->m_flags);
   5621 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5622 	    i, i == 1 ? "" : "s");
   5623 }
   5624 
   5625 /*
   5626  * wm_82547_txfifo_stall:
   5627  *
   5628  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5629  *	reset the FIFO pointers, and restart packet transmission.
   5630  */
   5631 static void
   5632 wm_82547_txfifo_stall(void *arg)
   5633 {
   5634 	struct wm_softc *sc = arg;
   5635 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5636 
   5637 	mutex_enter(txq->txq_lock);
   5638 
   5639 	if (txq->txq_stopping)
   5640 		goto out;
   5641 
   5642 	if (txq->txq_fifo_stall) {
   5643 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5644 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5645 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5646 			/*
   5647 			 * Packets have drained.  Stop transmitter, reset
   5648 			 * FIFO pointers, restart transmitter, and kick
   5649 			 * the packet queue.
   5650 			 */
   5651 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5652 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5653 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5654 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5655 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5656 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5657 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5658 			CSR_WRITE_FLUSH(sc);
   5659 
   5660 			txq->txq_fifo_head = 0;
   5661 			txq->txq_fifo_stall = 0;
   5662 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5663 		} else {
   5664 			/*
   5665 			 * Still waiting for packets to drain; try again in
   5666 			 * another tick.
   5667 			 */
   5668 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5669 		}
   5670 	}
   5671 
   5672 out:
   5673 	mutex_exit(txq->txq_lock);
   5674 }
   5675 
   5676 /*
   5677  * wm_82547_txfifo_bugchk:
   5678  *
   5679  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5680  *	prevent enqueueing a packet that would wrap around the end
    5681  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5682  *
   5683  *	We do this by checking the amount of space before the end
   5684  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5685  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5686  *	the internal FIFO pointers to the beginning, and restart
   5687  *	transmission on the interface.
   5688  */
   5689 #define	WM_FIFO_HDR		0x10
   5690 #define	WM_82547_PAD_LEN	0x3e0
   5691 static int
   5692 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5693 {
   5694 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5695 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5696 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5697 
   5698 	/* Just return if already stalled. */
   5699 	if (txq->txq_fifo_stall)
   5700 		return 1;
   5701 
   5702 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5703 		/* Stall only occurs in half-duplex mode. */
   5704 		goto send_packet;
   5705 	}
   5706 
   5707 	if (len >= WM_82547_PAD_LEN + space) {
   5708 		txq->txq_fifo_stall = 1;
   5709 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5710 		return 1;
   5711 	}
   5712 
   5713  send_packet:
   5714 	txq->txq_fifo_head += len;
   5715 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5716 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5717 
   5718 	return 0;
   5719 }
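
         /*
          * Illustrative worked example (not part of the driver): for a
          * 1514-byte packet, len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR)
          * = 0x600.  If only space = 0x100 bytes remain before the end of
          * the FIFO, then WM_82547_PAD_LEN + space = 0x4e0 <= len, so the
          * transmitter is stalled until the FIFO drains.  The hypothetical
          * predicate below restates the check.
          */
         #if 0
         static int
         example_fifo_would_stall(int pktlen, int fifo_size, int fifo_head)
         {
         	int space = fifo_size - fifo_head;
         	int len = roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR);

         	return len >= WM_82547_PAD_LEN + space;
         }
         #endif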
   5720 
   5721 static int
   5722 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5723 {
   5724 	int error;
   5725 
   5726 	/*
   5727 	 * Allocate the control data structures, and create and load the
   5728 	 * DMA map for it.
   5729 	 *
   5730 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5731 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5732 	 * both sets within the same 4G segment.
   5733 	 */
   5734 	if (sc->sc_type < WM_T_82544)
   5735 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5736 	else
   5737 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5738 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5739 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5740 	else
   5741 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5742 
   5743 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5744 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5745 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5746 		aprint_error_dev(sc->sc_dev,
   5747 		    "unable to allocate TX control data, error = %d\n",
   5748 		    error);
   5749 		goto fail_0;
   5750 	}
   5751 
   5752 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5753 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5754 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5755 		aprint_error_dev(sc->sc_dev,
   5756 		    "unable to map TX control data, error = %d\n", error);
   5757 		goto fail_1;
   5758 	}
   5759 
   5760 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5761 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5762 		aprint_error_dev(sc->sc_dev,
   5763 		    "unable to create TX control data DMA map, error = %d\n",
   5764 		    error);
   5765 		goto fail_2;
   5766 	}
   5767 
   5768 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5769 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5770 		aprint_error_dev(sc->sc_dev,
   5771 		    "unable to load TX control data DMA map, error = %d\n",
   5772 		    error);
   5773 		goto fail_3;
   5774 	}
   5775 
   5776 	return 0;
   5777 
   5778  fail_3:
   5779 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5780  fail_2:
   5781 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5782 	    WM_TXDESCS_SIZE(txq));
   5783  fail_1:
   5784 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5785  fail_0:
   5786 	return error;
   5787 }
   5788 
   5789 static void
   5790 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5791 {
   5792 
   5793 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5794 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5795 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5796 	    WM_TXDESCS_SIZE(txq));
   5797 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5798 }
   5799 
   5800 static int
   5801 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5802 {
   5803 	int error;
   5804 	size_t rxq_descs_size;
   5805 
   5806 	/*
   5807 	 * Allocate the control data structures, and create and load the
   5808 	 * DMA map for it.
   5809 	 *
   5810 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5811 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5812 	 * both sets within the same 4G segment.
   5813 	 */
   5814 	rxq->rxq_ndesc = WM_NRXDESC;
   5815 	if (sc->sc_type == WM_T_82574)
   5816 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5817 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5818 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5819 	else
   5820 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5821 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5822 
   5823 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5824 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5825 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5826 		aprint_error_dev(sc->sc_dev,
   5827 		    "unable to allocate RX control data, error = %d\n",
   5828 		    error);
   5829 		goto fail_0;
   5830 	}
   5831 
   5832 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5833 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5834 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5835 		aprint_error_dev(sc->sc_dev,
   5836 		    "unable to map RX control data, error = %d\n", error);
   5837 		goto fail_1;
   5838 	}
   5839 
   5840 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5841 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5842 		aprint_error_dev(sc->sc_dev,
   5843 		    "unable to create RX control data DMA map, error = %d\n",
   5844 		    error);
   5845 		goto fail_2;
   5846 	}
   5847 
   5848 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5849 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5850 		aprint_error_dev(sc->sc_dev,
   5851 		    "unable to load RX control data DMA map, error = %d\n",
   5852 		    error);
   5853 		goto fail_3;
   5854 	}
   5855 
   5856 	return 0;
   5857 
   5858  fail_3:
   5859 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5860  fail_2:
   5861 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5862 	    rxq_descs_size);
   5863  fail_1:
   5864 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5865  fail_0:
   5866 	return error;
   5867 }
   5868 
   5869 static void
   5870 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5871 {
   5872 
   5873 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5874 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5875 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5876 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5877 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5878 }
   5879 
   5880 
   5881 static int
   5882 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5883 {
   5884 	int i, error;
   5885 
   5886 	/* Create the transmit buffer DMA maps. */
   5887 	WM_TXQUEUELEN(txq) =
   5888 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5889 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5890 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5891 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5892 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5893 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5894 			aprint_error_dev(sc->sc_dev,
   5895 			    "unable to create Tx DMA map %d, error = %d\n",
   5896 			    i, error);
   5897 			goto fail;
   5898 		}
   5899 	}
   5900 
   5901 	return 0;
   5902 
   5903  fail:
   5904 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5905 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5906 			bus_dmamap_destroy(sc->sc_dmat,
   5907 			    txq->txq_soft[i].txs_dmamap);
   5908 	}
   5909 	return error;
   5910 }
   5911 
   5912 static void
   5913 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5914 {
   5915 	int i;
   5916 
   5917 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5918 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5919 			bus_dmamap_destroy(sc->sc_dmat,
   5920 			    txq->txq_soft[i].txs_dmamap);
   5921 	}
   5922 }
   5923 
   5924 static int
   5925 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5926 {
   5927 	int i, error;
   5928 
   5929 	/* Create the receive buffer DMA maps. */
   5930 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5931 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5932 			    MCLBYTES, 0, 0,
   5933 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5934 			aprint_error_dev(sc->sc_dev,
   5935 			    "unable to create Rx DMA map %d error = %d\n",
   5936 			    i, error);
   5937 			goto fail;
   5938 		}
   5939 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5940 	}
   5941 
   5942 	return 0;
   5943 
   5944  fail:
   5945 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5946 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5947 			bus_dmamap_destroy(sc->sc_dmat,
   5948 			    rxq->rxq_soft[i].rxs_dmamap);
   5949 	}
   5950 	return error;
   5951 }
   5952 
   5953 static void
   5954 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5955 {
   5956 	int i;
   5957 
   5958 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5959 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5960 			bus_dmamap_destroy(sc->sc_dmat,
   5961 			    rxq->rxq_soft[i].rxs_dmamap);
   5962 	}
   5963 }
   5964 
   5965 /*
    5966  * wm_alloc_txrx_queues:
    5967  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5968  */
   5969 static int
   5970 wm_alloc_txrx_queues(struct wm_softc *sc)
   5971 {
   5972 	int i, error, tx_done, rx_done;
   5973 
   5974 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5975 	    KM_SLEEP);
   5976 	if (sc->sc_queue == NULL) {
   5977 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5978 		error = ENOMEM;
   5979 		goto fail_0;
   5980 	}
   5981 
   5982 	/*
   5983 	 * For transmission
   5984 	 */
   5985 	error = 0;
   5986 	tx_done = 0;
   5987 	for (i = 0; i < sc->sc_nqueues; i++) {
   5988 #ifdef WM_EVENT_COUNTERS
   5989 		int j;
   5990 		const char *xname;
   5991 #endif
   5992 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5993 		txq->txq_sc = sc;
   5994 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5995 
   5996 		error = wm_alloc_tx_descs(sc, txq);
   5997 		if (error)
   5998 			break;
   5999 		error = wm_alloc_tx_buffer(sc, txq);
   6000 		if (error) {
   6001 			wm_free_tx_descs(sc, txq);
   6002 			break;
   6003 		}
   6004 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6005 		if (txq->txq_interq == NULL) {
   6006 			wm_free_tx_descs(sc, txq);
   6007 			wm_free_tx_buffer(sc, txq);
   6008 			error = ENOMEM;
   6009 			break;
   6010 		}
   6011 
   6012 #ifdef WM_EVENT_COUNTERS
   6013 		xname = device_xname(sc->sc_dev);
   6014 
   6015 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6016 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6017 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6018 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6019 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6020 
   6021 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6022 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6023 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6024 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6025 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6026 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6027 
   6028 		for (j = 0; j < WM_NTXSEGS; j++) {
   6029 			snprintf(txq->txq_txseg_evcnt_names[j],
   6030 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6031 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6032 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6033 		}
   6034 
   6035 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6036 
   6037 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6038 #endif /* WM_EVENT_COUNTERS */
   6039 
   6040 		tx_done++;
   6041 	}
   6042 	if (error)
   6043 		goto fail_1;
   6044 
   6045 	/*
    6046 	 * For receive
   6047 	 */
   6048 	error = 0;
   6049 	rx_done = 0;
   6050 	for (i = 0; i < sc->sc_nqueues; i++) {
   6051 #ifdef WM_EVENT_COUNTERS
   6052 		const char *xname;
   6053 #endif
   6054 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6055 		rxq->rxq_sc = sc;
   6056 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6057 
   6058 		error = wm_alloc_rx_descs(sc, rxq);
   6059 		if (error)
   6060 			break;
   6061 
   6062 		error = wm_alloc_rx_buffer(sc, rxq);
   6063 		if (error) {
   6064 			wm_free_rx_descs(sc, rxq);
   6065 			break;
   6066 		}
   6067 
   6068 #ifdef WM_EVENT_COUNTERS
   6069 		xname = device_xname(sc->sc_dev);
   6070 
   6071 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6072 
   6073 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6074 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6075 #endif /* WM_EVENT_COUNTERS */
   6076 
   6077 		rx_done++;
   6078 	}
   6079 	if (error)
   6080 		goto fail_2;
   6081 
   6082 	return 0;
   6083 
   6084  fail_2:
   6085 	for (i = 0; i < rx_done; i++) {
   6086 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6087 		wm_free_rx_buffer(sc, rxq);
   6088 		wm_free_rx_descs(sc, rxq);
   6089 		if (rxq->rxq_lock)
   6090 			mutex_obj_free(rxq->rxq_lock);
   6091 	}
   6092  fail_1:
   6093 	for (i = 0; i < tx_done; i++) {
   6094 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6095 		pcq_destroy(txq->txq_interq);
   6096 		wm_free_tx_buffer(sc, txq);
   6097 		wm_free_tx_descs(sc, txq);
   6098 		if (txq->txq_lock)
   6099 			mutex_obj_free(txq->txq_lock);
   6100 	}
   6101 
   6102 	kmem_free(sc->sc_queue,
   6103 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6104  fail_0:
   6105 	return error;
   6106 }
   6107 
   6108 /*
    6109  * wm_free_txrx_queues:
    6110  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6111  */
   6112 static void
   6113 wm_free_txrx_queues(struct wm_softc *sc)
   6114 {
   6115 	int i;
   6116 
   6117 	for (i = 0; i < sc->sc_nqueues; i++) {
   6118 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6119 
   6120 #ifdef WM_EVENT_COUNTERS
   6121 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6122 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6123 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6124 #endif /* WM_EVENT_COUNTERS */
   6125 
   6126 		wm_free_rx_buffer(sc, rxq);
   6127 		wm_free_rx_descs(sc, rxq);
   6128 		if (rxq->rxq_lock)
   6129 			mutex_obj_free(rxq->rxq_lock);
   6130 	}
   6131 
   6132 	for (i = 0; i < sc->sc_nqueues; i++) {
   6133 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6134 		struct mbuf *m;
   6135 #ifdef WM_EVENT_COUNTERS
   6136 		int j;
   6137 
   6138 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6139 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6140 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6141 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6142 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6143 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6144 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6145 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6146 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6147 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6148 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6149 
   6150 		for (j = 0; j < WM_NTXSEGS; j++)
   6151 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6152 
   6153 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6154 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6155 #endif /* WM_EVENT_COUNTERS */
   6156 
   6157 		/* drain txq_interq */
   6158 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6159 			m_freem(m);
   6160 		pcq_destroy(txq->txq_interq);
   6161 
   6162 		wm_free_tx_buffer(sc, txq);
   6163 		wm_free_tx_descs(sc, txq);
   6164 		if (txq->txq_lock)
   6165 			mutex_obj_free(txq->txq_lock);
   6166 	}
   6167 
   6168 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6169 }
   6170 
   6171 static void
   6172 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6173 {
   6174 
   6175 	KASSERT(mutex_owned(txq->txq_lock));
   6176 
   6177 	/* Initialize the transmit descriptor ring. */
   6178 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6179 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6180 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6181 	txq->txq_free = WM_NTXDESC(txq);
   6182 	txq->txq_next = 0;
   6183 }
   6184 
   6185 static void
   6186 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6187     struct wm_txqueue *txq)
   6188 {
   6189 
   6190 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6191 		device_xname(sc->sc_dev), __func__));
   6192 	KASSERT(mutex_owned(txq->txq_lock));
   6193 
   6194 	if (sc->sc_type < WM_T_82543) {
   6195 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6196 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6197 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6198 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6199 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6200 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6201 	} else {
   6202 		int qid = wmq->wmq_id;
   6203 
   6204 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6205 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6206 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6207 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6208 
   6209 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6210 			/*
   6211 			 * Don't write TDT before TCTL.EN is set.
    6212 			 * See the documentation.
   6213 			 */
   6214 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6215 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6216 			    | TXDCTL_WTHRESH(0));
   6217 		else {
   6218 			/* XXX should update with AIM? */
   6219 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6220 			if (sc->sc_type >= WM_T_82540) {
   6221 				/* should be same */
   6222 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6223 			}
   6224 
   6225 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6226 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6227 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6228 		}
   6229 	}
   6230 }
   6231 
   6232 static void
   6233 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6234 {
   6235 	int i;
   6236 
   6237 	KASSERT(mutex_owned(txq->txq_lock));
   6238 
   6239 	/* Initialize the transmit job descriptors. */
   6240 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6241 		txq->txq_soft[i].txs_mbuf = NULL;
   6242 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6243 	txq->txq_snext = 0;
   6244 	txq->txq_sdirty = 0;
   6245 }
   6246 
   6247 static void
   6248 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6249     struct wm_txqueue *txq)
   6250 {
   6251 
   6252 	KASSERT(mutex_owned(txq->txq_lock));
   6253 
   6254 	/*
   6255 	 * Set up some register offsets that are different between
   6256 	 * the i82542 and the i82543 and later chips.
   6257 	 */
   6258 	if (sc->sc_type < WM_T_82543)
   6259 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6260 	else
   6261 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6262 
   6263 	wm_init_tx_descs(sc, txq);
   6264 	wm_init_tx_regs(sc, wmq, txq);
   6265 	wm_init_tx_buffer(sc, txq);
   6266 }
   6267 
   6268 static void
   6269 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6270     struct wm_rxqueue *rxq)
   6271 {
   6272 
   6273 	KASSERT(mutex_owned(rxq->rxq_lock));
   6274 
   6275 	/*
   6276 	 * Initialize the receive descriptor and receive job
   6277 	 * descriptor rings.
   6278 	 */
   6279 	if (sc->sc_type < WM_T_82543) {
   6280 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6281 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6282 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6283 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6284 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6285 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6286 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6287 
   6288 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6289 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6290 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6291 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6292 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6293 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6294 	} else {
   6295 		int qid = wmq->wmq_id;
   6296 
   6297 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6298 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
		CSR_WRITE(sc, WMREG_RDLEN(qid),
		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6300 
   6301 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6302 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
   6304 
			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6308 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6309 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6310 			    | RXDCTL_WTHRESH(1));
   6311 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6312 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6313 		} else {
   6314 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6315 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6316 			/* XXX should update with AIM? */
   6317 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
			/* RADV MUST hold the same value as RDTR */
   6319 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6320 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6321 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6322 		}
   6323 	}
   6324 }
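
/*
 * Illustrative sketch (not part of the driver): the SRRCTL buffer-size
 * encoding used in wm_init_rx_regs() above.  The BSIZEPKT field holds
 * the packet buffer size in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
 * hence the panic when MCLBYTES is not a multiple of that unit.
 * Assuming a 1 KB unit (a shift of 10) and MCLBYTES of 2048, the
 * encoded value is 2.  The function name is hypothetical.
 */
static inline uint32_t
example_srrctl_bsizepkt(uint32_t buflen, u_int shift)
{

	/* The caller must ensure buflen is a multiple of (1 << shift). */
	return buflen >> shift;
}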
   6325 
   6326 static int
   6327 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6328 {
   6329 	struct wm_rxsoft *rxs;
   6330 	int error, i;
   6331 
   6332 	KASSERT(mutex_owned(rxq->rxq_lock));
   6333 
   6334 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6335 		rxs = &rxq->rxq_soft[i];
   6336 		if (rxs->rxs_mbuf == NULL) {
   6337 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6338 				log(LOG_ERR, "%s: unable to allocate or map "
   6339 				    "rx buffer %d, error = %d\n",
   6340 				    device_xname(sc->sc_dev), i, error);
   6341 				/*
   6342 				 * XXX Should attempt to run with fewer receive
   6343 				 * XXX buffers instead of just failing.
   6344 				 */
   6345 				wm_rxdrain(rxq);
   6346 				return ENOMEM;
   6347 			}
   6348 		} else {
   6349 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6350 				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
			 */
   6356 		}
   6357 	}
   6358 	rxq->rxq_ptr = 0;
   6359 	rxq->rxq_discard = 0;
   6360 	WM_RXCHAIN_RESET(rxq);
   6361 
   6362 	return 0;
   6363 }
   6364 
   6365 static int
   6366 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6367     struct wm_rxqueue *rxq)
   6368 {
   6369 
   6370 	KASSERT(mutex_owned(rxq->rxq_lock));
   6371 
   6372 	/*
   6373 	 * Set up some register offsets that are different between
   6374 	 * the i82542 and the i82543 and later chips.
   6375 	 */
   6376 	if (sc->sc_type < WM_T_82543)
   6377 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6378 	else
   6379 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6380 
   6381 	wm_init_rx_regs(sc, wmq, rxq);
   6382 	return wm_init_rx_buffer(sc, rxq);
   6383 }
   6384 
/*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
 */
   6389 static int
   6390 wm_init_txrx_queues(struct wm_softc *sc)
   6391 {
   6392 	int i, error = 0;
   6393 
   6394 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6395 		device_xname(sc->sc_dev), __func__));
   6396 
   6397 	for (i = 0; i < sc->sc_nqueues; i++) {
   6398 		struct wm_queue *wmq = &sc->sc_queue[i];
   6399 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6400 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6401 
		/*
		 * TODO
		 * Currently, we use a constant value instead of AIM
		 * (adaptive interrupt moderation).  Moreover, the interrupt
		 * interval used for multiqueue (polling mode) operation is
		 * shorter than the default value.  More tuning, and AIM
		 * itself, are still required.
		 */
   6409 		if (wm_is_using_multiqueue(sc))
   6410 			wmq->wmq_itr = 50;
   6411 		else
   6412 			wmq->wmq_itr = sc->sc_itr_init;
   6413 		wmq->wmq_set_itr = true;
   6414 
   6415 		mutex_enter(txq->txq_lock);
   6416 		wm_init_tx_queue(sc, wmq, txq);
   6417 		mutex_exit(txq->txq_lock);
   6418 
   6419 		mutex_enter(rxq->rxq_lock);
   6420 		error = wm_init_rx_queue(sc, wmq, rxq);
   6421 		mutex_exit(rxq->rxq_lock);
   6422 		if (error)
   6423 			break;
   6424 	}
   6425 
   6426 	return error;
   6427 }
   6428 
   6429 /*
   6430  * wm_tx_offload:
   6431  *
   6432  *	Set up TCP/IP checksumming parameters for the
   6433  *	specified packet.
   6434  */
   6435 static int
   6436 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6437     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6438 {
   6439 	struct mbuf *m0 = txs->txs_mbuf;
   6440 	struct livengood_tcpip_ctxdesc *t;
   6441 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6442 	uint32_t ipcse;
   6443 	struct ether_header *eh;
   6444 	int offset, iphl;
   6445 	uint8_t fields;
   6446 
   6447 	/*
   6448 	 * XXX It would be nice if the mbuf pkthdr had offset
   6449 	 * fields for the protocol headers.
   6450 	 */
   6451 
   6452 	eh = mtod(m0, struct ether_header *);
   6453 	switch (htons(eh->ether_type)) {
   6454 	case ETHERTYPE_IP:
   6455 	case ETHERTYPE_IPV6:
   6456 		offset = ETHER_HDR_LEN;
   6457 		break;
   6458 
   6459 	case ETHERTYPE_VLAN:
   6460 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6461 		break;
   6462 
   6463 	default:
   6464 		/*
   6465 		 * Don't support this protocol or encapsulation.
   6466 		 */
   6467 		*fieldsp = 0;
   6468 		*cmdp = 0;
   6469 		return 0;
   6470 	}
   6471 
   6472 	if ((m0->m_pkthdr.csum_flags &
   6473 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6474 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6475 	} else {
   6476 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6477 	}
   6478 	ipcse = offset + iphl - 1;
   6479 
   6480 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6481 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6482 	seg = 0;
   6483 	fields = 0;
   6484 
   6485 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6486 		int hlen = offset + iphl;
   6487 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6488 
   6489 		if (__predict_false(m0->m_len <
   6490 				    (hlen + sizeof(struct tcphdr)))) {
   6491 			/*
   6492 			 * TCP/IP headers are not in the first mbuf; we need
   6493 			 * to do this the slow and painful way.  Let's just
   6494 			 * hope this doesn't happen very often.
   6495 			 */
   6496 			struct tcphdr th;
   6497 
   6498 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6499 
   6500 			m_copydata(m0, hlen, sizeof(th), &th);
   6501 			if (v4) {
   6502 				struct ip ip;
   6503 
   6504 				m_copydata(m0, offset, sizeof(ip), &ip);
   6505 				ip.ip_len = 0;
   6506 				m_copyback(m0,
   6507 				    offset + offsetof(struct ip, ip_len),
   6508 				    sizeof(ip.ip_len), &ip.ip_len);
   6509 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6510 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6511 			} else {
   6512 				struct ip6_hdr ip6;
   6513 
   6514 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6515 				ip6.ip6_plen = 0;
   6516 				m_copyback(m0,
   6517 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6518 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6519 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6520 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6521 			}
   6522 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6523 			    sizeof(th.th_sum), &th.th_sum);
   6524 
   6525 			hlen += th.th_off << 2;
   6526 		} else {
   6527 			/*
   6528 			 * TCP/IP headers are in the first mbuf; we can do
   6529 			 * this the easy way.
   6530 			 */
   6531 			struct tcphdr *th;
   6532 
   6533 			if (v4) {
   6534 				struct ip *ip =
   6535 				    (void *)(mtod(m0, char *) + offset);
   6536 				th = (void *)(mtod(m0, char *) + hlen);
   6537 
   6538 				ip->ip_len = 0;
   6539 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6540 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6541 			} else {
   6542 				struct ip6_hdr *ip6 =
   6543 				    (void *)(mtod(m0, char *) + offset);
   6544 				th = (void *)(mtod(m0, char *) + hlen);
   6545 
   6546 				ip6->ip6_plen = 0;
   6547 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6548 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6549 			}
   6550 			hlen += th->th_off << 2;
   6551 		}
   6552 
   6553 		if (v4) {
   6554 			WM_Q_EVCNT_INCR(txq, txtso);
   6555 			cmdlen |= WTX_TCPIP_CMD_IP;
   6556 		} else {
   6557 			WM_Q_EVCNT_INCR(txq, txtso6);
   6558 			ipcse = 0;
   6559 		}
   6560 		cmd |= WTX_TCPIP_CMD_TSE;
   6561 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6562 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6563 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6564 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6565 	}
   6566 
   6567 	/*
   6568 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6569 	 * offload feature, if we load the context descriptor, we
   6570 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6571 	 */
   6572 
   6573 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6574 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6575 	    WTX_TCPIP_IPCSE(ipcse);
   6576 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6577 		WM_Q_EVCNT_INCR(txq, txipsum);
   6578 		fields |= WTX_IXSM;
   6579 	}
   6580 
   6581 	offset += iphl;
   6582 
   6583 	if (m0->m_pkthdr.csum_flags &
   6584 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6585 		WM_Q_EVCNT_INCR(txq, txtusum);
   6586 		fields |= WTX_TXSM;
   6587 		tucs = WTX_TCPIP_TUCSS(offset) |
   6588 		    WTX_TCPIP_TUCSO(offset +
   6589 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6590 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6591 	} else if ((m0->m_pkthdr.csum_flags &
   6592 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6593 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6594 		fields |= WTX_TXSM;
   6595 		tucs = WTX_TCPIP_TUCSS(offset) |
   6596 		    WTX_TCPIP_TUCSO(offset +
   6597 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6598 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6599 	} else {
   6600 		/* Just initialize it to a valid TCP context. */
   6601 		tucs = WTX_TCPIP_TUCSS(offset) |
   6602 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6603 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6604 	}
   6605 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: when the 82574 uses two descriptor queues,
	 * a context descriptor must be written for every packet.
	 * Writing one for every packet adds overhead, but it does not
	 * cause problems.
	 */
   6613 	/* Fill in the context descriptor. */
   6614 	t = (struct livengood_tcpip_ctxdesc *)
   6615 	    &txq->txq_descs[txq->txq_next];
   6616 	t->tcpip_ipcs = htole32(ipcs);
   6617 	t->tcpip_tucs = htole32(tucs);
   6618 	t->tcpip_cmdlen = htole32(cmdlen);
   6619 	t->tcpip_seg = htole32(seg);
   6620 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6621 
   6622 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6623 	txs->txs_ndesc++;
   6624 
   6625 	*cmdp = cmd;
   6626 	*fieldsp = fields;
   6627 
   6628 	return 0;
   6629 }
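
/*
 * Illustrative sketch (not part of the driver): the checksum offsets
 * computed by wm_tx_offload() above, for the common case of an untagged
 * IPv4/TCP frame.  With an Ethernet header of 14 bytes and iphl = 20:
 * IPCSS = 14 (start of the IP header), IPCSO = 14 + 10 = 24 (ip_sum),
 * IPCSE = 14 + 20 - 1 = 33 (last byte of the IP header) and
 * TUCSO = 34 + 16 = 50 (th_sum).  All are byte offsets from the start
 * of the frame; the struct and function below are hypothetical.
 */
struct example_ctx_offsets {
	uint8_t ipcss, ipcso;
	uint16_t ipcse;
	uint8_t tucss, tucso;
};

static inline struct example_ctx_offsets
example_tcpip_ctx(uint8_t ether_hlen, uint8_t iphl)
{
	struct example_ctx_offsets o;

	o.ipcss = ether_hlen;
	o.ipcso = ether_hlen + offsetof(struct ip, ip_sum);
	o.ipcse = ether_hlen + iphl - 1;
	o.tucss = ether_hlen + iphl;
	o.tucso = o.tucss + offsetof(struct tcphdr, th_sum);
	return o;
}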
   6630 
   6631 static inline int
   6632 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6633 {
   6634 	struct wm_softc *sc = ifp->if_softc;
   6635 	u_int cpuid = cpu_index(curcpu());
   6636 
	/*
	 * Currently a simple distribution strategy based on the CPU index.
	 * TODO: distribute by flowid (the RSS hash value).
	 */
	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6643 }
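
/*
 * Illustrative sketch (not part of the driver): the CPU-to-queue mapping
 * used by wm_select_txqueue() above, isolated as plain C.  Adding ncpu
 * before subtracting the affinity offset keeps the dividend from going
 * negative; e.g. with ncpu = 4, nqueues = 2 and offset = 3, cpuid 1
 * maps to (1 + 4 - 3) % 2 = 0.  The function name is hypothetical.
 */
static inline u_int
example_cpu_to_queue(u_int cpuid, u_int ncpu, u_int offset, u_int nqueues)
{

	return (cpuid + ncpu - offset) % nqueues;
}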
   6644 
   6645 /*
   6646  * wm_start:		[ifnet interface function]
   6647  *
   6648  *	Start packet transmission on the interface.
   6649  */
   6650 static void
   6651 wm_start(struct ifnet *ifp)
   6652 {
   6653 	struct wm_softc *sc = ifp->if_softc;
   6654 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6655 
   6656 #ifdef WM_MPSAFE
   6657 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6658 #endif
   6659 	/*
   6660 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6661 	 */
   6662 
   6663 	mutex_enter(txq->txq_lock);
   6664 	if (!txq->txq_stopping)
   6665 		wm_start_locked(ifp);
   6666 	mutex_exit(txq->txq_lock);
   6667 }
   6668 
   6669 static void
   6670 wm_start_locked(struct ifnet *ifp)
   6671 {
   6672 	struct wm_softc *sc = ifp->if_softc;
   6673 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6674 
   6675 	wm_send_common_locked(ifp, txq, false);
   6676 }
   6677 
   6678 static int
   6679 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6680 {
   6681 	int qid;
   6682 	struct wm_softc *sc = ifp->if_softc;
   6683 	struct wm_txqueue *txq;
   6684 
   6685 	qid = wm_select_txqueue(ifp, m);
   6686 	txq = &sc->sc_queue[qid].wmq_txq;
   6687 
   6688 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6689 		m_freem(m);
   6690 		WM_Q_EVCNT_INCR(txq, txdrop);
   6691 		return ENOBUFS;
   6692 	}
   6693 
	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   6697 	ifp->if_obytes += m->m_pkthdr.len;
   6698 	if (m->m_flags & M_MCAST)
   6699 		ifp->if_omcasts++;
   6700 
   6701 	if (mutex_tryenter(txq->txq_lock)) {
   6702 		if (!txq->txq_stopping)
   6703 			wm_transmit_locked(ifp, txq);
   6704 		mutex_exit(txq->txq_lock);
   6705 	}
   6706 
   6707 	return 0;
   6708 }
   6709 
   6710 static void
   6711 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6712 {
   6713 
   6714 	wm_send_common_locked(ifp, txq, true);
   6715 }
   6716 
   6717 static void
   6718 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6719     bool is_transmit)
   6720 {
   6721 	struct wm_softc *sc = ifp->if_softc;
   6722 	struct mbuf *m0;
   6723 	struct m_tag *mtag;
   6724 	struct wm_txsoft *txs;
   6725 	bus_dmamap_t dmamap;
   6726 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6727 	bus_addr_t curaddr;
   6728 	bus_size_t seglen, curlen;
   6729 	uint32_t cksumcmd;
   6730 	uint8_t cksumfields;
   6731 
   6732 	KASSERT(mutex_owned(txq->txq_lock));
   6733 
   6734 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6735 		return;
   6736 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6737 		return;
   6738 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6739 		return;
   6740 
   6741 	/* Remember the previous number of free descriptors. */
   6742 	ofree = txq->txq_free;
   6743 
   6744 	/*
   6745 	 * Loop through the send queue, setting up transmit descriptors
   6746 	 * until we drain the queue, or use up all available transmit
   6747 	 * descriptors.
   6748 	 */
   6749 	for (;;) {
   6750 		m0 = NULL;
   6751 
   6752 		/* Get a work queue entry. */
   6753 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6754 			wm_txeof(sc, txq);
   6755 			if (txq->txq_sfree == 0) {
   6756 				DPRINTF(WM_DEBUG_TX,
   6757 				    ("%s: TX: no free job descriptors\n",
   6758 					device_xname(sc->sc_dev)));
   6759 				WM_Q_EVCNT_INCR(txq, txsstall);
   6760 				break;
   6761 			}
   6762 		}
   6763 
   6764 		/* Grab a packet off the queue. */
   6765 		if (is_transmit)
   6766 			m0 = pcq_get(txq->txq_interq);
   6767 		else
   6768 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6769 		if (m0 == NULL)
   6770 			break;
   6771 
   6772 		DPRINTF(WM_DEBUG_TX,
   6773 		    ("%s: TX: have packet to transmit: %p\n",
   6774 		    device_xname(sc->sc_dev), m0));
   6775 
   6776 		txs = &txq->txq_soft[txq->txq_snext];
   6777 		dmamap = txs->txs_dmamap;
   6778 
   6779 		use_tso = (m0->m_pkthdr.csum_flags &
   6780 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6781 
		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * i.e. a buffer may span at most four MSS-sized pieces.
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
   6791 		dmamap->dm_maxsegsz =
   6792 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6793 		    ? m0->m_pkthdr.segsz << 2
   6794 		    : WTX_MAX_LEN;
   6795 
   6796 		/*
   6797 		 * Load the DMA map.  If this fails, the packet either
   6798 		 * didn't fit in the allotted number of segments, or we
   6799 		 * were short on resources.  For the too-many-segments
   6800 		 * case, we simply report an error and drop the packet,
   6801 		 * since we can't sanely copy a jumbo packet to a single
   6802 		 * buffer.
   6803 		 */
   6804 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6805 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6806 		if (error) {
   6807 			if (error == EFBIG) {
   6808 				WM_Q_EVCNT_INCR(txq, txdrop);
   6809 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6810 				    "DMA segments, dropping...\n",
   6811 				    device_xname(sc->sc_dev));
   6812 				wm_dump_mbuf_chain(sc, m0);
   6813 				m_freem(m0);
   6814 				continue;
   6815 			}
			/* Short on resources, just stop for now. */
   6817 			DPRINTF(WM_DEBUG_TX,
   6818 			    ("%s: TX: dmamap load failed: %d\n",
   6819 			    device_xname(sc->sc_dev), error));
   6820 			break;
   6821 		}
   6822 
   6823 		segs_needed = dmamap->dm_nsegs;
   6824 		if (use_tso) {
   6825 			/* For sentinel descriptor; see below. */
   6826 			segs_needed++;
   6827 		}
   6828 
   6829 		/*
   6830 		 * Ensure we have enough descriptors free to describe
   6831 		 * the packet.  Note, we always reserve one descriptor
   6832 		 * at the end of the ring due to the semantics of the
   6833 		 * TDT register, plus one more in the event we need
   6834 		 * to load offload context.
   6835 		 */
   6836 		if (segs_needed > txq->txq_free - 2) {
   6837 			/*
   6838 			 * Not enough free descriptors to transmit this
   6839 			 * packet.  We haven't committed anything yet,
   6840 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6842 			 * layer that there are no more slots left.
   6843 			 */
   6844 			DPRINTF(WM_DEBUG_TX,
   6845 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6846 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6847 			    segs_needed, txq->txq_free - 1));
   6848 			if (!is_transmit)
   6849 				ifp->if_flags |= IFF_OACTIVE;
   6850 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6851 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6852 			WM_Q_EVCNT_INCR(txq, txdstall);
   6853 			break;
   6854 		}
   6855 
   6856 		/*
   6857 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6858 		 * once we know we can transmit the packet, since we
   6859 		 * do some internal FIFO space accounting here.
   6860 		 */
   6861 		if (sc->sc_type == WM_T_82547 &&
   6862 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6863 			DPRINTF(WM_DEBUG_TX,
   6864 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6865 			    device_xname(sc->sc_dev)));
   6866 			if (!is_transmit)
   6867 				ifp->if_flags |= IFF_OACTIVE;
   6868 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6869 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6870 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6871 			break;
   6872 		}
   6873 
   6874 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6875 
   6876 		DPRINTF(WM_DEBUG_TX,
   6877 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6878 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6879 
   6880 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6881 
   6882 		/*
   6883 		 * Store a pointer to the packet so that we can free it
   6884 		 * later.
   6885 		 *
   6886 		 * Initially, we consider the number of descriptors the
   6887 		 * packet uses the number of DMA segments.  This may be
   6888 		 * incremented by 1 if we do checksum offload (a descriptor
   6889 		 * is used to set the checksum context).
   6890 		 */
   6891 		txs->txs_mbuf = m0;
   6892 		txs->txs_firstdesc = txq->txq_next;
   6893 		txs->txs_ndesc = segs_needed;
   6894 
   6895 		/* Set up offload parameters for this packet. */
   6896 		if (m0->m_pkthdr.csum_flags &
   6897 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6898 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6899 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6900 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6901 					  &cksumfields) != 0) {
   6902 				/* Error message already displayed. */
   6903 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6904 				continue;
   6905 			}
   6906 		} else {
   6907 			cksumcmd = 0;
   6908 			cksumfields = 0;
   6909 		}
   6910 
   6911 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6912 
   6913 		/* Sync the DMA map. */
   6914 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6915 		    BUS_DMASYNC_PREWRITE);
   6916 
   6917 		/* Initialize the transmit descriptor. */
   6918 		for (nexttx = txq->txq_next, seg = 0;
   6919 		     seg < dmamap->dm_nsegs; seg++) {
   6920 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6921 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6922 			     seglen != 0;
   6923 			     curaddr += curlen, seglen -= curlen,
   6924 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6925 				curlen = seglen;
   6926 
   6927 				/*
   6928 				 * So says the Linux driver:
   6929 				 * Work around for premature descriptor
   6930 				 * write-backs in TSO mode.  Append a
   6931 				 * 4-byte sentinel descriptor.
   6932 				 */
   6933 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6934 				    curlen > 8)
   6935 					curlen -= 4;
   6936 
   6937 				wm_set_dma_addr(
   6938 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6939 				txq->txq_descs[nexttx].wtx_cmdlen
   6940 				    = htole32(cksumcmd | curlen);
   6941 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6942 				    = 0;
   6943 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6944 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   6946 				lasttx = nexttx;
   6947 
   6948 				DPRINTF(WM_DEBUG_TX,
   6949 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6950 				     "len %#04zx\n",
   6951 				    device_xname(sc->sc_dev), nexttx,
   6952 				    (uint64_t)curaddr, curlen));
   6953 			}
   6954 		}
   6955 
   6956 		KASSERT(lasttx != -1);
   6957 
   6958 		/*
   6959 		 * Set up the command byte on the last descriptor of
   6960 		 * the packet.  If we're in the interrupt delay window,
   6961 		 * delay the interrupt.
   6962 		 */
   6963 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6964 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6965 
   6966 		/*
   6967 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6968 		 * up the descriptor to encapsulate the packet for us.
   6969 		 *
   6970 		 * This is only valid on the last descriptor of the packet.
   6971 		 */
   6972 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6973 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6974 			    htole32(WTX_CMD_VLE);
   6975 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6976 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6977 		}
   6978 
   6979 		txs->txs_lastdesc = lasttx;
   6980 
   6981 		DPRINTF(WM_DEBUG_TX,
   6982 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6983 		    device_xname(sc->sc_dev),
   6984 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6985 
   6986 		/* Sync the descriptors we're using. */
   6987 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6988 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6989 
   6990 		/* Give the packet to the chip. */
   6991 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6992 
   6993 		DPRINTF(WM_DEBUG_TX,
   6994 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6995 
   6996 		DPRINTF(WM_DEBUG_TX,
   6997 		    ("%s: TX: finished transmitting packet, job %d\n",
   6998 		    device_xname(sc->sc_dev), txq->txq_snext));
   6999 
   7000 		/* Advance the tx pointer. */
   7001 		txq->txq_free -= txs->txs_ndesc;
   7002 		txq->txq_next = nexttx;
   7003 
   7004 		txq->txq_sfree--;
   7005 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7006 
   7007 		/* Pass the packet to any BPF listeners. */
   7008 		bpf_mtap(ifp, m0);
   7009 	}
   7010 
   7011 	if (m0 != NULL) {
   7012 		if (!is_transmit)
   7013 			ifp->if_flags |= IFF_OACTIVE;
   7014 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7015 		WM_Q_EVCNT_INCR(txq, txdrop);
   7016 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7017 			__func__));
   7018 		m_freem(m0);
   7019 	}
   7020 
   7021 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7022 		/* No more slots; notify upper layer. */
   7023 		if (!is_transmit)
   7024 			ifp->if_flags |= IFF_OACTIVE;
   7025 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7026 	}
   7027 
   7028 	if (txq->txq_free != ofree) {
   7029 		/* Set a watchdog timer in case the chip flakes out. */
   7030 		ifp->if_timer = 5;
   7031 	}
   7032 }
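
/*
 * Illustrative sketch (not part of the driver): the TSO sentinel
 * adjustment in the descriptor loop above.  The last DMA segment of a
 * TSO packet is shortened by four bytes (when longer than eight), so
 * the leftover four bytes take one more trip through the loop and end
 * up as a small final descriptor, working around premature descriptor
 * write-backs.  The function name is hypothetical.
 */
static inline bus_size_t
example_tso_trim(bool use_tso, bool last_seg, bus_size_t curlen)
{

	if (use_tso && last_seg && curlen > 8)
		curlen -= 4;	/* leave 4 bytes for a sentinel descriptor */
	return curlen;
}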
   7033 
   7034 /*
   7035  * wm_nq_tx_offload:
   7036  *
   7037  *	Set up TCP/IP checksumming parameters for the
   7038  *	specified packet, for NEWQUEUE devices
   7039  */
   7040 static int
   7041 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7042     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7043 {
   7044 	struct mbuf *m0 = txs->txs_mbuf;
   7045 	struct m_tag *mtag;
   7046 	uint32_t vl_len, mssidx, cmdc;
   7047 	struct ether_header *eh;
   7048 	int offset, iphl;
   7049 
   7050 	/*
   7051 	 * XXX It would be nice if the mbuf pkthdr had offset
   7052 	 * fields for the protocol headers.
   7053 	 */
   7054 	*cmdlenp = 0;
   7055 	*fieldsp = 0;
   7056 
   7057 	eh = mtod(m0, struct ether_header *);
   7058 	switch (htons(eh->ether_type)) {
   7059 	case ETHERTYPE_IP:
   7060 	case ETHERTYPE_IPV6:
   7061 		offset = ETHER_HDR_LEN;
   7062 		break;
   7063 
   7064 	case ETHERTYPE_VLAN:
   7065 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7066 		break;
   7067 
   7068 	default:
   7069 		/* Don't support this protocol or encapsulation. */
   7070 		*do_csum = false;
   7071 		return 0;
   7072 	}
   7073 	*do_csum = true;
   7074 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7075 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7076 
   7077 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7078 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7079 
   7080 	if ((m0->m_pkthdr.csum_flags &
   7081 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7082 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7083 	} else {
   7084 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7085 	}
   7086 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7087 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7088 
   7089 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7090 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7091 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7092 		*cmdlenp |= NQTX_CMD_VLE;
   7093 	}
   7094 
   7095 	mssidx = 0;
   7096 
   7097 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7098 		int hlen = offset + iphl;
   7099 		int tcp_hlen;
   7100 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7101 
   7102 		if (__predict_false(m0->m_len <
   7103 				    (hlen + sizeof(struct tcphdr)))) {
   7104 			/*
   7105 			 * TCP/IP headers are not in the first mbuf; we need
   7106 			 * to do this the slow and painful way.  Let's just
   7107 			 * hope this doesn't happen very often.
   7108 			 */
   7109 			struct tcphdr th;
   7110 
   7111 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7112 
   7113 			m_copydata(m0, hlen, sizeof(th), &th);
   7114 			if (v4) {
   7115 				struct ip ip;
   7116 
   7117 				m_copydata(m0, offset, sizeof(ip), &ip);
   7118 				ip.ip_len = 0;
   7119 				m_copyback(m0,
   7120 				    offset + offsetof(struct ip, ip_len),
   7121 				    sizeof(ip.ip_len), &ip.ip_len);
   7122 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7123 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7124 			} else {
   7125 				struct ip6_hdr ip6;
   7126 
   7127 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7128 				ip6.ip6_plen = 0;
   7129 				m_copyback(m0,
   7130 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7131 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7132 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7133 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7134 			}
   7135 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7136 			    sizeof(th.th_sum), &th.th_sum);
   7137 
   7138 			tcp_hlen = th.th_off << 2;
   7139 		} else {
   7140 			/*
   7141 			 * TCP/IP headers are in the first mbuf; we can do
   7142 			 * this the easy way.
   7143 			 */
   7144 			struct tcphdr *th;
   7145 
   7146 			if (v4) {
   7147 				struct ip *ip =
   7148 				    (void *)(mtod(m0, char *) + offset);
   7149 				th = (void *)(mtod(m0, char *) + hlen);
   7150 
   7151 				ip->ip_len = 0;
   7152 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7153 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7154 			} else {
   7155 				struct ip6_hdr *ip6 =
   7156 				    (void *)(mtod(m0, char *) + offset);
   7157 				th = (void *)(mtod(m0, char *) + hlen);
   7158 
   7159 				ip6->ip6_plen = 0;
   7160 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7161 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7162 			}
   7163 			tcp_hlen = th->th_off << 2;
   7164 		}
   7165 		hlen += tcp_hlen;
   7166 		*cmdlenp |= NQTX_CMD_TSE;
   7167 
   7168 		if (v4) {
   7169 			WM_Q_EVCNT_INCR(txq, txtso);
   7170 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7171 		} else {
   7172 			WM_Q_EVCNT_INCR(txq, txtso6);
   7173 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7174 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7177 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7178 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7179 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7180 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7181 	} else {
   7182 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7183 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7184 	}
   7185 
   7186 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7187 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7188 		cmdc |= NQTXC_CMD_IP4;
   7189 	}
   7190 
   7191 	if (m0->m_pkthdr.csum_flags &
   7192 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7193 		WM_Q_EVCNT_INCR(txq, txtusum);
   7194 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7195 			cmdc |= NQTXC_CMD_TCP;
   7196 		} else {
   7197 			cmdc |= NQTXC_CMD_UDP;
   7198 		}
   7199 		cmdc |= NQTXC_CMD_IP4;
   7200 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7201 	}
   7202 	if (m0->m_pkthdr.csum_flags &
   7203 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7204 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7205 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7206 			cmdc |= NQTXC_CMD_TCP;
   7207 		} else {
   7208 			cmdc |= NQTXC_CMD_UDP;
   7209 		}
   7210 		cmdc |= NQTXC_CMD_IP6;
   7211 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7212 	}
   7213 
	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers (82575, 82576, 82580, I350, I354, I210 and
	 * I211); writing one per Tx queue is enough for these controllers.
	 * Writing one for every packet adds overhead, but it does not
	 * cause problems.
	 */
   7222 	/* Fill in the context descriptor. */
   7223 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7224 	    htole32(vl_len);
   7225 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7226 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7227 	    htole32(cmdc);
   7228 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7229 	    htole32(mssidx);
   7230 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7231 	DPRINTF(WM_DEBUG_TX,
   7232 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7233 	    txq->txq_next, 0, vl_len));
   7234 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7235 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7236 	txs->txs_ndesc++;
   7237 	return 0;
   7238 }
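
/*
 * Illustrative sketch (not part of the driver): how the advanced context
 * descriptor above packs its length words, assuming the shift values of
 * the NQTXC_VLLEN_* and NQTXC_MSSIDX_* macros follow the 82575 layout
 * (VLAN in bits 31:16, MACLEN above IPLEN in the first word; MSS in
 * bits 31:16 and the L4 header length in bits 15:8 of the second).
 * Treat the shift values as assumptions.
 */
static inline uint32_t
example_nq_vl_len(uint16_t vlan, uint8_t maclen, uint16_t iplen)
{

	return ((uint32_t)vlan << 16) | ((uint32_t)maclen << 9) | iplen;
}

static inline uint32_t
example_nq_mssidx(uint16_t mss, uint8_t l4len)
{

	return ((uint32_t)mss << 16) | ((uint32_t)l4len << 8);
}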
   7239 
   7240 /*
   7241  * wm_nq_start:		[ifnet interface function]
   7242  *
   7243  *	Start packet transmission on the interface for NEWQUEUE devices
   7244  */
   7245 static void
   7246 wm_nq_start(struct ifnet *ifp)
   7247 {
   7248 	struct wm_softc *sc = ifp->if_softc;
   7249 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7250 
   7251 #ifdef WM_MPSAFE
   7252 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7253 #endif
   7254 	/*
   7255 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7256 	 */
   7257 
   7258 	mutex_enter(txq->txq_lock);
   7259 	if (!txq->txq_stopping)
   7260 		wm_nq_start_locked(ifp);
   7261 	mutex_exit(txq->txq_lock);
   7262 }
   7263 
   7264 static void
   7265 wm_nq_start_locked(struct ifnet *ifp)
   7266 {
   7267 	struct wm_softc *sc = ifp->if_softc;
   7268 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7269 
   7270 	wm_nq_send_common_locked(ifp, txq, false);
   7271 }
   7272 
   7273 static int
   7274 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7275 {
   7276 	int qid;
   7277 	struct wm_softc *sc = ifp->if_softc;
   7278 	struct wm_txqueue *txq;
   7279 
   7280 	qid = wm_select_txqueue(ifp, m);
   7281 	txq = &sc->sc_queue[qid].wmq_txq;
   7282 
   7283 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7284 		m_freem(m);
   7285 		WM_Q_EVCNT_INCR(txq, txdrop);
   7286 		return ENOBUFS;
   7287 	}
   7288 
	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7292 	ifp->if_obytes += m->m_pkthdr.len;
   7293 	if (m->m_flags & M_MCAST)
   7294 		ifp->if_omcasts++;
   7295 
	/*
	 * mutex_tryenter() can fail here in two situations:
	 *     (1) contention with the interrupt handler
	 *         (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get
	 * stuck.  The same holds in case (2).
	 * (See the illustrative sketch after this function.)
	 */
   7306 	if (mutex_tryenter(txq->txq_lock)) {
   7307 		if (!txq->txq_stopping)
   7308 			wm_nq_transmit_locked(ifp, txq);
   7309 		mutex_exit(txq->txq_lock);
   7310 	}
   7311 
   7312 	return 0;
   7313 }
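
/*
 * Illustrative sketch (not part of the driver): the enqueue-then-trylock
 * pattern used by wm_nq_transmit() above.  The producer always enqueues
 * first; if the try-lock fails, whoever holds the lock (the interrupt
 * handler or the deferred-start softint) drains the pcq afterwards, so
 * an enqueued packet cannot be stranded.  The names are hypothetical.
 */
static inline void
example_enqueue_or_defer(pcq_t *q, kmutex_t *lock, struct mbuf *m,
    void (*drain)(pcq_t *))
{

	if (!pcq_put(q, m)) {
		m_freem(m);	/* queue full: drop the packet */
		return;
	}
	if (mutex_tryenter(lock)) {
		(*drain)(q);	/* we won the lock; send what is queued */
		mutex_exit(lock);
	}
	/* Otherwise the current lock holder drains the queue for us. */
}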
   7314 
   7315 static void
   7316 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7317 {
   7318 
   7319 	wm_nq_send_common_locked(ifp, txq, true);
   7320 }
   7321 
   7322 static void
   7323 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7324     bool is_transmit)
   7325 {
   7326 	struct wm_softc *sc = ifp->if_softc;
   7327 	struct mbuf *m0;
   7328 	struct m_tag *mtag;
   7329 	struct wm_txsoft *txs;
   7330 	bus_dmamap_t dmamap;
   7331 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7332 	bool do_csum, sent;
   7333 
   7334 	KASSERT(mutex_owned(txq->txq_lock));
   7335 
   7336 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7337 		return;
   7338 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7339 		return;
   7340 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7341 		return;
   7342 
   7343 	sent = false;
   7344 
   7345 	/*
   7346 	 * Loop through the send queue, setting up transmit descriptors
   7347 	 * until we drain the queue, or use up all available transmit
   7348 	 * descriptors.
   7349 	 */
   7350 	for (;;) {
   7351 		m0 = NULL;
   7352 
   7353 		/* Get a work queue entry. */
   7354 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7355 			wm_txeof(sc, txq);
   7356 			if (txq->txq_sfree == 0) {
   7357 				DPRINTF(WM_DEBUG_TX,
   7358 				    ("%s: TX: no free job descriptors\n",
   7359 					device_xname(sc->sc_dev)));
   7360 				WM_Q_EVCNT_INCR(txq, txsstall);
   7361 				break;
   7362 			}
   7363 		}
   7364 
   7365 		/* Grab a packet off the queue. */
   7366 		if (is_transmit)
   7367 			m0 = pcq_get(txq->txq_interq);
   7368 		else
   7369 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7370 		if (m0 == NULL)
   7371 			break;
   7372 
   7373 		DPRINTF(WM_DEBUG_TX,
   7374 		    ("%s: TX: have packet to transmit: %p\n",
   7375 		    device_xname(sc->sc_dev), m0));
   7376 
   7377 		txs = &txq->txq_soft[txq->txq_snext];
   7378 		dmamap = txs->txs_dmamap;
   7379 
   7380 		/*
   7381 		 * Load the DMA map.  If this fails, the packet either
   7382 		 * didn't fit in the allotted number of segments, or we
   7383 		 * were short on resources.  For the too-many-segments
   7384 		 * case, we simply report an error and drop the packet,
   7385 		 * since we can't sanely copy a jumbo packet to a single
   7386 		 * buffer.
   7387 		 */
   7388 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7389 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7390 		if (error) {
   7391 			if (error == EFBIG) {
   7392 				WM_Q_EVCNT_INCR(txq, txdrop);
   7393 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7394 				    "DMA segments, dropping...\n",
   7395 				    device_xname(sc->sc_dev));
   7396 				wm_dump_mbuf_chain(sc, m0);
   7397 				m_freem(m0);
   7398 				continue;
   7399 			}
   7400 			/* Short on resources, just stop for now. */
   7401 			DPRINTF(WM_DEBUG_TX,
   7402 			    ("%s: TX: dmamap load failed: %d\n",
   7403 			    device_xname(sc->sc_dev), error));
   7404 			break;
   7405 		}
   7406 
   7407 		segs_needed = dmamap->dm_nsegs;
   7408 
   7409 		/*
   7410 		 * Ensure we have enough descriptors free to describe
   7411 		 * the packet.  Note, we always reserve one descriptor
   7412 		 * at the end of the ring due to the semantics of the
   7413 		 * TDT register, plus one more in the event we need
   7414 		 * to load offload context.
   7415 		 */
   7416 		if (segs_needed > txq->txq_free - 2) {
   7417 			/*
   7418 			 * Not enough free descriptors to transmit this
   7419 			 * packet.  We haven't committed anything yet,
   7420 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7422 			 * layer that there are no more slots left.
   7423 			 */
   7424 			DPRINTF(WM_DEBUG_TX,
   7425 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7426 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7427 			    segs_needed, txq->txq_free - 1));
   7428 			if (!is_transmit)
   7429 				ifp->if_flags |= IFF_OACTIVE;
   7430 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7431 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7432 			WM_Q_EVCNT_INCR(txq, txdstall);
   7433 			break;
   7434 		}
   7435 
   7436 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7437 
   7438 		DPRINTF(WM_DEBUG_TX,
   7439 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7440 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7441 
   7442 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7443 
   7444 		/*
   7445 		 * Store a pointer to the packet so that we can free it
   7446 		 * later.
   7447 		 *
   7448 		 * Initially, we consider the number of descriptors the
   7449 		 * packet uses the number of DMA segments.  This may be
   7450 		 * incremented by 1 if we do checksum offload (a descriptor
   7451 		 * is used to set the checksum context).
   7452 		 */
   7453 		txs->txs_mbuf = m0;
   7454 		txs->txs_firstdesc = txq->txq_next;
   7455 		txs->txs_ndesc = segs_needed;
   7456 
   7457 		/* Set up offload parameters for this packet. */
   7458 		uint32_t cmdlen, fields, dcmdlen;
   7459 		if (m0->m_pkthdr.csum_flags &
   7460 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7461 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7462 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7463 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7464 			    &do_csum) != 0) {
   7465 				/* Error message already displayed. */
   7466 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7467 				continue;
   7468 			}
   7469 		} else {
   7470 			do_csum = false;
   7471 			cmdlen = 0;
   7472 			fields = 0;
   7473 		}
   7474 
   7475 		/* Sync the DMA map. */
   7476 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7477 		    BUS_DMASYNC_PREWRITE);
   7478 
   7479 		/* Initialize the first transmit descriptor. */
   7480 		nexttx = txq->txq_next;
   7481 		if (!do_csum) {
   7482 			/* setup a legacy descriptor */
   7483 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7484 			    dmamap->dm_segs[0].ds_addr);
   7485 			txq->txq_descs[nexttx].wtx_cmdlen =
   7486 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7487 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7488 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7489 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7490 			    NULL) {
   7491 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7492 				    htole32(WTX_CMD_VLE);
   7493 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7494 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7495 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7497 			}
   7498 			dcmdlen = 0;
   7499 		} else {
   7500 			/* setup an advanced data descriptor */
   7501 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7502 			    htole64(dmamap->dm_segs[0].ds_addr);
   7503 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7504 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7506 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7507 			    htole32(fields);
   7508 			DPRINTF(WM_DEBUG_TX,
   7509 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7510 			    device_xname(sc->sc_dev), nexttx,
   7511 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7512 			DPRINTF(WM_DEBUG_TX,
   7513 			    ("\t 0x%08x%08x\n", fields,
   7514 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7515 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7516 		}
   7517 
   7518 		lasttx = nexttx;
   7519 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the remaining descriptors; the legacy and
		 * advanced formats are the same from here on.
		 */
   7524 		for (seg = 1; seg < dmamap->dm_nsegs;
   7525 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7526 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7527 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7528 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7529 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7530 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7531 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7532 			lasttx = nexttx;
   7533 
   7534 			DPRINTF(WM_DEBUG_TX,
   7535 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7536 			     "len %#04zx\n",
   7537 			    device_xname(sc->sc_dev), nexttx,
   7538 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7539 			    dmamap->dm_segs[seg].ds_len));
   7540 		}
   7541 
   7542 		KASSERT(lasttx != -1);
   7543 
   7544 		/*
   7545 		 * Set up the command byte on the last descriptor of
   7546 		 * the packet.  If we're in the interrupt delay window,
   7547 		 * delay the interrupt.
   7548 		 */
   7549 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7550 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7551 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7552 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7553 
   7554 		txs->txs_lastdesc = lasttx;
   7555 
   7556 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7557 		    device_xname(sc->sc_dev),
   7558 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7559 
   7560 		/* Sync the descriptors we're using. */
   7561 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7562 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7563 
   7564 		/* Give the packet to the chip. */
   7565 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7566 		sent = true;
   7567 
   7568 		DPRINTF(WM_DEBUG_TX,
   7569 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7570 
   7571 		DPRINTF(WM_DEBUG_TX,
   7572 		    ("%s: TX: finished transmitting packet, job %d\n",
   7573 		    device_xname(sc->sc_dev), txq->txq_snext));
   7574 
   7575 		/* Advance the tx pointer. */
   7576 		txq->txq_free -= txs->txs_ndesc;
   7577 		txq->txq_next = nexttx;
   7578 
   7579 		txq->txq_sfree--;
   7580 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7581 
   7582 		/* Pass the packet to any BPF listeners. */
   7583 		bpf_mtap(ifp, m0);
   7584 	}
   7585 
   7586 	if (m0 != NULL) {
   7587 		if (!is_transmit)
   7588 			ifp->if_flags |= IFF_OACTIVE;
   7589 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7590 		WM_Q_EVCNT_INCR(txq, txdrop);
   7591 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7592 			__func__));
   7593 		m_freem(m0);
   7594 	}
   7595 
   7596 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7597 		/* No more slots; notify upper layer. */
   7598 		if (!is_transmit)
   7599 			ifp->if_flags |= IFF_OACTIVE;
   7600 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7601 	}
   7602 
   7603 	if (sent) {
   7604 		/* Set a watchdog timer in case the chip flakes out. */
   7605 		ifp->if_timer = 5;
   7606 	}
   7607 }
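
/*
 * Illustrative sketch (not part of the driver): the free-descriptor test
 * shared by both send paths above.  One slot is always kept unused so
 * that TDT never catches up with TDH (a completely full ring would
 * otherwise be indistinguishable from an empty one), and one more slot
 * is reserved for a possible offload context descriptor, hence the
 * "- 2".  The function name is hypothetical.
 */
static inline bool
example_ring_has_room(int segs_needed, int nfree)
{

	return segs_needed <= nfree - 2;
}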
   7608 
   7609 static void
   7610 wm_deferred_start_locked(struct wm_txqueue *txq)
   7611 {
   7612 	struct wm_softc *sc = txq->txq_sc;
   7613 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7614 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7615 	int qid = wmq->wmq_id;
   7616 
   7617 	KASSERT(mutex_owned(txq->txq_lock));
   7618 
   7619 	if (txq->txq_stopping) {
   7620 		mutex_exit(txq->txq_lock);
   7621 		return;
   7622 	}
   7623 
   7624 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX Needed for ALTQ or a single-CPU system */
   7626 		if (qid == 0)
   7627 			wm_nq_start_locked(ifp);
   7628 		wm_nq_transmit_locked(ifp, txq);
   7629 	} else {
		/* XXX Needed for ALTQ or a single-CPU system */
   7631 		if (qid == 0)
   7632 			wm_start_locked(ifp);
   7633 		wm_transmit_locked(ifp, txq);
   7634 	}
   7635 }
   7636 
   7637 /* Interrupt */
   7638 
   7639 /*
   7640  * wm_txeof:
   7641  *
   7642  *	Helper; handle transmit interrupts.
   7643  */
   7644 static int
   7645 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7646 {
   7647 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7648 	struct wm_txsoft *txs;
   7649 	bool processed = false;
   7650 	int count = 0;
   7651 	int i;
   7652 	uint8_t status;
   7653 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7654 
   7655 	KASSERT(mutex_owned(txq->txq_lock));
   7656 
   7657 	if (txq->txq_stopping)
   7658 		return 0;
   7659 
   7660 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7662 	if (wmq->wmq_id == 0)
   7663 		ifp->if_flags &= ~IFF_OACTIVE;
   7664 
   7665 	/*
   7666 	 * Go through the Tx list and free mbufs for those
   7667 	 * frames which have been transmitted.
   7668 	 */
   7669 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7670 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7671 		txs = &txq->txq_soft[i];
   7672 
   7673 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7674 			device_xname(sc->sc_dev), i));
   7675 
   7676 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7677 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7678 
   7679 		status =
   7680 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7681 		if ((status & WTX_ST_DD) == 0) {
   7682 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7683 			    BUS_DMASYNC_PREREAD);
   7684 			break;
   7685 		}
   7686 
   7687 		processed = true;
   7688 		count++;
   7689 		DPRINTF(WM_DEBUG_TX,
   7690 		    ("%s: TX: job %d done: descs %d..%d\n",
   7691 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7692 		    txs->txs_lastdesc));
   7693 
   7694 		/*
   7695 		 * XXX We should probably be using the statistics
   7696 		 * XXX registers, but I don't know if they exist
   7697 		 * XXX on chips before the i82544.
   7698 		 */
   7699 
   7700 #ifdef WM_EVENT_COUNTERS
   7701 		if (status & WTX_ST_TU)
   7702 			WM_Q_EVCNT_INCR(txq, tu);
   7703 #endif /* WM_EVENT_COUNTERS */
   7704 
   7705 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7706 			ifp->if_oerrors++;
   7707 			if (status & WTX_ST_LC)
   7708 				log(LOG_WARNING, "%s: late collision\n",
   7709 				    device_xname(sc->sc_dev));
   7710 			else if (status & WTX_ST_EC) {
   7711 				ifp->if_collisions += 16;
   7712 				log(LOG_WARNING, "%s: excessive collisions\n",
   7713 				    device_xname(sc->sc_dev));
   7714 			}
   7715 		} else
   7716 			ifp->if_opackets++;
   7717 
   7718 		txq->txq_packets++;
   7719 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7720 
   7721 		txq->txq_free += txs->txs_ndesc;
   7722 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7723 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7724 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7725 		m_freem(txs->txs_mbuf);
   7726 		txs->txs_mbuf = NULL;
   7727 	}
   7728 
   7729 	/* Update the dirty transmit buffer pointer. */
   7730 	txq->txq_sdirty = i;
   7731 	DPRINTF(WM_DEBUG_TX,
   7732 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7733 
   7734 	if (count != 0)
   7735 		rnd_add_uint32(&sc->rnd_source, count);
   7736 
   7737 	/*
   7738 	 * If there are no more pending transmissions, cancel the watchdog
   7739 	 * timer.
   7740 	 */
   7741 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7742 		ifp->if_timer = 0;
   7743 
   7744 	return processed;
   7745 }
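
/*
 * Illustrative sketch (not part of the driver): the reclaim walk in
 * wm_txeof() above, reduced to its ring arithmetic.  Jobs are scanned
 * from the oldest (sdirty) towards the newest and reclaimed until one
 * is found whose last descriptor lacks the DD (descriptor done) bit.
 * The names are hypothetical; done[] stands in for the WTX_ST_DD test.
 */
static inline int
example_reclaim(const bool *done, int sdirty, int nfree, int qlen)
{
	int i;

	for (i = sdirty; nfree != qlen; i = (i + 1) % qlen, nfree++) {
		if (!done[i])
			break;	/* the hardware hasn't finished this job */
	}
	return i;	/* the new sdirty index */
}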
   7746 
   7747 static inline uint32_t
   7748 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7749 {
   7750 	struct wm_softc *sc = rxq->rxq_sc;
   7751 
   7752 	if (sc->sc_type == WM_T_82574)
   7753 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7754 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7755 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7756 	else
   7757 		return rxq->rxq_descs[idx].wrx_status;
   7758 }
   7759 
   7760 static inline uint32_t
   7761 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7762 {
   7763 	struct wm_softc *sc = rxq->rxq_sc;
   7764 
   7765 	if (sc->sc_type == WM_T_82574)
   7766 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7767 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7768 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7769 	else
   7770 		return rxq->rxq_descs[idx].wrx_errors;
   7771 }
   7772 
   7773 static inline uint16_t
   7774 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7775 {
   7776 	struct wm_softc *sc = rxq->rxq_sc;
   7777 
   7778 	if (sc->sc_type == WM_T_82574)
   7779 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7780 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7781 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7782 	else
   7783 		return rxq->rxq_descs[idx].wrx_special;
   7784 }
   7785 
   7786 static inline int
   7787 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7788 {
   7789 	struct wm_softc *sc = rxq->rxq_sc;
   7790 
   7791 	if (sc->sc_type == WM_T_82574)
   7792 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7793 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7794 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7795 	else
   7796 		return rxq->rxq_descs[idx].wrx_len;
   7797 }
   7798 
   7799 #ifdef WM_DEBUG
   7800 static inline uint32_t
   7801 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7802 {
   7803 	struct wm_softc *sc = rxq->rxq_sc;
   7804 
   7805 	if (sc->sc_type == WM_T_82574)
   7806 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7807 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7808 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7809 	else
   7810 		return 0;
   7811 }
   7812 
   7813 static inline uint8_t
   7814 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7815 {
   7816 	struct wm_softc *sc = rxq->rxq_sc;
   7817 
   7818 	if (sc->sc_type == WM_T_82574)
   7819 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7820 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7821 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7822 	else
   7823 		return 0;
   7824 }
   7825 #endif /* WM_DEBUG */
   7826 
   7827 static inline bool
   7828 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7829     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7830 {
   7831 
   7832 	if (sc->sc_type == WM_T_82574)
   7833 		return (status & ext_bit) != 0;
   7834 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7835 		return (status & nq_bit) != 0;
   7836 	else
   7837 		return (status & legacy_bit) != 0;
   7838 }
   7839 
   7840 static inline bool
   7841 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7842     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7843 {
   7844 
   7845 	if (sc->sc_type == WM_T_82574)
   7846 		return (error & ext_bit) != 0;
   7847 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7848 		return (error & nq_bit) != 0;
   7849 	else
   7850 		return (error & legacy_bit) != 0;
   7851 }
   7852 
   7853 static inline bool
   7854 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7855 {
   7856 
   7857 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7858 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7859 		return true;
   7860 	else
   7861 		return false;
   7862 }
   7863 
   7864 static inline bool
   7865 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7866 {
   7867 	struct wm_softc *sc = rxq->rxq_sc;
   7868 
	/* XXX Is an error bit missing for newqueue? */
	if (wm_rxdesc_is_set_error(sc, errors,
		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
		NQRXC_ERROR_RXE)) {
		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
			EXTRXC_ERROR_SE, 0))
			log(LOG_WARNING, "%s: symbol error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
			EXTRXC_ERROR_SEQ, 0))
			log(LOG_WARNING, "%s: receive sequence error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
			EXTRXC_ERROR_CE, 0))
			log(LOG_WARNING, "%s: CRC error\n",
			    device_xname(sc->sc_dev));
		return true;
	}
   7885 
   7886 	return false;
   7887 }
   7888 
   7889 static inline bool
   7890 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7891 {
   7892 	struct wm_softc *sc = rxq->rxq_sc;
   7893 
   7894 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7895 		NQRXC_STATUS_DD)) {
   7896 		/* We have processed all of the receive descriptors. */
   7897 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7898 		return false;
   7899 	}
   7900 
   7901 	return true;
   7902 }
   7903 
   7904 static inline bool
   7905 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7906     struct mbuf *m)
   7907 {
   7908 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7909 
   7910 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7911 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7912 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7913 	}
   7914 
   7915 	return true;
   7916 }
   7917 
   7918 static inline void
   7919 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7920     uint32_t errors, struct mbuf *m)
   7921 {
   7922 	struct wm_softc *sc = rxq->rxq_sc;
   7923 
   7924 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7925 		if (wm_rxdesc_is_set_status(sc, status,
   7926 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7927 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7928 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7929 			if (wm_rxdesc_is_set_error(sc, errors,
   7930 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7931 				m->m_pkthdr.csum_flags |=
   7932 					M_CSUM_IPv4_BAD;
   7933 		}
   7934 		if (wm_rxdesc_is_set_status(sc, status,
   7935 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7936 			/*
   7937 			 * Note: we don't know if this was TCP or UDP,
   7938 			 * so we just set both bits, and expect the
   7939 			 * upper layers to deal.
   7940 			 */
   7941 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7942 			m->m_pkthdr.csum_flags |=
   7943 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7944 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7945 			if (wm_rxdesc_is_set_error(sc, errors,
   7946 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7947 				m->m_pkthdr.csum_flags |=
   7948 					M_CSUM_TCP_UDP_BAD;
   7949 		}
   7950 	}
   7951 }
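
/*
 * Consumer-side sketch (illustrative, assuming the standard NetBSD
 * mbuf checksum semantics): upper layers test m->m_pkthdr.csum_flags
 * and skip the software checksum when the hardware already verified
 * it, e.g.
 *
 *	if ((m->m_pkthdr.csum_flags &
 *	    (M_CSUM_IPv4 | M_CSUM_IPv4_BAD)) == M_CSUM_IPv4)
 *		... IPv4 header checksum verified good in hardware ...
 */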
   7952 
    7953 /*
    7954  * wm_rxeof:
    7955  *
    7956  *	Helper; handle receive interrupts; process at most "limit" descriptors.
    7957  */
   7958 static void
   7959 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7960 {
   7961 	struct wm_softc *sc = rxq->rxq_sc;
   7962 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7963 	struct wm_rxsoft *rxs;
   7964 	struct mbuf *m;
   7965 	int i, len;
   7966 	int count = 0;
   7967 	uint32_t status, errors;
   7968 	uint16_t vlantag;
   7969 
   7970 	KASSERT(mutex_owned(rxq->rxq_lock));
   7971 
   7972 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7973 		if (limit-- == 0) {
   7974 			rxq->rxq_ptr = i;
   7975 			break;
   7976 		}
   7977 
   7978 		rxs = &rxq->rxq_soft[i];
   7979 
   7980 		DPRINTF(WM_DEBUG_RX,
   7981 		    ("%s: RX: checking descriptor %d\n",
   7982 		    device_xname(sc->sc_dev), i));
    7983 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7984 
   7985 		status = wm_rxdesc_get_status(rxq, i);
   7986 		errors = wm_rxdesc_get_errors(rxq, i);
   7987 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7988 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7989 #ifdef WM_DEBUG
   7990 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7991 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7992 #endif
   7993 
   7994 		if (!wm_rxdesc_dd(rxq, i, status)) {
    7995 			/*
    7996 			 * Update the receive pointer while holding
    7997 			 * rxq_lock, consistent with the counter updates.
    7998 			 */
   7999 			rxq->rxq_ptr = i;
   8000 			break;
   8001 		}
   8002 
   8003 		count++;
   8004 		if (__predict_false(rxq->rxq_discard)) {
   8005 			DPRINTF(WM_DEBUG_RX,
   8006 			    ("%s: RX: discarding contents of descriptor %d\n",
   8007 			    device_xname(sc->sc_dev), i));
   8008 			wm_init_rxdesc(rxq, i);
   8009 			if (wm_rxdesc_is_eop(rxq, status)) {
   8010 				/* Reset our state. */
   8011 				DPRINTF(WM_DEBUG_RX,
   8012 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8013 				    device_xname(sc->sc_dev)));
   8014 				rxq->rxq_discard = 0;
   8015 			}
   8016 			continue;
   8017 		}
   8018 
   8019 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8020 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8021 
   8022 		m = rxs->rxs_mbuf;
   8023 
   8024 		/*
   8025 		 * Add a new receive buffer to the ring, unless of
   8026 		 * course the length is zero. Treat the latter as a
   8027 		 * failed mapping.
   8028 		 */
   8029 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8030 			/*
   8031 			 * Failed, throw away what we've done so
   8032 			 * far, and discard the rest of the packet.
   8033 			 */
   8034 			ifp->if_ierrors++;
   8035 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8036 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8037 			wm_init_rxdesc(rxq, i);
   8038 			if (!wm_rxdesc_is_eop(rxq, status))
   8039 				rxq->rxq_discard = 1;
   8040 			if (rxq->rxq_head != NULL)
   8041 				m_freem(rxq->rxq_head);
   8042 			WM_RXCHAIN_RESET(rxq);
   8043 			DPRINTF(WM_DEBUG_RX,
   8044 			    ("%s: RX: Rx buffer allocation failed, "
   8045 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8046 			    rxq->rxq_discard ? " (discard)" : ""));
   8047 			continue;
   8048 		}
   8049 
   8050 		m->m_len = len;
   8051 		rxq->rxq_len += len;
   8052 		DPRINTF(WM_DEBUG_RX,
   8053 		    ("%s: RX: buffer at %p len %d\n",
   8054 		    device_xname(sc->sc_dev), m->m_data, len));
   8055 
   8056 		/* If this is not the end of the packet, keep looking. */
   8057 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8058 			WM_RXCHAIN_LINK(rxq, m);
   8059 			DPRINTF(WM_DEBUG_RX,
   8060 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8061 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8062 			continue;
   8063 		}
   8064 
    8065 		/*
    8066 		 * Okay, we have the entire packet now.  The chip is
    8067 		 * configured to include the FCS except on the I350, I354
    8068 		 * and I21[01] (not all chips can be configured to strip
    8069 		 * it), so we need to trim it here.  We may need to adjust
    8070 		 * the length of the previous mbuf in the chain if the
    8071 		 * current mbuf is too short.  Due to an erratum, RCTL_SECRC
    8072 		 * is always set in the RCTL register on the I350, so the
    8073 		 * FCS is already stripped there and we don't trim it.
    8074 		 */
   8075 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8076 		    && (sc->sc_type != WM_T_I210)
   8077 		    && (sc->sc_type != WM_T_I211)) {
   8078 			if (m->m_len < ETHER_CRC_LEN) {
   8079 				rxq->rxq_tail->m_len
   8080 				    -= (ETHER_CRC_LEN - m->m_len);
   8081 				m->m_len = 0;
   8082 			} else
   8083 				m->m_len -= ETHER_CRC_LEN;
   8084 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8085 		} else
   8086 			len = rxq->rxq_len;
   8087 
   8088 		WM_RXCHAIN_LINK(rxq, m);
   8089 
   8090 		*rxq->rxq_tailp = NULL;
   8091 		m = rxq->rxq_head;
   8092 
   8093 		WM_RXCHAIN_RESET(rxq);
   8094 
   8095 		DPRINTF(WM_DEBUG_RX,
   8096 		    ("%s: RX: have entire packet, len -> %d\n",
   8097 		    device_xname(sc->sc_dev), len));
   8098 
   8099 		/* If an error occurred, update stats and drop the packet. */
   8100 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8101 			m_freem(m);
   8102 			continue;
   8103 		}
   8104 
   8105 		/* No errors.  Receive the packet. */
   8106 		m_set_rcvif(m, ifp);
   8107 		m->m_pkthdr.len = len;
    8108 		/*
    8109 		 * TODO
    8110 		 * The rsshash and rsstype should be saved in this mbuf.
    8111 		 */
   8112 		DPRINTF(WM_DEBUG_RX,
   8113 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8114 			device_xname(sc->sc_dev), rsstype, rsshash));
   8115 
   8116 		/*
   8117 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8118 		 * for us.  Associate the tag with the packet.
   8119 		 */
   8120 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8121 			continue;
   8122 
   8123 		/* Set up checksum info for this packet. */
   8124 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8125 		/*
    8126 		 * Update the receive pointer while holding rxq_lock,
    8127 		 * consistent with the counter updates.
    8128 		 */
   8129 		rxq->rxq_ptr = i;
   8130 		rxq->rxq_packets++;
   8131 		rxq->rxq_bytes += len;
   8132 		mutex_exit(rxq->rxq_lock);
   8133 
   8134 		/* Pass it on. */
   8135 		if_percpuq_enqueue(sc->sc_ipq, m);
   8136 
   8137 		mutex_enter(rxq->rxq_lock);
   8138 
   8139 		if (rxq->rxq_stopping)
   8140 			break;
   8141 	}
   8142 
   8143 	if (count != 0)
   8144 		rnd_add_uint32(&sc->rnd_source, count);
   8145 
   8146 	DPRINTF(WM_DEBUG_RX,
   8147 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8148 }
   8149 
   8150 /*
   8151  * wm_linkintr_gmii:
   8152  *
   8153  *	Helper; handle link interrupts for GMII.
   8154  */
   8155 static void
   8156 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8157 {
   8158 
   8159 	KASSERT(WM_CORE_LOCKED(sc));
   8160 
   8161 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8162 		__func__));
   8163 
   8164 	if (icr & ICR_LSC) {
   8165 		uint32_t reg;
   8166 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8167 
   8168 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8169 			wm_gig_downshift_workaround_ich8lan(sc);
   8170 
   8171 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8172 			device_xname(sc->sc_dev)));
   8173 		mii_pollstat(&sc->sc_mii);
   8174 		if (sc->sc_type == WM_T_82543) {
   8175 			int miistatus, active;
   8176 
   8177 			/*
   8178 			 * With 82543, we need to force speed and
   8179 			 * duplex on the MAC equal to what the PHY
   8180 			 * speed and duplex configuration is.
   8181 			 */
   8182 			miistatus = sc->sc_mii.mii_media_status;
   8183 
   8184 			if (miistatus & IFM_ACTIVE) {
   8185 				active = sc->sc_mii.mii_media_active;
   8186 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8187 				switch (IFM_SUBTYPE(active)) {
   8188 				case IFM_10_T:
   8189 					sc->sc_ctrl |= CTRL_SPEED_10;
   8190 					break;
   8191 				case IFM_100_TX:
   8192 					sc->sc_ctrl |= CTRL_SPEED_100;
   8193 					break;
   8194 				case IFM_1000_T:
   8195 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8196 					break;
   8197 				default:
   8198 					/*
   8199 					 * fiber?
    8200 					 * Should not enter here.
   8201 					 */
   8202 					printf("unknown media (%x)\n", active);
   8203 					break;
   8204 				}
   8205 				if (active & IFM_FDX)
   8206 					sc->sc_ctrl |= CTRL_FD;
   8207 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8208 			}
   8209 		} else if ((sc->sc_type == WM_T_ICH8)
   8210 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8211 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8212 		} else if (sc->sc_type == WM_T_PCH) {
   8213 			wm_k1_gig_workaround_hv(sc,
   8214 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8215 		}
   8216 
   8217 		if ((sc->sc_phytype == WMPHY_82578)
   8218 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8219 			== IFM_1000_T)) {
   8220 
   8221 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8222 				delay(200*1000); /* XXX too big */
   8223 
   8224 				/* Link stall fix for link up */
   8225 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8226 				    HV_MUX_DATA_CTRL,
   8227 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8228 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8229 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8230 				    HV_MUX_DATA_CTRL,
   8231 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8232 			}
   8233 		}
   8234 		/*
   8235 		 * I217 Packet Loss issue:
   8236 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8237 		 * on power up.
   8238 		 * Set the Beacon Duration for I217 to 8 usec
   8239 		 */
   8240 		if ((sc->sc_type == WM_T_PCH_LPT)
   8241 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8242 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8243 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8244 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8245 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8246 		}
   8247 
   8248 		/* XXX Work-around I218 hang issue */
   8249 		/* e1000_k1_workaround_lpt_lp() */
   8250 
   8251 		if ((sc->sc_type == WM_T_PCH_LPT)
   8252 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8253 			/*
   8254 			 * Set platform power management values for Latency
   8255 			 * Tolerance Reporting (LTR)
   8256 			 */
   8257 			wm_platform_pm_pch_lpt(sc,
   8258 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8259 				    != 0));
   8260 		}
   8261 
   8262 		/* FEXTNVM6 K1-off workaround */
   8263 		if (sc->sc_type == WM_T_PCH_SPT) {
   8264 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8265 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8266 			    & FEXTNVM6_K1_OFF_ENABLE)
   8267 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8268 			else
   8269 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8270 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8271 		}
   8272 	} else if (icr & ICR_RXSEQ) {
   8273 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8274 			device_xname(sc->sc_dev)));
   8275 	}
   8276 }
   8277 
   8278 /*
   8279  * wm_linkintr_tbi:
   8280  *
   8281  *	Helper; handle link interrupts for TBI mode.
   8282  */
   8283 static void
   8284 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8285 {
   8286 	uint32_t status;
   8287 
   8288 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8289 		__func__));
   8290 
   8291 	status = CSR_READ(sc, WMREG_STATUS);
   8292 	if (icr & ICR_LSC) {
   8293 		if (status & STATUS_LU) {
   8294 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8295 			    device_xname(sc->sc_dev),
   8296 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8297 			/*
   8298 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8299 			 * so we should update sc->sc_ctrl
   8300 			 */
   8301 
   8302 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8303 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8304 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8305 			if (status & STATUS_FD)
   8306 				sc->sc_tctl |=
   8307 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8308 			else
   8309 				sc->sc_tctl |=
   8310 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8311 			if (sc->sc_ctrl & CTRL_TFCE)
   8312 				sc->sc_fcrtl |= FCRTL_XONE;
   8313 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8314 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8315 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8316 				      sc->sc_fcrtl);
   8317 			sc->sc_tbi_linkup = 1;
   8318 		} else {
   8319 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8320 			    device_xname(sc->sc_dev)));
   8321 			sc->sc_tbi_linkup = 0;
   8322 		}
   8323 		/* Update LED */
   8324 		wm_tbi_serdes_set_linkled(sc);
   8325 	} else if (icr & ICR_RXSEQ) {
   8326 		DPRINTF(WM_DEBUG_LINK,
   8327 		    ("%s: LINK: Receive sequence error\n",
   8328 		    device_xname(sc->sc_dev)));
   8329 	}
   8330 }
   8331 
   8332 /*
   8333  * wm_linkintr_serdes:
   8334  *
    8335  *	Helper; handle link interrupts for SERDES mode.
   8336  */
   8337 static void
   8338 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8339 {
   8340 	struct mii_data *mii = &sc->sc_mii;
   8341 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8342 	uint32_t pcs_adv, pcs_lpab, reg;
   8343 
   8344 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8345 		__func__));
   8346 
   8347 	if (icr & ICR_LSC) {
   8348 		/* Check PCS */
   8349 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8350 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8351 			mii->mii_media_status |= IFM_ACTIVE;
   8352 			sc->sc_tbi_linkup = 1;
   8353 		} else {
    8354 			mii->mii_media_active |= IFM_NONE;
   8355 			sc->sc_tbi_linkup = 0;
   8356 			wm_tbi_serdes_set_linkled(sc);
   8357 			return;
   8358 		}
   8359 		mii->mii_media_active |= IFM_1000_SX;
   8360 		if ((reg & PCS_LSTS_FDX) != 0)
   8361 			mii->mii_media_active |= IFM_FDX;
   8362 		else
   8363 			mii->mii_media_active |= IFM_HDX;
   8364 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8365 			/* Check flow */
   8366 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8367 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8368 				DPRINTF(WM_DEBUG_LINK,
   8369 				    ("XXX LINKOK but not ACOMP\n"));
   8370 				return;
   8371 			}
   8372 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8373 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8374 			DPRINTF(WM_DEBUG_LINK,
   8375 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8376 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8377 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8378 				mii->mii_media_active |= IFM_FLOW
   8379 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8380 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8381 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8382 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8383 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8384 				mii->mii_media_active |= IFM_FLOW
   8385 				    | IFM_ETH_TXPAUSE;
   8386 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8387 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8388 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8389 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8390 				mii->mii_media_active |= IFM_FLOW
   8391 				    | IFM_ETH_RXPAUSE;
   8392 		}
   8393 		/* Update LED */
   8394 		wm_tbi_serdes_set_linkled(sc);
   8395 	} else {
   8396 		DPRINTF(WM_DEBUG_LINK,
   8397 		    ("%s: LINK: Receive sequence error\n",
   8398 		    device_xname(sc->sc_dev)));
   8399 	}
   8400 }
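
/*
 * For reference, the pause-bit resolution in wm_linkintr_serdes()
 * above follows IEEE 802.3 Annex 28B: both sides advertising
 * symmetric pause enables TX and RX pause; a local asymmetric-only
 * advertisement against a partner advertising both yields TX-only
 * pause, and the mirrored case yields RX-only pause.
 */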
   8401 
   8402 /*
   8403  * wm_linkintr:
   8404  *
   8405  *	Helper; handle link interrupts.
   8406  */
   8407 static void
   8408 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8409 {
   8410 
   8411 	KASSERT(WM_CORE_LOCKED(sc));
   8412 
   8413 	if (sc->sc_flags & WM_F_HAS_MII)
   8414 		wm_linkintr_gmii(sc, icr);
   8415 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8416 	    && (sc->sc_type >= WM_T_82575))
   8417 		wm_linkintr_serdes(sc, icr);
   8418 	else
   8419 		wm_linkintr_tbi(sc, icr);
   8420 }
   8421 
   8422 /*
   8423  * wm_intr_legacy:
   8424  *
   8425  *	Interrupt service routine for INTx and MSI.
   8426  */
   8427 static int
   8428 wm_intr_legacy(void *arg)
   8429 {
   8430 	struct wm_softc *sc = arg;
   8431 	struct wm_queue *wmq = &sc->sc_queue[0];
   8432 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8433 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8434 	uint32_t icr, rndval = 0;
   8435 	int handled = 0;
   8436 
   8437 	DPRINTF(WM_DEBUG_TX,
   8438 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8439 	while (1 /* CONSTCOND */) {
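		/* Reading ICR acknowledges (clears) the asserted cause bits. */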
   8440 		icr = CSR_READ(sc, WMREG_ICR);
   8441 		if ((icr & sc->sc_icr) == 0)
   8442 			break;
   8443 		if (rndval == 0)
   8444 			rndval = icr;
   8445 
   8446 		mutex_enter(rxq->rxq_lock);
   8447 
   8448 		if (rxq->rxq_stopping) {
   8449 			mutex_exit(rxq->rxq_lock);
   8450 			break;
   8451 		}
   8452 
   8453 		handled = 1;
   8454 
   8455 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8456 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8457 			DPRINTF(WM_DEBUG_RX,
   8458 			    ("%s: RX: got Rx intr 0x%08x\n",
   8459 			    device_xname(sc->sc_dev),
   8460 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8461 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8462 		}
   8463 #endif
   8464 		wm_rxeof(rxq, UINT_MAX);
   8465 
   8466 		mutex_exit(rxq->rxq_lock);
   8467 		mutex_enter(txq->txq_lock);
   8468 
   8469 		if (txq->txq_stopping) {
   8470 			mutex_exit(txq->txq_lock);
   8471 			break;
   8472 		}
   8473 
   8474 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8475 		if (icr & ICR_TXDW) {
   8476 			DPRINTF(WM_DEBUG_TX,
   8477 			    ("%s: TX: got TXDW interrupt\n",
   8478 			    device_xname(sc->sc_dev)));
   8479 			WM_Q_EVCNT_INCR(txq, txdw);
   8480 		}
   8481 #endif
   8482 		wm_txeof(sc, txq);
   8483 
   8484 		mutex_exit(txq->txq_lock);
   8485 		WM_CORE_LOCK(sc);
   8486 
   8487 		if (sc->sc_core_stopping) {
   8488 			WM_CORE_UNLOCK(sc);
   8489 			break;
   8490 		}
   8491 
   8492 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8493 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8494 			wm_linkintr(sc, icr);
   8495 		}
   8496 
   8497 		WM_CORE_UNLOCK(sc);
   8498 
   8499 		if (icr & ICR_RXO) {
   8500 #if defined(WM_DEBUG)
   8501 			log(LOG_WARNING, "%s: Receive overrun\n",
   8502 			    device_xname(sc->sc_dev));
   8503 #endif /* defined(WM_DEBUG) */
   8504 		}
   8505 	}
   8506 
   8507 	rnd_add_uint32(&sc->rnd_source, rndval);
   8508 
   8509 	if (handled) {
   8510 		/* Try to get more packets going. */
   8511 		softint_schedule(wmq->wmq_si);
   8512 	}
   8513 
   8514 	return handled;
   8515 }
   8516 
   8517 static inline void
   8518 wm_txrxintr_disable(struct wm_queue *wmq)
   8519 {
   8520 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8521 
   8522 	if (sc->sc_type == WM_T_82574)
   8523 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8524 	else if (sc->sc_type == WM_T_82575)
   8525 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8526 	else
   8527 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8528 }
   8529 
   8530 static inline void
   8531 wm_txrxintr_enable(struct wm_queue *wmq)
   8532 {
   8533 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8534 
   8535 	wm_itrs_calculate(sc, wmq);
   8536 
   8537 	if (sc->sc_type == WM_T_82574)
   8538 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8539 	else if (sc->sc_type == WM_T_82575)
   8540 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8541 	else
   8542 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8543 }
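
/*
 * The two helpers above bracket per-queue interrupt handling.  A
 * sketch of the expected call pattern (see wm_txrxintr_msix() and
 * wm_handle_queue() below):
 *
 *	wm_txrxintr_disable(wmq);	mask this queue's vector
 *	... wm_txeof()/wm_rxeof() under the queue locks ...
 *	wm_txrxintr_enable(wmq);	recompute ITR and unmask
 */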
   8544 
   8545 static int
   8546 wm_txrxintr_msix(void *arg)
   8547 {
   8548 	struct wm_queue *wmq = arg;
   8549 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8550 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8551 	struct wm_softc *sc = txq->txq_sc;
   8552 	u_int limit = sc->sc_rx_intr_process_limit;
   8553 
   8554 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8555 
   8556 	DPRINTF(WM_DEBUG_TX,
   8557 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8558 
   8559 	wm_txrxintr_disable(wmq);
   8560 
   8561 	mutex_enter(txq->txq_lock);
   8562 
   8563 	if (txq->txq_stopping) {
   8564 		mutex_exit(txq->txq_lock);
   8565 		return 0;
   8566 	}
   8567 
   8568 	WM_Q_EVCNT_INCR(txq, txdw);
   8569 	wm_txeof(sc, txq);
   8570 	/* wm_deferred start() is done in wm_handle_queue(). */
   8571 	mutex_exit(txq->txq_lock);
   8572 
   8573 	DPRINTF(WM_DEBUG_RX,
   8574 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8575 	mutex_enter(rxq->rxq_lock);
   8576 
   8577 	if (rxq->rxq_stopping) {
   8578 		mutex_exit(rxq->rxq_lock);
   8579 		return 0;
   8580 	}
   8581 
   8582 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8583 	wm_rxeof(rxq, limit);
   8584 	mutex_exit(rxq->rxq_lock);
   8585 
   8586 	wm_itrs_writereg(sc, wmq);
   8587 
   8588 	softint_schedule(wmq->wmq_si);
   8589 
   8590 	return 1;
   8591 }
   8592 
   8593 static void
   8594 wm_handle_queue(void *arg)
   8595 {
   8596 	struct wm_queue *wmq = arg;
   8597 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8598 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8599 	struct wm_softc *sc = txq->txq_sc;
   8600 	u_int limit = sc->sc_rx_process_limit;
   8601 
   8602 	mutex_enter(txq->txq_lock);
   8603 	if (txq->txq_stopping) {
   8604 		mutex_exit(txq->txq_lock);
   8605 		return;
   8606 	}
   8607 	wm_txeof(sc, txq);
   8608 	wm_deferred_start_locked(txq);
   8609 	mutex_exit(txq->txq_lock);
   8610 
   8611 	mutex_enter(rxq->rxq_lock);
   8612 	if (rxq->rxq_stopping) {
   8613 		mutex_exit(rxq->rxq_lock);
   8614 		return;
   8615 	}
   8616 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8617 	wm_rxeof(rxq, limit);
   8618 	mutex_exit(rxq->rxq_lock);
   8619 
   8620 	wm_txrxintr_enable(wmq);
   8621 }
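
/*
 * Design note: wm_txrxintr_msix() above does a bounded amount of
 * work (sc_rx_intr_process_limit) with the vector masked, then
 * defers the rest to wm_handle_queue() via softint_schedule(),
 * which continues with sc_rx_process_limit and finally re-enables
 * the vector.  This bounds the time spent in interrupt context
 * under receive load.
 */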
   8622 
   8623 /*
   8624  * wm_linkintr_msix:
   8625  *
   8626  *	Interrupt service routine for link status change for MSI-X.
   8627  */
   8628 static int
   8629 wm_linkintr_msix(void *arg)
   8630 {
   8631 	struct wm_softc *sc = arg;
   8632 	uint32_t reg;
   8633 
   8634 	DPRINTF(WM_DEBUG_LINK,
   8635 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8636 
   8637 	reg = CSR_READ(sc, WMREG_ICR);
   8638 	WM_CORE_LOCK(sc);
   8639 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8640 		goto out;
   8641 
   8642 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8643 	wm_linkintr(sc, ICR_LSC);
   8644 
   8645 out:
   8646 	WM_CORE_UNLOCK(sc);
   8647 
   8648 	if (sc->sc_type == WM_T_82574)
   8649 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8650 	else if (sc->sc_type == WM_T_82575)
   8651 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8652 	else
   8653 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8654 
   8655 	return 1;
   8656 }
   8657 
   8658 /*
   8659  * Media related.
   8660  * GMII, SGMII, TBI (and SERDES)
   8661  */
   8662 
   8663 /* Common */
   8664 
   8665 /*
   8666  * wm_tbi_serdes_set_linkled:
   8667  *
   8668  *	Update the link LED on TBI and SERDES devices.
   8669  */
   8670 static void
   8671 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8672 {
   8673 
   8674 	if (sc->sc_tbi_linkup)
   8675 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8676 	else
   8677 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8678 
   8679 	/* 82540 or newer devices are active low */
   8680 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8681 
   8682 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8683 }
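
/*
 * Note on the XOR above: the LED pin is active low on 82540 and
 * newer parts, so with sc_tbi_linkup set, CTRL_SWDPIN(0) is first
 * set and then inverted away, driving the pin low to light the
 * link LED; older parts drive the pin high instead.
 */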
   8684 
   8685 /* GMII related */
   8686 
   8687 /*
   8688  * wm_gmii_reset:
   8689  *
   8690  *	Reset the PHY.
   8691  */
   8692 static void
   8693 wm_gmii_reset(struct wm_softc *sc)
   8694 {
   8695 	uint32_t reg;
   8696 	int rv;
   8697 
   8698 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8699 		device_xname(sc->sc_dev), __func__));
   8700 
   8701 	rv = sc->phy.acquire(sc);
   8702 	if (rv != 0) {
   8703 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8704 		    __func__);
   8705 		return;
   8706 	}
   8707 
   8708 	switch (sc->sc_type) {
   8709 	case WM_T_82542_2_0:
   8710 	case WM_T_82542_2_1:
   8711 		/* null */
   8712 		break;
   8713 	case WM_T_82543:
   8714 		/*
   8715 		 * With 82543, we need to force speed and duplex on the MAC
   8716 		 * equal to what the PHY speed and duplex configuration is.
   8717 		 * In addition, we need to perform a hardware reset on the PHY
   8718 		 * to take it out of reset.
   8719 		 */
   8720 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8721 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8722 
   8723 		/* The PHY reset pin is active-low. */
   8724 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8725 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8726 		    CTRL_EXT_SWDPIN(4));
   8727 		reg |= CTRL_EXT_SWDPIO(4);
   8728 
   8729 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8730 		CSR_WRITE_FLUSH(sc);
   8731 		delay(10*1000);
   8732 
   8733 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8734 		CSR_WRITE_FLUSH(sc);
   8735 		delay(150);
   8736 #if 0
   8737 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8738 #endif
   8739 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8740 		break;
   8741 	case WM_T_82544:	/* reset 10000us */
   8742 	case WM_T_82540:
   8743 	case WM_T_82545:
   8744 	case WM_T_82545_3:
   8745 	case WM_T_82546:
   8746 	case WM_T_82546_3:
   8747 	case WM_T_82541:
   8748 	case WM_T_82541_2:
   8749 	case WM_T_82547:
   8750 	case WM_T_82547_2:
   8751 	case WM_T_82571:	/* reset 100us */
   8752 	case WM_T_82572:
   8753 	case WM_T_82573:
   8754 	case WM_T_82574:
   8755 	case WM_T_82575:
   8756 	case WM_T_82576:
   8757 	case WM_T_82580:
   8758 	case WM_T_I350:
   8759 	case WM_T_I354:
   8760 	case WM_T_I210:
   8761 	case WM_T_I211:
   8762 	case WM_T_82583:
   8763 	case WM_T_80003:
   8764 		/* generic reset */
   8765 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8766 		CSR_WRITE_FLUSH(sc);
   8767 		delay(20000);
   8768 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8769 		CSR_WRITE_FLUSH(sc);
   8770 		delay(20000);
   8771 
   8772 		if ((sc->sc_type == WM_T_82541)
   8773 		    || (sc->sc_type == WM_T_82541_2)
   8774 		    || (sc->sc_type == WM_T_82547)
   8775 		    || (sc->sc_type == WM_T_82547_2)) {
    8776 			/* Workarounds for IGP are done in igp_reset() */
   8777 			/* XXX add code to set LED after phy reset */
   8778 		}
   8779 		break;
   8780 	case WM_T_ICH8:
   8781 	case WM_T_ICH9:
   8782 	case WM_T_ICH10:
   8783 	case WM_T_PCH:
   8784 	case WM_T_PCH2:
   8785 	case WM_T_PCH_LPT:
   8786 	case WM_T_PCH_SPT:
   8787 		/* generic reset */
   8788 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8789 		CSR_WRITE_FLUSH(sc);
   8790 		delay(100);
   8791 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8792 		CSR_WRITE_FLUSH(sc);
   8793 		delay(150);
   8794 		break;
   8795 	default:
   8796 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8797 		    __func__);
   8798 		break;
   8799 	}
   8800 
   8801 	sc->phy.release(sc);
   8802 
   8803 	/* get_cfg_done */
   8804 	wm_get_cfg_done(sc);
   8805 
   8806 	/* extra setup */
   8807 	switch (sc->sc_type) {
   8808 	case WM_T_82542_2_0:
   8809 	case WM_T_82542_2_1:
   8810 	case WM_T_82543:
   8811 	case WM_T_82544:
   8812 	case WM_T_82540:
   8813 	case WM_T_82545:
   8814 	case WM_T_82545_3:
   8815 	case WM_T_82546:
   8816 	case WM_T_82546_3:
   8817 	case WM_T_82541_2:
   8818 	case WM_T_82547_2:
   8819 	case WM_T_82571:
   8820 	case WM_T_82572:
   8821 	case WM_T_82573:
   8822 	case WM_T_82575:
   8823 	case WM_T_82576:
   8824 	case WM_T_82580:
   8825 	case WM_T_I350:
   8826 	case WM_T_I354:
   8827 	case WM_T_I210:
   8828 	case WM_T_I211:
   8829 	case WM_T_80003:
   8830 		/* null */
   8831 		break;
   8832 	case WM_T_82574:
   8833 	case WM_T_82583:
   8834 		wm_lplu_d0_disable(sc);
   8835 		break;
   8836 	case WM_T_82541:
   8837 	case WM_T_82547:
   8838 		/* XXX Configure actively LED after PHY reset */
   8839 		break;
   8840 	case WM_T_ICH8:
   8841 	case WM_T_ICH9:
   8842 	case WM_T_ICH10:
   8843 	case WM_T_PCH:
   8844 	case WM_T_PCH2:
   8845 	case WM_T_PCH_LPT:
   8846 	case WM_T_PCH_SPT:
    8847 		/* Allow time for h/w to get to a quiescent state after reset */
   8848 		delay(10*1000);
   8849 
   8850 		if (sc->sc_type == WM_T_PCH)
   8851 			wm_hv_phy_workaround_ich8lan(sc);
   8852 
   8853 		if (sc->sc_type == WM_T_PCH2)
   8854 			wm_lv_phy_workaround_ich8lan(sc);
   8855 
   8856 		/* Clear the host wakeup bit after lcd reset */
   8857 		if (sc->sc_type >= WM_T_PCH) {
   8858 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8859 			    BM_PORT_GEN_CFG);
   8860 			reg &= ~BM_WUC_HOST_WU_BIT;
   8861 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8862 			    BM_PORT_GEN_CFG, reg);
   8863 		}
   8864 
   8865 		/*
    8866 		 * XXX Configure the LCD with the extended configuration region
   8867 		 * in NVM
   8868 		 */
   8869 
   8870 		/* Disable D0 LPLU. */
   8871 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8872 			wm_lplu_d0_disable_pch(sc);
   8873 		else
   8874 			wm_lplu_d0_disable(sc);	/* ICH* */
   8875 		break;
   8876 	default:
   8877 		panic("%s: unknown type\n", __func__);
   8878 		break;
   8879 	}
   8880 }
   8881 
    8882 /*
    8883  * Set up sc_phytype and mii_{read|write}reg.
    8884  *
    8885  *  To identify the PHY type, the correct read/write functions must be
    8886  * selected, and to select them the PCI ID or MAC type is needed
    8887  * without accessing PHY registers.
    8888  *
    8889  *  On the first call of this function, the PHY ID is not known yet, so
    8890  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8891  * so the result might be incorrect.
    8892  *
    8893  *  On the second call, the PHY OUI and model are used to identify the
    8894  * PHY type. It might not be perfect because of missing table entries,
    8895  * but it should be better than the first call.
    8896  *
    8897  *  If the newly detected result differs from the previous assumption,
    8898  * a diagnostic message is printed.
    8899  */
   8900 static void
   8901 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8902     uint16_t phy_model)
   8903 {
   8904 	device_t dev = sc->sc_dev;
   8905 	struct mii_data *mii = &sc->sc_mii;
   8906 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8907 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8908 	mii_readreg_t new_readreg;
   8909 	mii_writereg_t new_writereg;
   8910 
   8911 	if (mii->mii_readreg == NULL) {
   8912 		/*
   8913 		 *  This is the first call of this function. For ICH and PCH
   8914 		 * variants, it's difficult to determine the PHY access method
   8915 		 * by sc_type, so use the PCI product ID for some devices.
   8916 		 */
   8917 
   8918 		switch (sc->sc_pcidevid) {
   8919 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8920 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8921 			/* 82577 */
   8922 			new_phytype = WMPHY_82577;
   8923 			break;
   8924 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8925 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8926 			/* 82578 */
   8927 			new_phytype = WMPHY_82578;
   8928 			break;
   8929 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8930 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8931 			/* 82579 */
   8932 			new_phytype = WMPHY_82579;
   8933 			break;
   8934 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8935 		case PCI_PRODUCT_INTEL_82801I_BM:
   8936 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8937 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8938 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8939 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8940 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8941 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8942 			/* ICH8, 9, 10 with 82567 */
   8943 			new_phytype = WMPHY_BM;
   8944 			break;
   8945 		default:
   8946 			break;
   8947 		}
   8948 	} else {
   8949 		/* It's not the first call. Use PHY OUI and model */
   8950 		switch (phy_oui) {
   8951 		case MII_OUI_ATHEROS: /* XXX ??? */
   8952 			switch (phy_model) {
   8953 			case 0x0004: /* XXX */
   8954 				new_phytype = WMPHY_82578;
   8955 				break;
   8956 			default:
   8957 				break;
   8958 			}
   8959 			break;
   8960 		case MII_OUI_xxMARVELL:
   8961 			switch (phy_model) {
   8962 			case MII_MODEL_xxMARVELL_I210:
   8963 				new_phytype = WMPHY_I210;
   8964 				break;
   8965 			case MII_MODEL_xxMARVELL_E1011:
   8966 			case MII_MODEL_xxMARVELL_E1000_3:
   8967 			case MII_MODEL_xxMARVELL_E1000_5:
   8968 			case MII_MODEL_xxMARVELL_E1112:
   8969 				new_phytype = WMPHY_M88;
   8970 				break;
   8971 			case MII_MODEL_xxMARVELL_E1149:
   8972 				new_phytype = WMPHY_BM;
   8973 				break;
   8974 			case MII_MODEL_xxMARVELL_E1111:
   8975 			case MII_MODEL_xxMARVELL_I347:
   8976 			case MII_MODEL_xxMARVELL_E1512:
   8977 			case MII_MODEL_xxMARVELL_E1340M:
   8978 			case MII_MODEL_xxMARVELL_E1543:
   8979 				new_phytype = WMPHY_M88;
   8980 				break;
   8981 			case MII_MODEL_xxMARVELL_I82563:
   8982 				new_phytype = WMPHY_GG82563;
   8983 				break;
   8984 			default:
   8985 				break;
   8986 			}
   8987 			break;
   8988 		case MII_OUI_INTEL:
   8989 			switch (phy_model) {
   8990 			case MII_MODEL_INTEL_I82577:
   8991 				new_phytype = WMPHY_82577;
   8992 				break;
   8993 			case MII_MODEL_INTEL_I82579:
   8994 				new_phytype = WMPHY_82579;
   8995 				break;
   8996 			case MII_MODEL_INTEL_I217:
   8997 				new_phytype = WMPHY_I217;
   8998 				break;
   8999 			case MII_MODEL_INTEL_I82580:
   9000 			case MII_MODEL_INTEL_I350:
   9001 				new_phytype = WMPHY_82580;
   9002 				break;
   9003 			default:
   9004 				break;
   9005 			}
   9006 			break;
   9007 		case MII_OUI_yyINTEL:
   9008 			switch (phy_model) {
   9009 			case MII_MODEL_yyINTEL_I82562G:
   9010 			case MII_MODEL_yyINTEL_I82562EM:
   9011 			case MII_MODEL_yyINTEL_I82562ET:
   9012 				new_phytype = WMPHY_IFE;
   9013 				break;
   9014 			case MII_MODEL_yyINTEL_IGP01E1000:
   9015 				new_phytype = WMPHY_IGP;
   9016 				break;
   9017 			case MII_MODEL_yyINTEL_I82566:
   9018 				new_phytype = WMPHY_IGP_3;
   9019 				break;
   9020 			default:
   9021 				break;
   9022 			}
   9023 			break;
   9024 		default:
   9025 			break;
   9026 		}
   9027 		if (new_phytype == WMPHY_UNKNOWN)
   9028 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9029 			    __func__);
   9030 
    9031 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9032 		    && (sc->sc_phytype != new_phytype)) {
    9033 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9034 			    "was incorrect. PHY type from PHY ID = %u\n",
    9035 			    sc->sc_phytype, new_phytype);
    9036 		}
   9037 	}
   9038 
   9039 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9040 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9041 		/* SGMII */
   9042 		new_readreg = wm_sgmii_readreg;
   9043 		new_writereg = wm_sgmii_writereg;
    9044 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9045 		/* BM2 (phyaddr == 1) */
   9046 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9047 		    && (new_phytype != WMPHY_BM)
   9048 		    && (new_phytype != WMPHY_UNKNOWN))
   9049 			doubt_phytype = new_phytype;
   9050 		new_phytype = WMPHY_BM;
   9051 		new_readreg = wm_gmii_bm_readreg;
   9052 		new_writereg = wm_gmii_bm_writereg;
   9053 	} else if (sc->sc_type >= WM_T_PCH) {
   9054 		/* All PCH* use _hv_ */
   9055 		new_readreg = wm_gmii_hv_readreg;
   9056 		new_writereg = wm_gmii_hv_writereg;
   9057 	} else if (sc->sc_type >= WM_T_ICH8) {
   9058 		/* non-82567 ICH8, 9 and 10 */
   9059 		new_readreg = wm_gmii_i82544_readreg;
   9060 		new_writereg = wm_gmii_i82544_writereg;
   9061 	} else if (sc->sc_type >= WM_T_80003) {
   9062 		/* 80003 */
   9063 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9064 		    && (new_phytype != WMPHY_GG82563)
   9065 		    && (new_phytype != WMPHY_UNKNOWN))
   9066 			doubt_phytype = new_phytype;
   9067 		new_phytype = WMPHY_GG82563;
   9068 		new_readreg = wm_gmii_i80003_readreg;
   9069 		new_writereg = wm_gmii_i80003_writereg;
   9070 	} else if (sc->sc_type >= WM_T_I210) {
   9071 		/* I210 and I211 */
   9072 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9073 		    && (new_phytype != WMPHY_I210)
   9074 		    && (new_phytype != WMPHY_UNKNOWN))
   9075 			doubt_phytype = new_phytype;
   9076 		new_phytype = WMPHY_I210;
   9077 		new_readreg = wm_gmii_gs40g_readreg;
   9078 		new_writereg = wm_gmii_gs40g_writereg;
   9079 	} else if (sc->sc_type >= WM_T_82580) {
   9080 		/* 82580, I350 and I354 */
   9081 		new_readreg = wm_gmii_82580_readreg;
   9082 		new_writereg = wm_gmii_82580_writereg;
   9083 	} else if (sc->sc_type >= WM_T_82544) {
    9084 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9085 		new_readreg = wm_gmii_i82544_readreg;
   9086 		new_writereg = wm_gmii_i82544_writereg;
   9087 	} else {
   9088 		new_readreg = wm_gmii_i82543_readreg;
   9089 		new_writereg = wm_gmii_i82543_writereg;
   9090 	}
   9091 
   9092 	if (new_phytype == WMPHY_BM) {
   9093 		/* All BM use _bm_ */
   9094 		new_readreg = wm_gmii_bm_readreg;
   9095 		new_writereg = wm_gmii_bm_writereg;
   9096 	}
   9097 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9098 		/* All PCH* use _hv_ */
   9099 		new_readreg = wm_gmii_hv_readreg;
   9100 		new_writereg = wm_gmii_hv_writereg;
   9101 	}
   9102 
   9103 	/* Diag output */
   9104 	if (doubt_phytype != WMPHY_UNKNOWN)
   9105 		aprint_error_dev(dev, "Assumed new PHY type was "
   9106 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9107 		    new_phytype);
   9108 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9109 	    && (sc->sc_phytype != new_phytype))
    9110 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9111 		    "was incorrect. New PHY type = %u\n",
   9112 		    sc->sc_phytype, new_phytype);
   9113 
   9114 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9115 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9116 
   9117 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9118 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9119 		    "function was incorrect.\n");
   9120 
   9121 	/* Update now */
   9122 	sc->sc_phytype = new_phytype;
   9123 	mii->mii_readreg = new_readreg;
   9124 	mii->mii_writereg = new_writereg;
   9125 }
   9126 
   9127 /*
   9128  * wm_get_phy_id_82575:
   9129  *
    9130  *	Return the PHY ID, or -1 on failure.
   9131  */
   9132 static int
   9133 wm_get_phy_id_82575(struct wm_softc *sc)
   9134 {
   9135 	uint32_t reg;
   9136 	int phyid = -1;
   9137 
   9138 	/* XXX */
   9139 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9140 		return -1;
   9141 
   9142 	if (wm_sgmii_uses_mdio(sc)) {
   9143 		switch (sc->sc_type) {
   9144 		case WM_T_82575:
   9145 		case WM_T_82576:
   9146 			reg = CSR_READ(sc, WMREG_MDIC);
   9147 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9148 			break;
   9149 		case WM_T_82580:
   9150 		case WM_T_I350:
   9151 		case WM_T_I354:
   9152 		case WM_T_I210:
   9153 		case WM_T_I211:
   9154 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9155 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9156 			break;
   9157 		default:
   9158 			return -1;
   9159 		}
   9160 	}
   9161 
   9162 	return phyid;
   9163 }
   9164 
   9165 
   9166 /*
   9167  * wm_gmii_mediainit:
   9168  *
   9169  *	Initialize media for use on 1000BASE-T devices.
   9170  */
   9171 static void
   9172 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9173 {
   9174 	device_t dev = sc->sc_dev;
   9175 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9176 	struct mii_data *mii = &sc->sc_mii;
   9177 	uint32_t reg;
   9178 
   9179 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9180 		device_xname(sc->sc_dev), __func__));
   9181 
   9182 	/* We have GMII. */
   9183 	sc->sc_flags |= WM_F_HAS_MII;
   9184 
   9185 	if (sc->sc_type == WM_T_80003)
   9186 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9187 	else
   9188 		sc->sc_tipg = TIPG_1000T_DFLT;
   9189 
   9190 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9191 	if ((sc->sc_type == WM_T_82580)
   9192 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9193 	    || (sc->sc_type == WM_T_I211)) {
   9194 		reg = CSR_READ(sc, WMREG_PHPM);
   9195 		reg &= ~PHPM_GO_LINK_D;
   9196 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9197 	}
   9198 
   9199 	/*
   9200 	 * Let the chip set speed/duplex on its own based on
   9201 	 * signals from the PHY.
   9202 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9203 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9204 	 */
   9205 	sc->sc_ctrl |= CTRL_SLU;
   9206 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9207 
   9208 	/* Initialize our media structures and probe the GMII. */
   9209 	mii->mii_ifp = ifp;
   9210 
   9211 	/*
    9212 	 * The first call of wm_gmii_setup_phytype. The result might be
   9213 	 * incorrect.
   9214 	 */
   9215 	wm_gmii_setup_phytype(sc, 0, 0);
   9216 
   9217 	mii->mii_statchg = wm_gmii_statchg;
   9218 
   9219 	/* get PHY control from SMBus to PCIe */
   9220 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9221 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9222 		wm_smbustopci(sc);
   9223 
   9224 	wm_gmii_reset(sc);
   9225 
   9226 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9227 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9228 	    wm_gmii_mediastatus);
   9229 
   9230 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9231 	    || (sc->sc_type == WM_T_82580)
   9232 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9233 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9234 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9235 			/* Attach only one port */
   9236 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9237 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9238 		} else {
   9239 			int i, id;
   9240 			uint32_t ctrl_ext;
   9241 
   9242 			id = wm_get_phy_id_82575(sc);
   9243 			if (id != -1) {
   9244 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9245 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9246 			}
   9247 			if ((id == -1)
   9248 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9249 				/* Power on sgmii phy if it is disabled */
   9250 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9251 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9252 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9253 				CSR_WRITE_FLUSH(sc);
   9254 				delay(300*1000); /* XXX too long */
   9255 
    9256 				/* try PHY addresses from 1 to 7 */
   9257 				for (i = 1; i < 8; i++)
   9258 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9259 					    0xffffffff, i, MII_OFFSET_ANY,
   9260 					    MIIF_DOPAUSE);
   9261 
   9262 				/* restore previous sfp cage power state */
   9263 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9264 			}
   9265 		}
   9266 	} else {
   9267 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9268 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9269 	}
   9270 
   9271 	/*
   9272 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9273 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9274 	 */
   9275 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9276 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9277 		wm_set_mdio_slow_mode_hv(sc);
   9278 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9279 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9280 	}
   9281 
   9282 	/*
   9283 	 * (For ICH8 variants)
   9284 	 * If PHY detection failed, use BM's r/w function and retry.
   9285 	 */
   9286 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9287 		/* if failed, retry with *_bm_* */
   9288 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9289 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9290 		    sc->sc_phytype);
   9291 		sc->sc_phytype = WMPHY_BM;
   9292 		mii->mii_readreg = wm_gmii_bm_readreg;
   9293 		mii->mii_writereg = wm_gmii_bm_writereg;
   9294 
   9295 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9296 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9297 	}
   9298 
   9299 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9300 		/* No PHY was found */
   9301 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9302 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9303 		sc->sc_phytype = WMPHY_NONE;
   9304 	} else {
   9305 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9306 
   9307 		/*
    9308 		 * PHY found!  Check the PHY type again via the second call
    9309 		 * of wm_gmii_setup_phytype.
   9310 		 */
   9311 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9312 		    child->mii_mpd_model);
   9313 
   9314 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9315 	}
   9316 }
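
/*
 * In summary, PHY attachment above proceeds as a ladder: first with
 * the access functions guessed from the PCI ID or MAC type, then
 * (on PCH2/PCH_LPT) again in MDIO slow mode, then with the BM
 * access functions, and only when every attempt fails does the
 * interface fall back to IFM_NONE with sc_phytype = WMPHY_NONE.
 */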
   9317 
   9318 /*
   9319  * wm_gmii_mediachange:	[ifmedia interface function]
   9320  *
   9321  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9322  */
   9323 static int
   9324 wm_gmii_mediachange(struct ifnet *ifp)
   9325 {
   9326 	struct wm_softc *sc = ifp->if_softc;
   9327 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9328 	int rc;
   9329 
   9330 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9331 		device_xname(sc->sc_dev), __func__));
   9332 	if ((ifp->if_flags & IFF_UP) == 0)
   9333 		return 0;
   9334 
   9335 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9336 	sc->sc_ctrl |= CTRL_SLU;
   9337 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9338 	    || (sc->sc_type > WM_T_82543)) {
   9339 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9340 	} else {
   9341 		sc->sc_ctrl &= ~CTRL_ASDE;
   9342 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9343 		if (ife->ifm_media & IFM_FDX)
   9344 			sc->sc_ctrl |= CTRL_FD;
   9345 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9346 		case IFM_10_T:
   9347 			sc->sc_ctrl |= CTRL_SPEED_10;
   9348 			break;
   9349 		case IFM_100_TX:
   9350 			sc->sc_ctrl |= CTRL_SPEED_100;
   9351 			break;
   9352 		case IFM_1000_T:
   9353 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9354 			break;
   9355 		default:
   9356 			panic("wm_gmii_mediachange: bad media 0x%x",
   9357 			    ife->ifm_media);
   9358 		}
   9359 	}
   9360 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9361 	if (sc->sc_type <= WM_T_82543)
   9362 		wm_gmii_reset(sc);
   9363 
   9364 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9365 		return 0;
   9366 	return rc;
   9367 }
   9368 
   9369 /*
   9370  * wm_gmii_mediastatus:	[ifmedia interface function]
   9371  *
   9372  *	Get the current interface media status on a 1000BASE-T device.
   9373  */
   9374 static void
   9375 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9376 {
   9377 	struct wm_softc *sc = ifp->if_softc;
   9378 
   9379 	ether_mediastatus(ifp, ifmr);
   9380 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9381 	    | sc->sc_flowflags;
   9382 }
   9383 
   9384 #define	MDI_IO		CTRL_SWDPIN(2)
   9385 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9386 #define	MDI_CLK		CTRL_SWDPIN(3)
   9387 
   9388 static void
   9389 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9390 {
   9391 	uint32_t i, v;
   9392 
   9393 	v = CSR_READ(sc, WMREG_CTRL);
   9394 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9395 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9396 
   9397 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9398 		if (data & i)
   9399 			v |= MDI_IO;
   9400 		else
   9401 			v &= ~MDI_IO;
   9402 		CSR_WRITE(sc, WMREG_CTRL, v);
   9403 		CSR_WRITE_FLUSH(sc);
   9404 		delay(10);
   9405 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9406 		CSR_WRITE_FLUSH(sc);
   9407 		delay(10);
   9408 		CSR_WRITE(sc, WMREG_CTRL, v);
   9409 		CSR_WRITE_FLUSH(sc);
   9410 		delay(10);
   9411 	}
   9412 }
   9413 
   9414 static uint32_t
   9415 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9416 {
   9417 	uint32_t v, i, data = 0;
   9418 
   9419 	v = CSR_READ(sc, WMREG_CTRL);
   9420 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9421 	v |= CTRL_SWDPIO(3);
   9422 
   9423 	CSR_WRITE(sc, WMREG_CTRL, v);
   9424 	CSR_WRITE_FLUSH(sc);
   9425 	delay(10);
   9426 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9427 	CSR_WRITE_FLUSH(sc);
   9428 	delay(10);
   9429 	CSR_WRITE(sc, WMREG_CTRL, v);
   9430 	CSR_WRITE_FLUSH(sc);
   9431 	delay(10);
   9432 
   9433 	for (i = 0; i < 16; i++) {
   9434 		data <<= 1;
   9435 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9436 		CSR_WRITE_FLUSH(sc);
   9437 		delay(10);
   9438 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9439 			data |= 1;
   9440 		CSR_WRITE(sc, WMREG_CTRL, v);
   9441 		CSR_WRITE_FLUSH(sc);
   9442 		delay(10);
   9443 	}
   9444 
   9445 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9446 	CSR_WRITE_FLUSH(sc);
   9447 	delay(10);
   9448 	CSR_WRITE(sc, WMREG_CTRL, v);
   9449 	CSR_WRITE_FLUSH(sc);
   9450 	delay(10);
   9451 
   9452 	return data;
   9453 }
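
/*
 * The two helpers above bit-bang an IEEE 802.3 clause-22 MDIO frame
 * on the software-definable pins.  For a read (see
 * wm_gmii_i82543_readreg() below) the wire sequence is roughly:
 *
 *	<32 preamble ones> <start 01> <op "read" 10> <phy[4:0]>
 *	<reg[4:0]> <turnaround> <16 data bits sampled from MDI_IO>
 *
 * with each bit cell clocked by the MDI_CLK toggles and the ~10us
 * delays in the loops above.
 */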
   9454 
   9455 #undef MDI_IO
   9456 #undef MDI_DIR
   9457 #undef MDI_CLK
   9458 
   9459 /*
   9460  * wm_gmii_i82543_readreg:	[mii interface function]
   9461  *
   9462  *	Read a PHY register on the GMII (i82543 version).
   9463  */
   9464 static int
   9465 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9466 {
   9467 	struct wm_softc *sc = device_private(self);
   9468 	int rv;
   9469 
   9470 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9471 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9472 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9473 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9474 
   9475 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9476 	    device_xname(sc->sc_dev), phy, reg, rv));
   9477 
   9478 	return rv;
   9479 }
   9480 
   9481 /*
   9482  * wm_gmii_i82543_writereg:	[mii interface function]
   9483  *
   9484  *	Write a PHY register on the GMII (i82543 version).
   9485  */
   9486 static void
   9487 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9488 {
   9489 	struct wm_softc *sc = device_private(self);
   9490 
   9491 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9492 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9493 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9494 	    (MII_COMMAND_START << 30), 32);
   9495 }
   9496 
   9497 /*
   9498  * wm_gmii_mdic_readreg:	[mii interface function]
   9499  *
   9500  *	Read a PHY register on the GMII.
   9501  */
   9502 static int
   9503 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9504 {
   9505 	struct wm_softc *sc = device_private(self);
   9506 	uint32_t mdic = 0;
   9507 	int i, rv;
   9508 
   9509 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9510 	    MDIC_REGADD(reg));
   9511 
   9512 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9513 		mdic = CSR_READ(sc, WMREG_MDIC);
   9514 		if (mdic & MDIC_READY)
   9515 			break;
   9516 		delay(50);
   9517 	}
   9518 
   9519 	if ((mdic & MDIC_READY) == 0) {
   9520 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9521 		    device_xname(sc->sc_dev), phy, reg);
   9522 		rv = 0;
   9523 	} else if (mdic & MDIC_E) {
   9524 #if 0 /* This is normal if no PHY is present. */
   9525 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9526 		    device_xname(sc->sc_dev), phy, reg);
   9527 #endif
   9528 		rv = 0;
   9529 	} else {
   9530 		rv = MDIC_DATA(mdic);
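		/*
		 * All-ones data usually means that no PHY answered
		 * (the bus floats high), so treat it as a failed read.
		 */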
   9531 		if (rv == 0xffff)
   9532 			rv = 0;
   9533 	}
   9534 
   9535 	return rv;
   9536 }
   9537 
   9538 /*
   9539  * wm_gmii_mdic_writereg:	[mii interface function]
   9540  *
   9541  *	Write a PHY register on the GMII.
   9542  */
   9543 static void
   9544 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9545 {
   9546 	struct wm_softc *sc = device_private(self);
   9547 	uint32_t mdic = 0;
   9548 	int i;
   9549 
   9550 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9551 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9552 
   9553 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9554 		mdic = CSR_READ(sc, WMREG_MDIC);
   9555 		if (mdic & MDIC_READY)
   9556 			break;
   9557 		delay(50);
   9558 	}
   9559 
   9560 	if ((mdic & MDIC_READY) == 0)
   9561 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9562 		    device_xname(sc->sc_dev), phy, reg);
   9563 	else if (mdic & MDIC_E)
   9564 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9565 		    device_xname(sc->sc_dev), phy, reg);
   9566 }
   9567 
   9568 /*
   9569  * wm_gmii_i82544_readreg:	[mii interface function]
   9570  *
   9571  *	Read a PHY register on the GMII.
   9572  */
   9573 static int
   9574 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9575 {
   9576 	struct wm_softc *sc = device_private(self);
   9577 	int rv;
   9578 
   9579 	if (sc->phy.acquire(sc)) {
   9580 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9581 		    __func__);
   9582 		return 0;
   9583 	}
   9584 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9585 	sc->phy.release(sc);
   9586 
   9587 	return rv;
   9588 }
   9589 
   9590 /*
   9591  * wm_gmii_i82544_writereg:	[mii interface function]
   9592  *
   9593  *	Write a PHY register on the GMII.
   9594  */
   9595 static void
   9596 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9597 {
   9598 	struct wm_softc *sc = device_private(self);
   9599 
    9600 	if (sc->phy.acquire(sc)) {
    9601 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9602 		    __func__);
         		return;
    9603 	}
   9604 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9605 	sc->phy.release(sc);
   9606 }
   9607 
   9608 /*
   9609  * wm_gmii_i80003_readreg:	[mii interface function]
   9610  *
   9611  *	Read a PHY register on the kumeran
   9612  * This could be handled by the PHY layer if we didn't have to lock the
    9613  * resource ...
   9614  */
   9615 static int
   9616 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9617 {
   9618 	struct wm_softc *sc = device_private(self);
   9619 	int rv;
   9620 
   9621 	if (phy != 1) /* only one PHY on kumeran bus */
   9622 		return 0;
   9623 
   9624 	if (sc->phy.acquire(sc)) {
   9625 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9626 		    __func__);
   9627 		return 0;
   9628 	}
   9629 
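         	/*
         	 * The GG82563 uses paged registers: the page is carried in
         	 * the upper bits of 'reg' and must be written to a page-select
         	 * register first.  Registers at or above GG82563_MIN_ALT_REG
         	 * are reached through the alternate page-select register.
         	 */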
   9630 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9631 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9632 		    reg >> GG82563_PAGE_SHIFT);
   9633 	} else {
   9634 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9635 		    reg >> GG82563_PAGE_SHIFT);
   9636 	}
    9637 	/* Wait 200us more to work around an MDIC ready-bit bug */
   9638 	delay(200);
   9639 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9640 	delay(200);
   9641 	sc->phy.release(sc);
   9642 
   9643 	return rv;
   9644 }
   9645 
   9646 /*
   9647  * wm_gmii_i80003_writereg:	[mii interface function]
   9648  *
   9649  *	Write a PHY register on the kumeran.
   9650  * This could be handled by the PHY layer if we didn't have to lock the
    9651  * resource ...
   9652  */
   9653 static void
   9654 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9655 {
   9656 	struct wm_softc *sc = device_private(self);
   9657 
   9658 	if (phy != 1) /* only one PHY on kumeran bus */
   9659 		return;
   9660 
   9661 	if (sc->phy.acquire(sc)) {
   9662 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9663 		    __func__);
   9664 		return;
   9665 	}
   9666 
   9667 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9668 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9669 		    reg >> GG82563_PAGE_SHIFT);
   9670 	} else {
   9671 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9672 		    reg >> GG82563_PAGE_SHIFT);
   9673 	}
    9674 	/* Wait 200us more to work around an MDIC ready-bit bug */
   9675 	delay(200);
   9676 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9677 	delay(200);
   9678 
   9679 	sc->phy.release(sc);
   9680 }
   9681 
   9682 /*
   9683  * wm_gmii_bm_readreg:	[mii interface function]
   9684  *
    9685  *	Read a PHY register on the BM PHY.
    9686  * This could be handled by the PHY layer if we didn't have to lock the
    9687  * resource ...
   9688  */
   9689 static int
   9690 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9691 {
   9692 	struct wm_softc *sc = device_private(self);
   9693 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9694 	uint16_t val;
   9695 	int rv;
   9696 
   9697 	if (sc->phy.acquire(sc)) {
   9698 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9699 		    __func__);
   9700 		return 0;
   9701 	}
   9702 
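         	/*
         	 * BM PHYs also encode the page in the upper bits of 'reg';
         	 * registers above BME1000_MAX_MULTI_PAGE_REG need an explicit
         	 * page-select write first, and which page-select register to
         	 * use depends on the device type (see below).
         	 */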
   9703 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9704 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9705 		    || (reg == 31)) ? 1 : phy;
   9706 	/* Page 800 works differently than the rest so it has its own func */
   9707 	if (page == BM_WUC_PAGE) {
   9708 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9709 		rv = val;
   9710 		goto release;
   9711 	}
   9712 
   9713 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9714 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9715 		    && (sc->sc_type != WM_T_82583))
   9716 			wm_gmii_mdic_writereg(self, phy,
   9717 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9718 		else
   9719 			wm_gmii_mdic_writereg(self, phy,
   9720 			    BME1000_PHY_PAGE_SELECT, page);
   9721 	}
   9722 
   9723 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9724 
   9725 release:
   9726 	sc->phy.release(sc);
   9727 	return rv;
   9728 }
   9729 
   9730 /*
   9731  * wm_gmii_bm_writereg:	[mii interface function]
   9732  *
    9733  *	Write a PHY register on the BM PHY.
    9734  * This could be handled by the PHY layer if we didn't have to lock the
    9735  * resource ...
   9736  */
   9737 static void
   9738 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9739 {
   9740 	struct wm_softc *sc = device_private(self);
   9741 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9742 
   9743 	if (sc->phy.acquire(sc)) {
   9744 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9745 		    __func__);
   9746 		return;
   9747 	}
   9748 
   9749 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9750 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9751 		    || (reg == 31)) ? 1 : phy;
   9752 	/* Page 800 works differently than the rest so it has its own func */
   9753 	if (page == BM_WUC_PAGE) {
   9754 		uint16_t tmp;
   9755 
   9756 		tmp = val;
   9757 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9758 		goto release;
   9759 	}
   9760 
   9761 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9762 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9763 		    && (sc->sc_type != WM_T_82583))
   9764 			wm_gmii_mdic_writereg(self, phy,
   9765 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9766 		else
   9767 			wm_gmii_mdic_writereg(self, phy,
   9768 			    BME1000_PHY_PAGE_SELECT, page);
   9769 	}
   9770 
   9771 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9772 
   9773 release:
   9774 	sc->phy.release(sc);
   9775 }
   9776 
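         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read (rd != 0) or write a BM PHY wakeup register on page 800
          *	(BM_WUC_PAGE).  The wakeup register window must be enabled
          *	from page 769 first; see the numbered steps below.
          */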
   9777 static void
   9778 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9779 {
   9780 	struct wm_softc *sc = device_private(self);
   9781 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9782 	uint16_t wuce, reg;
   9783 
   9784 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9785 		device_xname(sc->sc_dev), __func__));
   9786 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9787 	if (sc->sc_type == WM_T_PCH) {
    9788 		/* XXX the e1000 driver does nothing here... why? */
   9789 	}
   9790 
   9791 	/*
   9792 	 * 1) Enable PHY wakeup register first.
   9793 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9794 	 */
   9795 
   9796 	/* Set page 769 */
   9797 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9798 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9799 
   9800 	/* Read WUCE and save it */
   9801 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9802 
   9803 	reg = wuce | BM_WUC_ENABLE_BIT;
   9804 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9805 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9806 
   9807 	/* Select page 800 */
   9808 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9809 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9810 
   9811 	/*
   9812 	 * 2) Access PHY wakeup register.
   9813 	 * See e1000_access_phy_wakeup_reg_bm.
   9814 	 */
   9815 
   9816 	/* Write page 800 */
   9817 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9818 
   9819 	if (rd)
   9820 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9821 	else
   9822 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9823 
   9824 	/*
   9825 	 * 3) Disable PHY wakeup register.
   9826 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9827 	 */
   9828 	/* Set page 769 */
   9829 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9830 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9831 
   9832 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9833 }
   9834 
   9835 /*
   9836  * wm_gmii_hv_readreg:	[mii interface function]
   9837  *
    9838  *	Read a PHY register on the HV (PCH family) PHY.
    9839  * This could be handled by the PHY layer if we didn't have to lock the
    9840  * resource ...
   9841  */
   9842 static int
   9843 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9844 {
   9845 	struct wm_softc *sc = device_private(self);
   9846 	int rv;
   9847 
   9848 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9849 		device_xname(sc->sc_dev), __func__));
   9850 	if (sc->phy.acquire(sc)) {
   9851 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9852 		    __func__);
   9853 		return 0;
   9854 	}
   9855 
   9856 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9857 	sc->phy.release(sc);
   9858 	return rv;
   9859 }
   9860 
   9861 static int
   9862 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9863 {
   9864 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9865 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9866 	uint16_t val;
   9867 	int rv;
   9868 
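         	/* Pages >= HV_INTC_FC_PAGE_START are always at PHY address 1 */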
   9869 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9870 
   9871 	/* Page 800 works differently than the rest so it has its own func */
   9872 	if (page == BM_WUC_PAGE) {
   9873 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9874 		return val;
   9875 	}
   9876 
    9877 	/*
    9878 	 * Pages below 768 work differently than the rest and would need
    9879 	 * their own function; that case is not handled here.
    9880 	 */
   9881 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9882 		printf("gmii_hv_readreg!!!\n");
   9883 		return 0;
   9884 	}
   9885 
   9886 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9887 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9888 		    page << BME1000_PAGE_SHIFT);
   9889 	}
   9890 
   9891 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9892 	return rv;
   9893 }
   9894 
   9895 /*
   9896  * wm_gmii_hv_writereg:	[mii interface function]
   9897  *
    9898  *	Write a PHY register on the HV (PCH family) PHY.
    9899  * This could be handled by the PHY layer if we didn't have to lock the
    9900  * resource ...
   9901  */
   9902 static void
   9903 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9904 {
   9905 	struct wm_softc *sc = device_private(self);
   9906 
   9907 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9908 		device_xname(sc->sc_dev), __func__));
   9909 
   9910 	if (sc->phy.acquire(sc)) {
   9911 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9912 		    __func__);
   9913 		return;
   9914 	}
   9915 
   9916 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9917 	sc->phy.release(sc);
   9918 }
   9919 
   9920 static void
   9921 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9922 {
   9923 	struct wm_softc *sc = device_private(self);
   9924 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9925 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9926 
   9927 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9928 
   9929 	/* Page 800 works differently than the rest so it has its own func */
   9930 	if (page == BM_WUC_PAGE) {
   9931 		uint16_t tmp;
   9932 
   9933 		tmp = val;
   9934 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9935 		return;
   9936 	}
   9937 
    9938 	/*
    9939 	 * Pages below 768 work differently than the rest and would need
    9940 	 * their own function; that case is not handled here.
    9941 	 */
   9942 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9943 		printf("gmii_hv_writereg!!!\n");
   9944 		return;
   9945 	}
   9946 
   9947 	{
   9948 		/*
   9949 		 * XXX Workaround MDIO accesses being disabled after entering
   9950 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9951 		 * register is set)
   9952 		 */
   9953 		if (sc->sc_phytype == WMPHY_82578) {
   9954 			struct mii_softc *child;
   9955 
   9956 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9957 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9958 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9959 			    && ((val & (1 << 11)) != 0)) {
   9960 				printf("XXX need workaround\n");
   9961 			}
   9962 		}
   9963 
   9964 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9965 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9966 			    page << BME1000_PAGE_SHIFT);
   9967 		}
   9968 	}
   9969 
   9970 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9971 }
   9972 
   9973 /*
   9974  * wm_gmii_82580_readreg:	[mii interface function]
   9975  *
   9976  *	Read a PHY register on the 82580 and I350.
   9977  * This could be handled by the PHY layer if we didn't have to lock the
    9978  * resource ...
   9979  */
   9980 static int
   9981 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9982 {
   9983 	struct wm_softc *sc = device_private(self);
   9984 	int rv;
   9985 
   9986 	if (sc->phy.acquire(sc) != 0) {
   9987 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9988 		    __func__);
   9989 		return 0;
   9990 	}
   9991 
   9992 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9993 
   9994 	sc->phy.release(sc);
   9995 	return rv;
   9996 }
   9997 
   9998 /*
   9999  * wm_gmii_82580_writereg:	[mii interface function]
   10000  *
   10001  *	Write a PHY register on the 82580 and I350.
   10002  * This could be handled by the PHY layer if we didn't have to lock the
    10003  * resource ...
   10004  */
   10005 static void
   10006 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10007 {
   10008 	struct wm_softc *sc = device_private(self);
   10009 
   10010 	if (sc->phy.acquire(sc) != 0) {
   10011 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10012 		    __func__);
   10013 		return;
   10014 	}
   10015 
   10016 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10017 
   10018 	sc->phy.release(sc);
   10019 }
   10020 
   10021 /*
   10022  * wm_gmii_gs40g_readreg:	[mii interface function]
   10023  *
    10024  *	Read a PHY register on the I210 and I211.
    10025  * This could be handled by the PHY layer if we didn't have to lock the
    10026  * resource ...
   10027  */
   10028 static int
   10029 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10030 {
   10031 	struct wm_softc *sc = device_private(self);
   10032 	int page, offset;
   10033 	int rv;
   10034 
   10035 	/* Acquire semaphore */
   10036 	if (sc->phy.acquire(sc)) {
   10037 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10038 		    __func__);
   10039 		return 0;
   10040 	}
   10041 
   10042 	/* Page select */
   10043 	page = reg >> GS40G_PAGE_SHIFT;
   10044 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10045 
   10046 	/* Read reg */
   10047 	offset = reg & GS40G_OFFSET_MASK;
   10048 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10049 
   10050 	sc->phy.release(sc);
   10051 	return rv;
   10052 }
   10053 
   10054 /*
   10055  * wm_gmii_gs40g_writereg:	[mii interface function]
   10056  *
   10057  *	Write a PHY register on the I210 and I211.
   10058  * This could be handled by the PHY layer if we didn't have to lock the
    10059  * resource ...
   10060  */
   10061 static void
   10062 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10063 {
   10064 	struct wm_softc *sc = device_private(self);
   10065 	int page, offset;
   10066 
   10067 	/* Acquire semaphore */
   10068 	if (sc->phy.acquire(sc)) {
   10069 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10070 		    __func__);
   10071 		return;
   10072 	}
   10073 
   10074 	/* Page select */
   10075 	page = reg >> GS40G_PAGE_SHIFT;
   10076 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10077 
   10078 	/* Write reg */
   10079 	offset = reg & GS40G_OFFSET_MASK;
   10080 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10081 
   10082 	/* Release semaphore */
   10083 	sc->phy.release(sc);
   10084 }
   10085 
   10086 /*
   10087  * wm_gmii_statchg:	[mii interface function]
   10088  *
   10089  *	Callback from MII layer when media changes.
   10090  */
   10091 static void
   10092 wm_gmii_statchg(struct ifnet *ifp)
   10093 {
   10094 	struct wm_softc *sc = ifp->if_softc;
   10095 	struct mii_data *mii = &sc->sc_mii;
   10096 
   10097 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10098 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10099 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10100 
   10101 	/*
   10102 	 * Get flow control negotiation result.
   10103 	 */
   10104 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10105 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10106 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10107 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10108 	}
   10109 
   10110 	if (sc->sc_flowflags & IFM_FLOW) {
   10111 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10112 			sc->sc_ctrl |= CTRL_TFCE;
   10113 			sc->sc_fcrtl |= FCRTL_XONE;
   10114 		}
   10115 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10116 			sc->sc_ctrl |= CTRL_RFCE;
   10117 	}
   10118 
   10119 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10120 		DPRINTF(WM_DEBUG_LINK,
   10121 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10122 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10123 	} else {
   10124 		DPRINTF(WM_DEBUG_LINK,
   10125 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10126 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10127 	}
   10128 
   10129 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10130 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10131 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10132 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10133 	if (sc->sc_type == WM_T_80003) {
   10134 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10135 		case IFM_1000_T:
   10136 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10137 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10138 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10139 			break;
   10140 		default:
   10141 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10142 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10143 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10144 			break;
   10145 		}
   10146 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10147 	}
   10148 }
   10149 
   10150 /* kumeran related (80003, ICH* and PCH*) */
   10151 
   10152 /*
   10153  * wm_kmrn_readreg:
   10154  *
   10155  *	Read a kumeran register
   10156  */
   10157 static int
   10158 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10159 {
   10160 	int rv;
   10161 
   10162 	if (sc->sc_type == WM_T_80003)
   10163 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10164 	else
   10165 		rv = sc->phy.acquire(sc);
   10166 	if (rv != 0) {
   10167 		aprint_error_dev(sc->sc_dev,
   10168 		    "%s: failed to get semaphore\n", __func__);
   10169 		return 0;
   10170 	}
   10171 
   10172 	rv = wm_kmrn_readreg_locked(sc, reg);
   10173 
   10174 	if (sc->sc_type == WM_T_80003)
   10175 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10176 	else
   10177 		sc->phy.release(sc);
   10178 
   10179 	return rv;
   10180 }
   10181 
   10182 static int
   10183 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10184 {
   10185 	int rv;
   10186 
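          	/*
          	 * Kumeran registers are reached through KUMCTRLSTA: write the
          	 * register offset with KUMCTRLSTA_REN (read enable) set, wait
          	 * briefly, then read the 16-bit datum back from the same
          	 * register.
          	 */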
   10187 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10188 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10189 	    KUMCTRLSTA_REN);
   10190 	CSR_WRITE_FLUSH(sc);
   10191 	delay(2);
   10192 
   10193 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10194 
   10195 	return rv;
   10196 }
   10197 
   10198 /*
   10199  * wm_kmrn_writereg:
   10200  *
   10201  *	Write a kumeran register
   10202  */
   10203 static void
   10204 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10205 {
   10206 	int rv;
   10207 
   10208 	if (sc->sc_type == WM_T_80003)
   10209 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10210 	else
   10211 		rv = sc->phy.acquire(sc);
   10212 	if (rv != 0) {
   10213 		aprint_error_dev(sc->sc_dev,
   10214 		    "%s: failed to get semaphore\n", __func__);
   10215 		return;
   10216 	}
   10217 
   10218 	wm_kmrn_writereg_locked(sc, reg, val);
   10219 
   10220 	if (sc->sc_type == WM_T_80003)
   10221 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10222 	else
   10223 		sc->phy.release(sc);
   10224 }
   10225 
   10226 static void
   10227 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10228 {
   10229 
   10230 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10231 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10232 	    (val & KUMCTRLSTA_MASK));
   10233 }
   10234 
   10235 /* SGMII related */
   10236 
   10237 /*
   10238  * wm_sgmii_uses_mdio
   10239  *
   10240  * Check whether the transaction is to the internal PHY or the external
   10241  * MDIO interface. Return true if it's MDIO.
   10242  */
   10243 static bool
   10244 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10245 {
   10246 	uint32_t reg;
   10247 	bool ismdio = false;
   10248 
   10249 	switch (sc->sc_type) {
   10250 	case WM_T_82575:
   10251 	case WM_T_82576:
   10252 		reg = CSR_READ(sc, WMREG_MDIC);
   10253 		ismdio = ((reg & MDIC_DEST) != 0);
   10254 		break;
   10255 	case WM_T_82580:
   10256 	case WM_T_I350:
   10257 	case WM_T_I354:
   10258 	case WM_T_I210:
   10259 	case WM_T_I211:
   10260 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10261 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10262 		break;
   10263 	default:
   10264 		break;
   10265 	}
   10266 
   10267 	return ismdio;
   10268 }
   10269 
   10270 /*
   10271  * wm_sgmii_readreg:	[mii interface function]
   10272  *
   10273  *	Read a PHY register on the SGMII
   10274  * This could be handled by the PHY layer if we didn't have to lock the
    10275  * resource ...
   10276  */
   10277 static int
   10278 wm_sgmii_readreg(device_t self, int phy, int reg)
   10279 {
   10280 	struct wm_softc *sc = device_private(self);
   10281 	uint32_t i2ccmd;
   10282 	int i, rv;
   10283 
   10284 	if (sc->phy.acquire(sc)) {
   10285 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10286 		    __func__);
   10287 		return 0;
   10288 	}
   10289 
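          	/*
          	 * The I2CCMD register performs a two-byte I2C read from the
          	 * PHY behind the SGMII link.  The bytes arrive in wire order,
          	 * so the result is byte-swapped into host order below (writes
          	 * are swapped the other way in wm_sgmii_writereg()).
          	 */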
   10290 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10291 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10292 	    | I2CCMD_OPCODE_READ;
   10293 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10294 
   10295 	/* Poll the ready bit */
   10296 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10297 		delay(50);
   10298 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10299 		if (i2ccmd & I2CCMD_READY)
   10300 			break;
   10301 	}
   10302 	if ((i2ccmd & I2CCMD_READY) == 0)
   10303 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10304 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10305 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10306 
   10307 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10308 
   10309 	sc->phy.release(sc);
   10310 	return rv;
   10311 }
   10312 
   10313 /*
   10314  * wm_sgmii_writereg:	[mii interface function]
   10315  *
   10316  *	Write a PHY register on the SGMII.
   10317  * This could be handled by the PHY layer if we didn't have to lock the
    10318  * resource ...
   10319  */
   10320 static void
   10321 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10322 {
   10323 	struct wm_softc *sc = device_private(self);
   10324 	uint32_t i2ccmd;
   10325 	int i;
   10326 	int val_swapped;
   10327 
   10328 	if (sc->phy.acquire(sc) != 0) {
   10329 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10330 		    __func__);
   10331 		return;
   10332 	}
   10333 	/* Swap the data bytes for the I2C interface */
   10334 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10335 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10336 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10337 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10338 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10339 
   10340 	/* Poll the ready bit */
   10341 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10342 		delay(50);
   10343 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10344 		if (i2ccmd & I2CCMD_READY)
   10345 			break;
   10346 	}
   10347 	if ((i2ccmd & I2CCMD_READY) == 0)
   10348 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10349 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10350 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10351 
   10352 	sc->phy.release(sc);
   10353 }
   10354 
   10355 /* TBI related */
   10356 
   10357 /*
   10358  * wm_tbi_mediainit:
   10359  *
   10360  *	Initialize media for use on 1000BASE-X devices.
   10361  */
   10362 static void
   10363 wm_tbi_mediainit(struct wm_softc *sc)
   10364 {
   10365 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10366 	const char *sep = "";
   10367 
   10368 	if (sc->sc_type < WM_T_82543)
   10369 		sc->sc_tipg = TIPG_WM_DFLT;
   10370 	else
   10371 		sc->sc_tipg = TIPG_LG_DFLT;
   10372 
   10373 	sc->sc_tbi_serdes_anegticks = 5;
   10374 
   10375 	/* Initialize our media structures */
   10376 	sc->sc_mii.mii_ifp = ifp;
   10377 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10378 
   10379 	if ((sc->sc_type >= WM_T_82575)
   10380 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10381 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10382 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10383 	else
   10384 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10385 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10386 
   10387 	/*
   10388 	 * SWD Pins:
   10389 	 *
   10390 	 *	0 = Link LED (output)
   10391 	 *	1 = Loss Of Signal (input)
   10392 	 */
   10393 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10394 
   10395 	/* XXX Perhaps this is only for TBI */
   10396 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10397 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10398 
   10399 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10400 		sc->sc_ctrl &= ~CTRL_LRST;
   10401 
   10402 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10403 
   10404 #define	ADD(ss, mm, dd)							\
   10405 do {									\
   10406 	aprint_normal("%s%s", sep, ss);					\
   10407 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10408 	sep = ", ";							\
   10409 } while (/*CONSTCOND*/0)
   10410 
   10411 	aprint_normal_dev(sc->sc_dev, "");
   10412 
   10413 	if (sc->sc_type == WM_T_I354) {
   10414 		uint32_t status;
   10415 
   10416 		status = CSR_READ(sc, WMREG_STATUS);
   10417 		if (((status & STATUS_2P5_SKU) != 0)
   10418 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10419 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10420 		} else
   10421 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10422 	} else if (sc->sc_type == WM_T_82545) {
   10423 		/* Only 82545 is LX (XXX except SFP) */
   10424 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10425 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10426 	} else {
   10427 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10428 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10429 	}
   10430 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10431 	aprint_normal("\n");
   10432 
   10433 #undef ADD
   10434 
   10435 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10436 }
   10437 
   10438 /*
   10439  * wm_tbi_mediachange:	[ifmedia interface function]
   10440  *
   10441  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10442  */
   10443 static int
   10444 wm_tbi_mediachange(struct ifnet *ifp)
   10445 {
   10446 	struct wm_softc *sc = ifp->if_softc;
   10447 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10448 	uint32_t status;
   10449 	int i;
   10450 
   10451 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10452 		/* XXX need some work for >= 82571 and < 82575 */
   10453 		if (sc->sc_type < WM_T_82575)
   10454 			return 0;
   10455 	}
   10456 
   10457 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10458 	    || (sc->sc_type >= WM_T_82575))
   10459 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10460 
   10461 	sc->sc_ctrl &= ~CTRL_LRST;
   10462 	sc->sc_txcw = TXCW_ANE;
   10463 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10464 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10465 	else if (ife->ifm_media & IFM_FDX)
   10466 		sc->sc_txcw |= TXCW_FD;
   10467 	else
   10468 		sc->sc_txcw |= TXCW_HD;
   10469 
   10470 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10471 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10472 
   10473 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10474 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10475 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10476 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10477 	CSR_WRITE_FLUSH(sc);
   10478 	delay(1000);
   10479 
   10480 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10481 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10482 
    10483 	/*
    10484 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    10485 	 * optics detect a signal; on older chips the pin sense is inverted.
    10486 	 */
   10487 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10488 		/* Have signal; wait for the link to come up. */
   10489 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10490 			delay(10000);
   10491 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10492 				break;
   10493 		}
   10494 
   10495 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10496 			    device_xname(sc->sc_dev),i));
   10497 
   10498 		status = CSR_READ(sc, WMREG_STATUS);
   10499 		DPRINTF(WM_DEBUG_LINK,
   10500 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10501 			device_xname(sc->sc_dev),status, STATUS_LU));
   10502 		if (status & STATUS_LU) {
   10503 			/* Link is up. */
   10504 			DPRINTF(WM_DEBUG_LINK,
   10505 			    ("%s: LINK: set media -> link up %s\n",
   10506 			    device_xname(sc->sc_dev),
   10507 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10508 
    10509 			/*
    10510 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10511 			 * automatically, so refresh the cached sc->sc_ctrl.
    10512 			 */
   10513 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10514 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10515 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10516 			if (status & STATUS_FD)
   10517 				sc->sc_tctl |=
   10518 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10519 			else
   10520 				sc->sc_tctl |=
   10521 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10522 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10523 				sc->sc_fcrtl |= FCRTL_XONE;
   10524 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10525 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10526 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10527 				      sc->sc_fcrtl);
   10528 			sc->sc_tbi_linkup = 1;
   10529 		} else {
   10530 			if (i == WM_LINKUP_TIMEOUT)
   10531 				wm_check_for_link(sc);
   10532 			/* Link is down. */
   10533 			DPRINTF(WM_DEBUG_LINK,
   10534 			    ("%s: LINK: set media -> link down\n",
   10535 			    device_xname(sc->sc_dev)));
   10536 			sc->sc_tbi_linkup = 0;
   10537 		}
   10538 	} else {
   10539 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10540 		    device_xname(sc->sc_dev)));
   10541 		sc->sc_tbi_linkup = 0;
   10542 	}
   10543 
   10544 	wm_tbi_serdes_set_linkled(sc);
   10545 
   10546 	return 0;
   10547 }
   10548 
   10549 /*
   10550  * wm_tbi_mediastatus:	[ifmedia interface function]
   10551  *
   10552  *	Get the current interface media status on a 1000BASE-X device.
   10553  */
   10554 static void
   10555 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10556 {
   10557 	struct wm_softc *sc = ifp->if_softc;
   10558 	uint32_t ctrl, status;
   10559 
   10560 	ifmr->ifm_status = IFM_AVALID;
   10561 	ifmr->ifm_active = IFM_ETHER;
   10562 
   10563 	status = CSR_READ(sc, WMREG_STATUS);
   10564 	if ((status & STATUS_LU) == 0) {
   10565 		ifmr->ifm_active |= IFM_NONE;
   10566 		return;
   10567 	}
   10568 
   10569 	ifmr->ifm_status |= IFM_ACTIVE;
   10570 	/* Only 82545 is LX */
   10571 	if (sc->sc_type == WM_T_82545)
   10572 		ifmr->ifm_active |= IFM_1000_LX;
   10573 	else
   10574 		ifmr->ifm_active |= IFM_1000_SX;
   10575 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10576 		ifmr->ifm_active |= IFM_FDX;
   10577 	else
   10578 		ifmr->ifm_active |= IFM_HDX;
   10579 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10580 	if (ctrl & CTRL_RFCE)
   10581 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10582 	if (ctrl & CTRL_TFCE)
   10583 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10584 }
   10585 
   10586 /* XXX TBI only */
   10587 static int
   10588 wm_check_for_link(struct wm_softc *sc)
   10589 {
   10590 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10591 	uint32_t rxcw;
   10592 	uint32_t ctrl;
   10593 	uint32_t status;
   10594 	uint32_t sig;
   10595 
   10596 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10597 		/* XXX need some work for >= 82571 */
   10598 		if (sc->sc_type >= WM_T_82571) {
   10599 			sc->sc_tbi_linkup = 1;
   10600 			return 0;
   10601 		}
   10602 	}
   10603 
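          	/*
          	 * TXCW holds the 802.3z autonegotiation config word that we
          	 * advertise; RXCW_C is set while the link partner is sending
          	 * /C/ (configuration) ordered sets.  Together with the
          	 * signal-detect pin this yields the decision table below.
          	 */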
   10604 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10605 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10606 	status = CSR_READ(sc, WMREG_STATUS);
   10607 
   10608 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10609 
   10610 	DPRINTF(WM_DEBUG_LINK,
   10611 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10612 		device_xname(sc->sc_dev), __func__,
   10613 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10614 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10615 
   10616 	/*
   10617 	 * SWDPIN   LU RXCW
   10618 	 *      0    0    0
   10619 	 *      0    0    1	(should not happen)
   10620 	 *      0    1    0	(should not happen)
   10621 	 *      0    1    1	(should not happen)
   10622 	 *      1    0    0	Disable autonego and force linkup
   10623 	 *      1    0    1	got /C/ but not linkup yet
   10624 	 *      1    1    0	(linkup)
   10625 	 *      1    1    1	If IFM_AUTO, back to autonego
   10626 	 *
   10627 	 */
   10628 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10629 	    && ((status & STATUS_LU) == 0)
   10630 	    && ((rxcw & RXCW_C) == 0)) {
   10631 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10632 			__func__));
   10633 		sc->sc_tbi_linkup = 0;
   10634 		/* Disable auto-negotiation in the TXCW register */
   10635 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10636 
   10637 		/*
   10638 		 * Force link-up and also force full-duplex.
   10639 		 *
    10640 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    10641 		 * automatically, so refresh the cached sc->sc_ctrl.
   10642 		 */
   10643 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10644 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10645 	} else if (((status & STATUS_LU) != 0)
   10646 	    && ((rxcw & RXCW_C) != 0)
   10647 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10648 		sc->sc_tbi_linkup = 1;
   10649 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10650 			__func__));
   10651 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10652 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10653 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10654 	    && ((rxcw & RXCW_C) != 0)) {
   10655 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10656 	} else {
   10657 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10658 			status));
   10659 	}
   10660 
   10661 	return 0;
   10662 }
   10663 
   10664 /*
   10665  * wm_tbi_tick:
   10666  *
   10667  *	Check the link on TBI devices.
   10668  *	This function acts as mii_tick().
   10669  */
   10670 static void
   10671 wm_tbi_tick(struct wm_softc *sc)
   10672 {
   10673 	struct mii_data *mii = &sc->sc_mii;
   10674 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10675 	uint32_t status;
   10676 
   10677 	KASSERT(WM_CORE_LOCKED(sc));
   10678 
   10679 	status = CSR_READ(sc, WMREG_STATUS);
   10680 
   10681 	/* XXX is this needed? */
   10682 	(void)CSR_READ(sc, WMREG_RXCW);
   10683 	(void)CSR_READ(sc, WMREG_CTRL);
   10684 
   10685 	/* set link status */
   10686 	if ((status & STATUS_LU) == 0) {
   10687 		DPRINTF(WM_DEBUG_LINK,
   10688 		    ("%s: LINK: checklink -> down\n",
   10689 			device_xname(sc->sc_dev)));
   10690 		sc->sc_tbi_linkup = 0;
   10691 	} else if (sc->sc_tbi_linkup == 0) {
   10692 		DPRINTF(WM_DEBUG_LINK,
   10693 		    ("%s: LINK: checklink -> up %s\n",
   10694 			device_xname(sc->sc_dev),
   10695 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10696 		sc->sc_tbi_linkup = 1;
   10697 		sc->sc_tbi_serdes_ticks = 0;
   10698 	}
   10699 
   10700 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10701 		goto setled;
   10702 
   10703 	if ((status & STATUS_LU) == 0) {
   10704 		sc->sc_tbi_linkup = 0;
   10705 		/* If the timer expired, retry autonegotiation */
   10706 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10707 		    && (++sc->sc_tbi_serdes_ticks
   10708 			>= sc->sc_tbi_serdes_anegticks)) {
   10709 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10710 			sc->sc_tbi_serdes_ticks = 0;
   10711 			/*
   10712 			 * Reset the link, and let autonegotiation do
   10713 			 * its thing
   10714 			 */
   10715 			sc->sc_ctrl |= CTRL_LRST;
   10716 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10717 			CSR_WRITE_FLUSH(sc);
   10718 			delay(1000);
   10719 			sc->sc_ctrl &= ~CTRL_LRST;
   10720 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10721 			CSR_WRITE_FLUSH(sc);
   10722 			delay(1000);
   10723 			CSR_WRITE(sc, WMREG_TXCW,
   10724 			    sc->sc_txcw & ~TXCW_ANE);
   10725 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10726 		}
   10727 	}
   10728 
   10729 setled:
   10730 	wm_tbi_serdes_set_linkled(sc);
   10731 }
   10732 
   10733 /* SERDES related */
   10734 static void
   10735 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10736 {
   10737 	uint32_t reg;
   10738 
   10739 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10740 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10741 		return;
   10742 
   10743 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10744 	reg |= PCS_CFG_PCS_EN;
   10745 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10746 
   10747 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10748 	reg &= ~CTRL_EXT_SWDPIN(3);
   10749 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10750 	CSR_WRITE_FLUSH(sc);
   10751 }
   10752 
   10753 static int
   10754 wm_serdes_mediachange(struct ifnet *ifp)
   10755 {
   10756 	struct wm_softc *sc = ifp->if_softc;
   10757 	bool pcs_autoneg = true; /* XXX */
   10758 	uint32_t ctrl_ext, pcs_lctl, reg;
   10759 
   10760 	/* XXX Currently, this function is not called on 8257[12] */
   10761 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10762 	    || (sc->sc_type >= WM_T_82575))
   10763 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10764 
   10765 	wm_serdes_power_up_link_82575(sc);
   10766 
   10767 	sc->sc_ctrl |= CTRL_SLU;
   10768 
   10769 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10770 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10771 
   10772 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10773 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10774 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10775 	case CTRL_EXT_LINK_MODE_SGMII:
   10776 		pcs_autoneg = true;
   10777 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10778 		break;
   10779 	case CTRL_EXT_LINK_MODE_1000KX:
   10780 		pcs_autoneg = false;
   10781 		/* FALLTHROUGH */
   10782 	default:
   10783 		if ((sc->sc_type == WM_T_82575)
   10784 		    || (sc->sc_type == WM_T_82576)) {
   10785 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10786 				pcs_autoneg = false;
   10787 		}
   10788 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10789 		    | CTRL_FRCFDX;
   10790 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10791 	}
   10792 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10793 
   10794 	if (pcs_autoneg) {
   10795 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10796 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10797 
   10798 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10799 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10800 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10801 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10802 	} else
   10803 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10804 
   10805 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10806 
   10808 	return 0;
   10809 }
   10810 
   10811 static void
   10812 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10813 {
   10814 	struct wm_softc *sc = ifp->if_softc;
   10815 	struct mii_data *mii = &sc->sc_mii;
   10816 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10817 	uint32_t pcs_adv, pcs_lpab, reg;
   10818 
   10819 	ifmr->ifm_status = IFM_AVALID;
   10820 	ifmr->ifm_active = IFM_ETHER;
   10821 
   10822 	/* Check PCS */
   10823 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10824 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10825 		ifmr->ifm_active |= IFM_NONE;
   10826 		sc->sc_tbi_linkup = 0;
   10827 		goto setled;
   10828 	}
   10829 
   10830 	sc->sc_tbi_linkup = 1;
   10831 	ifmr->ifm_status |= IFM_ACTIVE;
   10832 	if (sc->sc_type == WM_T_I354) {
   10833 		uint32_t status;
   10834 
   10835 		status = CSR_READ(sc, WMREG_STATUS);
   10836 		if (((status & STATUS_2P5_SKU) != 0)
   10837 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10838 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10839 		} else
   10840 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10841 	} else {
   10842 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10843 		case PCS_LSTS_SPEED_10:
   10844 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10845 			break;
   10846 		case PCS_LSTS_SPEED_100:
   10847 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10848 			break;
   10849 		case PCS_LSTS_SPEED_1000:
   10850 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10851 			break;
   10852 		default:
   10853 			device_printf(sc->sc_dev, "Unknown speed\n");
   10854 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10855 			break;
   10856 		}
   10857 	}
   10858 	if ((reg & PCS_LSTS_FDX) != 0)
   10859 		ifmr->ifm_active |= IFM_FDX;
   10860 	else
   10861 		ifmr->ifm_active |= IFM_HDX;
   10862 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10863 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10864 		/* Check flow */
   10865 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10866 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10867 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10868 			goto setled;
   10869 		}
   10870 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10871 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10872 		DPRINTF(WM_DEBUG_LINK,
   10873 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10874 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10875 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10876 			mii->mii_media_active |= IFM_FLOW
   10877 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10878 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10879 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10880 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10881 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10882 			mii->mii_media_active |= IFM_FLOW
   10883 			    | IFM_ETH_TXPAUSE;
   10884 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10885 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10886 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10887 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10888 			mii->mii_media_active |= IFM_FLOW
   10889 			    | IFM_ETH_RXPAUSE;
   10890 		}
   10891 	}
   10892 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10893 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10894 setled:
   10895 	wm_tbi_serdes_set_linkled(sc);
   10896 }
   10897 
   10898 /*
   10899  * wm_serdes_tick:
   10900  *
   10901  *	Check the link on serdes devices.
   10902  */
   10903 static void
   10904 wm_serdes_tick(struct wm_softc *sc)
   10905 {
   10906 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10907 	struct mii_data *mii = &sc->sc_mii;
   10908 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10909 	uint32_t reg;
   10910 
   10911 	KASSERT(WM_CORE_LOCKED(sc));
   10912 
   10913 	mii->mii_media_status = IFM_AVALID;
   10914 	mii->mii_media_active = IFM_ETHER;
   10915 
   10916 	/* Check PCS */
   10917 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10918 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10919 		mii->mii_media_status |= IFM_ACTIVE;
   10920 		sc->sc_tbi_linkup = 1;
   10921 		sc->sc_tbi_serdes_ticks = 0;
   10922 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10923 		if ((reg & PCS_LSTS_FDX) != 0)
   10924 			mii->mii_media_active |= IFM_FDX;
   10925 		else
   10926 			mii->mii_media_active |= IFM_HDX;
   10927 	} else {
   10928 		mii->mii_media_status |= IFM_NONE;
   10929 		sc->sc_tbi_linkup = 0;
   10930 		/* If the timer expired, retry autonegotiation */
   10931 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10932 		    && (++sc->sc_tbi_serdes_ticks
   10933 			>= sc->sc_tbi_serdes_anegticks)) {
   10934 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10935 			sc->sc_tbi_serdes_ticks = 0;
   10936 			/* XXX */
   10937 			wm_serdes_mediachange(ifp);
   10938 		}
   10939 	}
   10940 
   10941 	wm_tbi_serdes_set_linkled(sc);
   10942 }
   10943 
   10944 /* SFP related */
   10945 
   10946 static int
   10947 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10948 {
   10949 	uint32_t i2ccmd;
   10950 	int i;
   10951 
   10952 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10953 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10954 
   10955 	/* Poll the ready bit */
   10956 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10957 		delay(50);
   10958 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10959 		if (i2ccmd & I2CCMD_READY)
   10960 			break;
   10961 	}
   10962 	if ((i2ccmd & I2CCMD_READY) == 0)
   10963 		return -1;
   10964 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10965 		return -1;
   10966 
   10967 	*data = i2ccmd & 0x00ff;
   10968 
   10969 	return 0;
   10970 }
   10971 
   10972 static uint32_t
   10973 wm_sfp_get_media_type(struct wm_softc *sc)
   10974 {
   10975 	uint32_t ctrl_ext;
   10976 	uint8_t val = 0;
   10977 	int timeout = 3;
   10978 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10979 	int rv = -1;
   10980 
   10981 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10982 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10983 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10984 	CSR_WRITE_FLUSH(sc);
   10985 
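          	/*
          	 * Per SFF-8472 the identifier byte is at offset 0 of the
          	 * module EEPROM and the (gigabit) Ethernet compliance codes
          	 * at offset 6; SFF_SFP_ID_OFF and SFF_SFP_ETH_FLAGS_OFF are
          	 * assumed to name those offsets.
          	 */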
   10986 	/* Read SFP module data */
   10987 	while (timeout) {
   10988 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10989 		if (rv == 0)
   10990 			break;
   10991 		delay(100*1000); /* XXX too big */
   10992 		timeout--;
   10993 	}
   10994 	if (rv != 0)
   10995 		goto out;
   10996 	switch (val) {
   10997 	case SFF_SFP_ID_SFF:
   10998 		aprint_normal_dev(sc->sc_dev,
   10999 		    "Module/Connector soldered to board\n");
   11000 		break;
   11001 	case SFF_SFP_ID_SFP:
   11002 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11003 		break;
   11004 	case SFF_SFP_ID_UNKNOWN:
   11005 		goto out;
   11006 	default:
   11007 		break;
   11008 	}
   11009 
   11010 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11011 	if (rv != 0) {
   11012 		goto out;
   11013 	}
   11014 
   11015 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11016 		mediatype = WM_MEDIATYPE_SERDES;
    11017 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11018 		sc->sc_flags |= WM_F_SGMII;
   11019 		mediatype = WM_MEDIATYPE_COPPER;
    11020 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11021 		sc->sc_flags |= WM_F_SGMII;
   11022 		mediatype = WM_MEDIATYPE_SERDES;
   11023 	}
   11024 
   11025 out:
   11026 	/* Restore I2C interface setting */
   11027 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11028 
   11029 	return mediatype;
   11030 }
   11031 
   11032 /*
   11033  * NVM related.
   11034  * Microwire, SPI (w/wo EERD) and Flash.
   11035  */
   11036 
   11037 /* Both spi and uwire */
   11038 
   11039 /*
   11040  * wm_eeprom_sendbits:
   11041  *
   11042  *	Send a series of bits to the EEPROM.
   11043  */
   11044 static void
   11045 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11046 {
   11047 	uint32_t reg;
   11048 	int x;
   11049 
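          	/*
          	 * Bit-bang one bit per loop iteration: present the bit on
          	 * EECD_DI, then pulse the clock (EECD_SK) high and low,
          	 * MSB first.
          	 */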
   11050 	reg = CSR_READ(sc, WMREG_EECD);
   11051 
   11052 	for (x = nbits; x > 0; x--) {
   11053 		if (bits & (1U << (x - 1)))
   11054 			reg |= EECD_DI;
   11055 		else
   11056 			reg &= ~EECD_DI;
   11057 		CSR_WRITE(sc, WMREG_EECD, reg);
   11058 		CSR_WRITE_FLUSH(sc);
   11059 		delay(2);
   11060 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11061 		CSR_WRITE_FLUSH(sc);
   11062 		delay(2);
   11063 		CSR_WRITE(sc, WMREG_EECD, reg);
   11064 		CSR_WRITE_FLUSH(sc);
   11065 		delay(2);
   11066 	}
   11067 }
   11068 
   11069 /*
   11070  * wm_eeprom_recvbits:
   11071  *
   11072  *	Receive a series of bits from the EEPROM.
   11073  */
   11074 static void
   11075 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11076 {
   11077 	uint32_t reg, val;
   11078 	int x;
   11079 
   11080 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11081 
   11082 	val = 0;
   11083 	for (x = nbits; x > 0; x--) {
   11084 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11085 		CSR_WRITE_FLUSH(sc);
   11086 		delay(2);
   11087 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11088 			val |= (1U << (x - 1));
   11089 		CSR_WRITE(sc, WMREG_EECD, reg);
   11090 		CSR_WRITE_FLUSH(sc);
   11091 		delay(2);
   11092 	}
   11093 	*valp = val;
   11094 }
   11095 
   11096 /* Microwire */
   11097 
   11098 /*
   11099  * wm_nvm_read_uwire:
   11100  *
   11101  *	Read a word from the EEPROM using the MicroWire protocol.
   11102  */
   11103 static int
   11104 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11105 {
   11106 	uint32_t reg, val;
   11107 	int i;
   11108 
   11109 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11110 		device_xname(sc->sc_dev), __func__));
   11111 
   11112 	for (i = 0; i < wordcnt; i++) {
   11113 		/* Clear SK and DI. */
   11114 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11115 		CSR_WRITE(sc, WMREG_EECD, reg);
   11116 
   11117 		/*
   11118 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11119 		 * and Xen.
   11120 		 *
    11121 		 * We use this workaround only for the 82540 because qemu's
    11122 		 * e1000 acts as an 82540.
   11123 		 */
   11124 		if (sc->sc_type == WM_T_82540) {
   11125 			reg |= EECD_SK;
   11126 			CSR_WRITE(sc, WMREG_EECD, reg);
   11127 			reg &= ~EECD_SK;
   11128 			CSR_WRITE(sc, WMREG_EECD, reg);
   11129 			CSR_WRITE_FLUSH(sc);
   11130 			delay(2);
   11131 		}
   11132 		/* XXX: end of workaround */
   11133 
   11134 		/* Set CHIP SELECT. */
   11135 		reg |= EECD_CS;
   11136 		CSR_WRITE(sc, WMREG_EECD, reg);
   11137 		CSR_WRITE_FLUSH(sc);
   11138 		delay(2);
   11139 
   11140 		/* Shift in the READ command. */
   11141 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11142 
   11143 		/* Shift in address. */
   11144 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11145 
   11146 		/* Shift out the data. */
   11147 		wm_eeprom_recvbits(sc, &val, 16);
   11148 		data[i] = val & 0xffff;
   11149 
   11150 		/* Clear CHIP SELECT. */
   11151 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11152 		CSR_WRITE(sc, WMREG_EECD, reg);
   11153 		CSR_WRITE_FLUSH(sc);
   11154 		delay(2);
   11155 	}
   11156 
   11157 	return 0;
   11158 }
   11159 
   11160 /* SPI */
   11161 
   11162 /*
   11163  * Set SPI and FLASH related information from the EECD register.
   11164  * For 82541 and 82547, the word size is taken from EEPROM.
   11165  */
   11166 static int
   11167 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11168 {
   11169 	int size;
   11170 	uint32_t reg;
   11171 	uint16_t data;
   11172 
   11173 	reg = CSR_READ(sc, WMREG_EECD);
   11174 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11175 
   11176 	/* Read the size of NVM from EECD by default */
   11177 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11178 	switch (sc->sc_type) {
   11179 	case WM_T_82541:
   11180 	case WM_T_82541_2:
   11181 	case WM_T_82547:
   11182 	case WM_T_82547_2:
   11183 		/* Set dummy value to access EEPROM */
   11184 		sc->sc_nvm_wordsize = 64;
   11185 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11186 		reg = data;
   11187 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11188 		if (size == 0)
   11189 			size = 6; /* 64 word size */
   11190 		else
   11191 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11192 		break;
   11193 	case WM_T_80003:
   11194 	case WM_T_82571:
   11195 	case WM_T_82572:
   11196 	case WM_T_82573: /* SPI case */
   11197 	case WM_T_82574: /* SPI case */
   11198 	case WM_T_82583: /* SPI case */
   11199 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11200 		if (size > 14)
   11201 			size = 14;
   11202 		break;
   11203 	case WM_T_82575:
   11204 	case WM_T_82576:
   11205 	case WM_T_82580:
   11206 	case WM_T_I350:
   11207 	case WM_T_I354:
   11208 	case WM_T_I210:
   11209 	case WM_T_I211:
   11210 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11211 		if (size > 15)
   11212 			size = 15;
   11213 		break;
   11214 	default:
    11215 		aprint_error_dev(sc->sc_dev,
    11216 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    11217 		return -1;
   11219 	}
   11220 
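          	/*
          	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as
          	 * in Intel's e1000 code): an EECD_EE_SIZE_EX field of 2 gives
          	 * size = 2 + 6 = 8, i.e. 1 << 8 = 256 words.
          	 */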
   11221 	sc->sc_nvm_wordsize = 1 << size;
   11222 
   11223 	return 0;
   11224 }
   11225 
   11226 /*
   11227  * wm_nvm_ready_spi:
   11228  *
   11229  *	Wait for a SPI EEPROM to be ready for commands.
   11230  */
   11231 static int
   11232 wm_nvm_ready_spi(struct wm_softc *sc)
   11233 {
   11234 	uint32_t val;
   11235 	int usec;
   11236 
   11237 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11238 		device_xname(sc->sc_dev), __func__));
   11239 
   11240 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11241 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11242 		wm_eeprom_recvbits(sc, &val, 8);
   11243 		if ((val & SPI_SR_RDY) == 0)
   11244 			break;
   11245 	}
   11246 	if (usec >= SPI_MAX_RETRIES) {
    11247 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11248 		return 1;
   11249 	}
   11250 	return 0;
   11251 }
   11252 
   11253 /*
   11254  * wm_nvm_read_spi:
   11255  *
    11256  *	Read a word from the EEPROM using the SPI protocol.
   11257  */
   11258 static int
   11259 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11260 {
   11261 	uint32_t reg, val;
   11262 	int i;
   11263 	uint8_t opc;
   11264 
   11265 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11266 		device_xname(sc->sc_dev), __func__));
   11267 
   11268 	/* Clear SK and CS. */
   11269 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11270 	CSR_WRITE(sc, WMREG_EECD, reg);
   11271 	CSR_WRITE_FLUSH(sc);
   11272 	delay(2);
   11273 
   11274 	if (wm_nvm_ready_spi(sc))
   11275 		return 1;
   11276 
   11277 	/* Toggle CS to flush commands. */
   11278 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11279 	CSR_WRITE_FLUSH(sc);
   11280 	delay(2);
   11281 	CSR_WRITE(sc, WMREG_EECD, reg);
   11282 	CSR_WRITE_FLUSH(sc);
   11283 	delay(2);
   11284 
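          	/*
          	 * Small SPI parts take 8 address bits; since the word address
          	 * is doubled into a byte address below, words >= 128 need a
          	 * 9th address bit, carried in the A8 bit of the READ opcode.
          	 */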
   11285 	opc = SPI_OPC_READ;
   11286 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11287 		opc |= SPI_OPC_A8;
   11288 
   11289 	wm_eeprom_sendbits(sc, opc, 8);
   11290 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11291 
   11292 	for (i = 0; i < wordcnt; i++) {
   11293 		wm_eeprom_recvbits(sc, &val, 16);
   11294 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11295 	}
   11296 
   11297 	/* Raise CS and clear SK. */
   11298 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11299 	CSR_WRITE(sc, WMREG_EECD, reg);
   11300 	CSR_WRITE_FLUSH(sc);
   11301 	delay(2);
   11302 
   11303 	return 0;
   11304 }
   11305 
   11306 /* Using with EERD */
   11307 
   11308 static int
   11309 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11310 {
   11311 	uint32_t attempts = 100000;
   11312 	uint32_t i, reg = 0;
   11313 	int32_t done = -1;
   11314 
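          	/* Poll the DONE bit for up to 100000 * 5us = 500ms. */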
   11315 	for (i = 0; i < attempts; i++) {
   11316 		reg = CSR_READ(sc, rw);
   11317 
   11318 		if (reg & EERD_DONE) {
   11319 			done = 0;
   11320 			break;
   11321 		}
   11322 		delay(5);
   11323 	}
   11324 
   11325 	return done;
   11326 }
   11327 
   11328 static int
   11329 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11330     uint16_t *data)
   11331 {
   11332 	int i, eerd = 0;
   11333 	int error = 0;
   11334 
   11335 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11336 		device_xname(sc->sc_dev), __func__));
   11337 
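          	/*
          	 * One EERD transaction per word: load the word address and the
          	 * START bit, poll for DONE, then pull the word out of the data
          	 * field.
          	 */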
   11338 	for (i = 0; i < wordcnt; i++) {
   11339 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11340 
   11341 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11342 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11343 		if (error != 0)
   11344 			break;
   11345 
   11346 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11347 	}
   11348 
   11349 	return error;
   11350 }
   11351 
   11352 /* Flash */
   11353 
   11354 static int
   11355 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11356 {
   11357 	uint32_t eecd;
   11358 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11359 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11360 	uint8_t sig_byte = 0;
   11361 
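          	/*
          	 * Each flash bank keeps a signature byte (the high byte of word
          	 * ICH_NVM_SIG_WORD); a bank is valid when that byte, masked
          	 * with ICH_NVM_VALID_SIG_MASK, equals ICH_NVM_SIG_VALUE.
          	 */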
   11362 	switch (sc->sc_type) {
   11363 	case WM_T_PCH_SPT:
   11364 		/*
   11365 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11366 		 * sector valid bits from the NVM.
   11367 		 */
   11368 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11369 		if ((*bank == 0) || (*bank == 1)) {
   11370 			aprint_error_dev(sc->sc_dev,
   11371 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11372 				*bank);
   11373 			return -1;
   11374 		} else {
   11375 			*bank = *bank - 2;
   11376 			return 0;
   11377 		}
   11378 	case WM_T_ICH8:
   11379 	case WM_T_ICH9:
   11380 		eecd = CSR_READ(sc, WMREG_EECD);
   11381 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11382 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11383 			return 0;
   11384 		}
   11385 		/* FALLTHROUGH */
   11386 	default:
   11387 		/* Default to 0 */
   11388 		*bank = 0;
   11389 
   11390 		/* Check bank 0 */
   11391 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11392 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11393 			*bank = 0;
   11394 			return 0;
   11395 		}
   11396 
   11397 		/* Check bank 1 */
   11398 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11399 		    &sig_byte);
   11400 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11401 			*bank = 1;
   11402 			return 0;
   11403 		}
   11404 	}
   11405 
   11406 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11407 		device_xname(sc->sc_dev)));
   11408 	return -1;
   11409 }
   11410 
   11411 /******************************************************************************
   11412  * This function does initial flash setup so that a new read/write/erase cycle
   11413  * can be started.
   11414  *
   11415  * sc - The pointer to the hw structure
   11416  ****************************************************************************/
   11417 static int32_t
   11418 wm_ich8_cycle_init(struct wm_softc *sc)
   11419 {
   11420 	uint16_t hsfsts;
   11421 	int32_t error = 1;
   11422 	int32_t i     = 0;
   11423 
   11424 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11425 
    11426 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
   11427 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11428 		return error;
   11429 	}
   11430 
   11431 	/* Clear FCERR in Hw status by writing 1 */
   11432 	/* Clear DAEL in Hw status by writing a 1 */
   11433 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11434 
   11435 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11436 
    11437 	/*
    11438 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11439 	 * check against in order to start a new cycle, or the FDONE bit
    11440 	 * should be changed in the hardware so that it is 1 after a
    11441 	 * hardware reset, which can then be used to tell whether a cycle
    11442 	 * is in progress or has completed.  We should also have a software
    11443 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    11444 	 * so that two threads' accesses to those bits are serialized, or
    11445 	 * some way to keep two threads from starting a cycle at once.
    11446 	 */
   11447 
   11448 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11449 		/*
   11450 		 * There is no cycle running at present, so we can start a
   11451 		 * cycle
   11452 		 */
   11453 
   11454 		/* Begin by setting Flash Cycle Done. */
   11455 		hsfsts |= HSFSTS_DONE;
   11456 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11457 		error = 0;
   11458 	} else {
   11459 		/*
    11460 		 * Otherwise poll for some time so the current cycle has a
   11461 		 * chance to end before giving up.
   11462 		 */
   11463 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11464 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11465 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11466 				error = 0;
   11467 				break;
   11468 			}
   11469 			delay(1);
   11470 		}
   11471 		if (error == 0) {
   11472 			/*
    11473 			 * The previous cycle ended within the timeout;
    11474 			 * now set the Flash Cycle Done.
   11475 			 */
   11476 			hsfsts |= HSFSTS_DONE;
   11477 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11478 		}
   11479 	}
   11480 	return error;
   11481 }
   11482 
   11483 /******************************************************************************
   11484  * This function starts a flash cycle and waits for its completion
   11485  *
   11486  * sc - The pointer to the hw structure
   11487  ****************************************************************************/
   11488 static int32_t
   11489 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11490 {
   11491 	uint16_t hsflctl;
   11492 	uint16_t hsfsts;
   11493 	int32_t error = 1;
   11494 	uint32_t i = 0;
   11495 
   11496 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11497 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11498 	hsflctl |= HSFCTL_GO;
   11499 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11500 
   11501 	/* Wait till FDONE bit is set to 1 */
   11502 	do {
   11503 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11504 		if (hsfsts & HSFSTS_DONE)
   11505 			break;
   11506 		delay(1);
   11507 		i++;
   11508 	} while (i < timeout);
    11509 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11510 		error = 0;
   11511 
   11512 	return error;
   11513 }
   11514 
   11515 /******************************************************************************
   11516  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11517  *
   11518  * sc - The pointer to the hw structure
   11519  * index - The index of the byte or word to read.
   11520  * size - Size of data to read, 1=byte 2=word, 4=dword
   11521  * data - Pointer to the word to store the value read.
   11522  *****************************************************************************/
   11523 static int32_t
   11524 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11525     uint32_t size, uint32_t *data)
   11526 {
   11527 	uint16_t hsfsts;
   11528 	uint16_t hsflctl;
   11529 	uint32_t flash_linear_address;
   11530 	uint32_t flash_data = 0;
   11531 	int32_t error = 1;
   11532 	int32_t count = 0;
   11533 
    11534 	if (size < 1 || size > 4 || data == NULL ||
   11535 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11536 		return error;
   11537 
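          	/*
          	 * FADDR takes an address linear in the whole flash part, so
          	 * bias the NVM-relative index by the base of the NVM region.
          	 */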
   11538 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11539 	    sc->sc_ich8_flash_base;
   11540 
   11541 	do {
   11542 		delay(1);
   11543 		/* Steps */
   11544 		error = wm_ich8_cycle_init(sc);
   11545 		if (error)
   11546 			break;
   11547 
   11548 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11549 		/* The BCOUNT field holds size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   11550 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11551 		    & HSFCTL_BCOUNT_MASK;
   11552 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11553 		if (sc->sc_type == WM_T_PCH_SPT) {
   11554 			/*
    11555 			 * In SPT, this register is in the LAN memory space,
    11556 			 * not flash, so only 32-bit access is supported.
   11557 			 */
   11558 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11559 			    (uint32_t)hsflctl);
   11560 		} else
   11561 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11562 
   11563 		/*
   11564 		 * Write the last 24 bits of index into Flash Linear address
   11565 		 * field in Flash Address
   11566 		 */
   11567 		/* TODO: TBD maybe check the index against the size of flash */
   11568 
   11569 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11570 
   11571 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11572 
   11573 		/*
    11574 		 * If FCERR is set, clear it and retry the whole sequence
    11575 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT more times; otherwise
    11576 		 * read the result out of Flash Data0, least significant
    11577 		 * byte first.
   11578 		 */
   11579 		if (error == 0) {
   11580 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11581 			if (size == 1)
   11582 				*data = (uint8_t)(flash_data & 0x000000FF);
   11583 			else if (size == 2)
   11584 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11585 			else if (size == 4)
   11586 				*data = (uint32_t)flash_data;
   11587 			break;
   11588 		} else {
   11589 			/*
   11590 			 * If we've gotten here, then things are probably
   11591 			 * completely hosed, but if the error condition is
   11592 			 * detected, it won't hurt to give it another try...
   11593 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11594 			 */
   11595 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11596 			if (hsfsts & HSFSTS_ERR) {
   11597 				/* Repeat for some time before giving up. */
   11598 				continue;
   11599 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11600 				break;
   11601 		}
   11602 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11603 
   11604 	return error;
   11605 }
   11606 
   11607 /******************************************************************************
   11608  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11609  *
   11610  * sc - pointer to wm_hw structure
   11611  * index - The index of the byte to read.
   11612  * data - Pointer to a byte to store the value read.
   11613  *****************************************************************************/
   11614 static int32_t
   11615 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11616 {
   11617 	int32_t status;
   11618 	uint32_t word = 0;
   11619 
   11620 	status = wm_read_ich8_data(sc, index, 1, &word);
   11621 	if (status == 0)
   11622 		*data = (uint8_t)word;
   11623 	else
   11624 		*data = 0;
   11625 
   11626 	return status;
   11627 }
   11628 
   11629 /******************************************************************************
   11630  * Reads a word from the NVM using the ICH8 flash access registers.
   11631  *
   11632  * sc - pointer to wm_hw structure
   11633  * index - The starting byte index of the word to read.
   11634  * data - Pointer to a word to store the value read.
   11635  *****************************************************************************/
   11636 static int32_t
   11637 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11638 {
   11639 	int32_t status;
   11640 	uint32_t word = 0;
   11641 
   11642 	status = wm_read_ich8_data(sc, index, 2, &word);
   11643 	if (status == 0)
   11644 		*data = (uint16_t)word;
   11645 	else
   11646 		*data = 0;
   11647 
   11648 	return status;
   11649 }
   11650 
   11651 /******************************************************************************
   11652  * Reads a dword from the NVM using the ICH8 flash access registers.
   11653  *
   11654  * sc - pointer to wm_hw structure
   11655  * index - The starting byte index of the word to read.
   11656  * data - Pointer to a word to store the value read.
   11657  *****************************************************************************/
   11658 static int32_t
   11659 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11660 {
   11661 	int32_t status;
   11662 
   11663 	status = wm_read_ich8_data(sc, index, 4, data);
   11664 	return status;
   11665 }
   11666 
   11667 /******************************************************************************
   11668  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11669  * register.
   11670  *
   11671  * sc - Struct containing variables accessed by shared code
   11672  * offset - offset of word in the EEPROM to read
   11673  * data - word read from the EEPROM
   11674  * words - number of words to read
   11675  *****************************************************************************/
   11676 static int
   11677 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11678 {
   11679 	int32_t  error = 0;
   11680 	uint32_t flash_bank = 0;
   11681 	uint32_t act_offset = 0;
   11682 	uint32_t bank_offset = 0;
   11683 	uint16_t word = 0;
   11684 	uint16_t i = 0;
   11685 
   11686 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11687 		device_xname(sc->sc_dev), __func__));
   11688 
   11689 	/*
   11690 	 * We need to know which is the valid flash bank.  In the event
   11691 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11692 	 * managing flash_bank.  So it cannot be trusted and needs
   11693 	 * to be updated with each read.
   11694 	 */
   11695 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11696 	if (error) {
   11697 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11698 			device_xname(sc->sc_dev)));
   11699 		flash_bank = 0;
   11700 	}
   11701 
   11702 	/*
   11703 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11704 	 * size
   11705 	 */
   11706 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11707 
   11708 	error = wm_get_swfwhw_semaphore(sc);
   11709 	if (error) {
   11710 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11711 		    __func__);
   11712 		return error;
   11713 	}
   11714 
   11715 	for (i = 0; i < words; i++) {
   11716 		/* The NVM part needs a byte offset, hence * 2 */
   11717 		act_offset = bank_offset + ((offset + i) * 2);
   11718 		error = wm_read_ich8_word(sc, act_offset, &word);
   11719 		if (error) {
   11720 			aprint_error_dev(sc->sc_dev,
   11721 			    "%s: failed to read NVM\n", __func__);
   11722 			break;
   11723 		}
   11724 		data[i] = word;
   11725 	}
   11726 
   11727 	wm_put_swfwhw_semaphore(sc);
   11728 	return error;
   11729 }
   11730 
   11731 /******************************************************************************
   11732  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11733  * register.
   11734  *
   11735  * sc - Struct containing variables accessed by shared code
   11736  * offset - offset of word in the EEPROM to read
   11737  * data - word read from the EEPROM
   11738  * words - number of words to read
   11739  *****************************************************************************/
   11740 static int
   11741 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11742 {
   11743 	int32_t  error = 0;
   11744 	uint32_t flash_bank = 0;
   11745 	uint32_t act_offset = 0;
   11746 	uint32_t bank_offset = 0;
   11747 	uint32_t dword = 0;
   11748 	uint16_t i = 0;
   11749 
   11750 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11751 		device_xname(sc->sc_dev), __func__));
   11752 
   11753 	/*
   11754 	 * We need to know which is the valid flash bank.  In the event
   11755 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11756 	 * managing flash_bank.  So it cannot be trusted and needs
   11757 	 * to be updated with each read.
   11758 	 */
   11759 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11760 	if (error) {
   11761 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11762 			device_xname(sc->sc_dev)));
   11763 		flash_bank = 0;
   11764 	}
   11765 
   11766 	/*
   11767 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11768 	 * size
   11769 	 */
   11770 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11771 
   11772 	error = wm_get_swfwhw_semaphore(sc);
   11773 	if (error) {
   11774 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11775 		    __func__);
   11776 		return error;
   11777 	}
   11778 
   11779 	for (i = 0; i < words; i++) {
   11780 		/* The NVM part needs a byte offset, hence * 2 */
   11781 		act_offset = bank_offset + ((offset + i) * 2);
   11782 		/* but we must read dword aligned, so mask ... */
   11783 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11784 		if (error) {
   11785 			aprint_error_dev(sc->sc_dev,
   11786 			    "%s: failed to read NVM\n", __func__);
   11787 			break;
   11788 		}
   11789 		/* ... and pick out low or high word */
   11790 		if ((act_offset & 0x2) == 0)
   11791 			data[i] = (uint16_t)(dword & 0xFFFF);
   11792 		else
   11793 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11794 	}
   11795 
   11796 	wm_put_swfwhw_semaphore(sc);
   11797 	return error;
   11798 }
   11799 
   11800 /* iNVM */
   11801 
   11802 static int
   11803 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11804 {
    11805 	int32_t  rv = -1;	/* Fail unless the requested word is found */
   11806 	uint32_t invm_dword;
   11807 	uint16_t i;
   11808 	uint8_t record_type, word_address;
   11809 
   11810 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11811 		device_xname(sc->sc_dev), __func__));
   11812 
   11813 	for (i = 0; i < INVM_SIZE; i++) {
   11814 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11815 		/* Get record type */
   11816 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11817 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11818 			break;
   11819 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11820 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11821 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11822 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11823 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11824 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11825 			if (word_address == address) {
   11826 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11827 				rv = 0;
   11828 				break;
   11829 			}
   11830 		}
   11831 	}
   11832 
   11833 	return rv;
   11834 }
   11835 
   11836 static int
   11837 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11838 {
   11839 	int rv = 0;
   11840 	int i;
   11841 
   11842 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11843 		device_xname(sc->sc_dev), __func__));
   11844 
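          	/*
          	 * Only a handful of words are mapped in iNVM.  The MAC address
          	 * words must really be present; the CFG/LED words below fall
          	 * back to their I211 hardware defaults when absent.
          	 */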
   11845 	for (i = 0; i < words; i++) {
   11846 		switch (offset + i) {
   11847 		case NVM_OFF_MACADDR:
   11848 		case NVM_OFF_MACADDR1:
   11849 		case NVM_OFF_MACADDR2:
   11850 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11851 			if (rv != 0) {
   11852 				data[i] = 0xffff;
   11853 				rv = -1;
   11854 			}
   11855 			break;
   11856 		case NVM_OFF_CFG2:
   11857 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11858 			if (rv != 0) {
   11859 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11860 				rv = 0;
   11861 			}
   11862 			break;
   11863 		case NVM_OFF_CFG4:
   11864 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11865 			if (rv != 0) {
   11866 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11867 				rv = 0;
   11868 			}
   11869 			break;
   11870 		case NVM_OFF_LED_1_CFG:
   11871 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11872 			if (rv != 0) {
   11873 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11874 				rv = 0;
   11875 			}
   11876 			break;
   11877 		case NVM_OFF_LED_0_2_CFG:
   11878 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11879 			if (rv != 0) {
   11880 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11881 				rv = 0;
   11882 			}
   11883 			break;
   11884 		case NVM_OFF_ID_LED_SETTINGS:
   11885 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11886 			if (rv != 0) {
   11887 				*data = ID_LED_RESERVED_FFFF;
   11888 				rv = 0;
   11889 			}
   11890 			break;
   11891 		default:
   11892 			DPRINTF(WM_DEBUG_NVM,
   11893 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11894 			*data = NVM_RESERVED_WORD;
   11895 			break;
   11896 		}
   11897 	}
   11898 
   11899 	return rv;
   11900 }
   11901 
   11902 /* Lock, detecting NVM type, validate checksum, version and read */
   11903 
   11904 /*
   11905  * wm_nvm_acquire:
   11906  *
   11907  *	Perform the EEPROM handshake required on some chips.
   11908  */
   11909 static int
   11910 wm_nvm_acquire(struct wm_softc *sc)
   11911 {
   11912 	uint32_t reg;
   11913 	int x;
   11914 	int ret = 0;
   11915 
   11916 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11917 		device_xname(sc->sc_dev), __func__));
   11918 
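          	/* Use whichever lock this chip implements (set at attach time) */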
   11919 	if (sc->sc_type >= WM_T_ICH8) {
   11920 		ret = wm_get_nvm_ich8lan(sc);
   11921 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11922 		ret = wm_get_swfwhw_semaphore(sc);
   11923 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11924 		/* This will also do wm_get_swsm_semaphore() if needed */
   11925 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11926 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11927 		ret = wm_get_swsm_semaphore(sc);
   11928 	}
   11929 
   11930 	if (ret) {
   11931 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11932 			__func__);
   11933 		return 1;
   11934 	}
   11935 
   11936 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11937 		reg = CSR_READ(sc, WMREG_EECD);
   11938 
   11939 		/* Request EEPROM access. */
   11940 		reg |= EECD_EE_REQ;
   11941 		CSR_WRITE(sc, WMREG_EECD, reg);
   11942 
   11943 		/* ..and wait for it to be granted. */
   11944 		for (x = 0; x < 1000; x++) {
   11945 			reg = CSR_READ(sc, WMREG_EECD);
   11946 			if (reg & EECD_EE_GNT)
   11947 				break;
   11948 			delay(5);
   11949 		}
   11950 		if ((reg & EECD_EE_GNT) == 0) {
   11951 			aprint_error_dev(sc->sc_dev,
   11952 			    "could not acquire EEPROM GNT\n");
   11953 			reg &= ~EECD_EE_REQ;
   11954 			CSR_WRITE(sc, WMREG_EECD, reg);
   11955 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11956 				wm_put_swfwhw_semaphore(sc);
   11957 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11958 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11959 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11960 				wm_put_swsm_semaphore(sc);
   11961 			return 1;
   11962 		}
   11963 	}
   11964 
   11965 	return 0;
   11966 }
   11967 
   11968 /*
   11969  * wm_nvm_release:
   11970  *
   11971  *	Release the EEPROM mutex.
   11972  */
   11973 static void
   11974 wm_nvm_release(struct wm_softc *sc)
   11975 {
   11976 	uint32_t reg;
   11977 
   11978 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11979 		device_xname(sc->sc_dev), __func__));
   11980 
   11981 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11982 		reg = CSR_READ(sc, WMREG_EECD);
   11983 		reg &= ~EECD_EE_REQ;
   11984 		CSR_WRITE(sc, WMREG_EECD, reg);
   11985 	}
   11986 
   11987 	if (sc->sc_type >= WM_T_ICH8) {
   11988 		wm_put_nvm_ich8lan(sc);
   11989 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11990 		wm_put_swfwhw_semaphore(sc);
   11991 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11992 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11993 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11994 		wm_put_swsm_semaphore(sc);
   11995 }
   11996 
   11997 static int
   11998 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11999 {
   12000 	uint32_t eecd = 0;
   12001 
   12002 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12003 	    || sc->sc_type == WM_T_82583) {
   12004 		eecd = CSR_READ(sc, WMREG_EECD);
   12005 
   12006 		/* Isolate bits 15 & 16 */
   12007 		eecd = ((eecd >> 15) & 0x03);
   12008 
   12009 		/* If both bits are set, device is Flash type */
   12010 		if (eecd == 0x03)
   12011 			return 0;
   12012 	}
   12013 	return 1;
   12014 }
   12015 
   12016 static int
   12017 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12018 {
   12019 	uint32_t eec;
   12020 
   12021 	eec = CSR_READ(sc, WMREG_EEC);
   12022 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12023 		return 1;
   12024 
   12025 	return 0;
   12026 }
   12027 
   12028 /*
   12029  * wm_nvm_validate_checksum
   12030  *
    12031  * The sum of the first NVM_SIZE (64) 16-bit words must equal NVM_CHECKSUM.
   12032  */
   12033 static int
   12034 wm_nvm_validate_checksum(struct wm_softc *sc)
   12035 {
   12036 	uint16_t checksum;
   12037 	uint16_t eeprom_data;
   12038 #ifdef WM_DEBUG
   12039 	uint16_t csum_wordaddr, valid_checksum;
   12040 #endif
   12041 	int i;
   12042 
   12043 	checksum = 0;
   12044 
   12045 	/* Don't check for I211 */
   12046 	if (sc->sc_type == WM_T_I211)
   12047 		return 0;
   12048 
   12049 #ifdef WM_DEBUG
   12050 	if (sc->sc_type == WM_T_PCH_LPT) {
   12051 		csum_wordaddr = NVM_OFF_COMPAT;
   12052 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12053 	} else {
   12054 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12055 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12056 	}
   12057 
   12058 	/* Dump EEPROM image for debug */
   12059 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12060 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12061 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12062 		/* XXX PCH_SPT? */
   12063 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12064 		if ((eeprom_data & valid_checksum) == 0) {
   12065 			DPRINTF(WM_DEBUG_NVM,
    12066 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12067 				device_xname(sc->sc_dev), eeprom_data,
   12068 				    valid_checksum));
   12069 		}
   12070 	}
   12071 
   12072 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12073 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12074 		for (i = 0; i < NVM_SIZE; i++) {
   12075 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12076 				printf("XXXX ");
   12077 			else
   12078 				printf("%04hx ", eeprom_data);
   12079 			if (i % 8 == 7)
   12080 				printf("\n");
   12081 		}
   12082 	}
   12083 
   12084 #endif /* WM_DEBUG */
   12085 
   12086 	for (i = 0; i < NVM_SIZE; i++) {
   12087 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12088 			return 1;
   12089 		checksum += eeprom_data;
   12090 	}
   12091 
   12092 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12093 #ifdef WM_DEBUG
   12094 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12095 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12096 #endif
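          		/* Note: a mismatch is deliberately not treated as fatal */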
   12097 	}
   12098 
   12099 	return 0;
   12100 }
   12101 
   12102 static void
   12103 wm_nvm_version_invm(struct wm_softc *sc)
   12104 {
   12105 	uint32_t dword;
   12106 
   12107 	/*
    12108 	 * Linux's code to decode the version is very strange, so we don't
    12109 	 * follow that algorithm and just use word 61 as the documentation
    12110 	 * describes.  Perhaps it's not perfect, though...
   12111 	 *
   12112 	 * Example:
   12113 	 *
   12114 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12115 	 */
   12116 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12117 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12118 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12119 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12120 }
   12121 
   12122 static void
   12123 wm_nvm_version(struct wm_softc *sc)
   12124 {
   12125 	uint16_t major, minor, build, patch;
   12126 	uint16_t uid0, uid1;
   12127 	uint16_t nvm_data;
   12128 	uint16_t off;
   12129 	bool check_version = false;
   12130 	bool check_optionrom = false;
   12131 	bool have_build = false;
   12132 
   12133 	/*
   12134 	 * Version format:
   12135 	 *
   12136 	 * XYYZ
   12137 	 * X0YZ
   12138 	 * X0YY
   12139 	 *
   12140 	 * Example:
   12141 	 *
   12142 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12143 	 *	82571	0x50a6	5.10.6?
   12144 	 *	82572	0x506a	5.6.10?
   12145 	 *	82572EI	0x5069	5.6.9?
   12146 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12147 	 *		0x2013	2.1.3?
    12148 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12149 	 */
   12150 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12151 	switch (sc->sc_type) {
   12152 	case WM_T_82571:
   12153 	case WM_T_82572:
   12154 	case WM_T_82574:
   12155 	case WM_T_82583:
   12156 		check_version = true;
   12157 		check_optionrom = true;
   12158 		have_build = true;
   12159 		break;
   12160 	case WM_T_82575:
   12161 	case WM_T_82576:
   12162 	case WM_T_82580:
   12163 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12164 			check_version = true;
   12165 		break;
   12166 	case WM_T_I211:
   12167 		wm_nvm_version_invm(sc);
   12168 		goto printver;
   12169 	case WM_T_I210:
   12170 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12171 			wm_nvm_version_invm(sc);
   12172 			goto printver;
   12173 		}
   12174 		/* FALLTHROUGH */
   12175 	case WM_T_I350:
   12176 	case WM_T_I354:
   12177 		check_version = true;
   12178 		check_optionrom = true;
   12179 		break;
   12180 	default:
   12181 		return;
   12182 	}
   12183 	if (check_version) {
   12184 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12185 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12186 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12187 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12188 			build = nvm_data & NVM_BUILD_MASK;
   12189 			have_build = true;
   12190 		} else
   12191 			minor = nvm_data & 0x00ff;
   12192 
    12193 		/* The minor number is BCD; convert it to decimal */
   12194 		minor = (minor / 16) * 10 + (minor % 16);
   12195 		sc->sc_nvm_ver_major = major;
   12196 		sc->sc_nvm_ver_minor = minor;
   12197 
   12198 printver:
   12199 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12200 		    sc->sc_nvm_ver_minor);
   12201 		if (have_build) {
   12202 			sc->sc_nvm_ver_build = build;
   12203 			aprint_verbose(".%d", build);
   12204 		}
   12205 	}
   12206 	if (check_optionrom) {
   12207 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12208 		/* Option ROM Version */
   12209 		if ((off != 0x0000) && (off != 0xffff)) {
   12210 			off += NVM_COMBO_VER_OFF;
   12211 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12212 			wm_nvm_read(sc, off, 1, &uid0);
   12213 			if ((uid0 != 0) && (uid0 != 0xffff)
   12214 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12215 				/* 16bits */
   12216 				major = uid0 >> 8;
   12217 				build = (uid0 << 8) | (uid1 >> 8);
   12218 				patch = uid1 & 0x00ff;
   12219 				aprint_verbose(", option ROM Version %d.%d.%d",
   12220 				    major, build, patch);
   12221 			}
   12222 		}
   12223 	}
   12224 
          	/* Re-read UID1; the option ROM probe above may clobber it */
          	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
    12225 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12226 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12227 }
   12228 
   12229 /*
   12230  * wm_nvm_read:
   12231  *
   12232  *	Read data from the serial EEPROM.
   12233  */
   12234 static int
   12235 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12236 {
   12237 	int rv;
   12238 
   12239 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12240 		device_xname(sc->sc_dev), __func__));
   12241 
   12242 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12243 		return 1;
   12244 
   12245 	if (wm_nvm_acquire(sc))
   12246 		return 1;
   12247 
   12248 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12249 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12250 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12251 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12252 	else if (sc->sc_type == WM_T_PCH_SPT)
   12253 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12254 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12255 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12256 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12257 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12258 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12259 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12260 	else
   12261 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12262 
   12263 	wm_nvm_release(sc);
   12264 	return rv;
   12265 }
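
          /*
           * Usage sketch (illustrative only, not part of the driver): read the
           * three 16-bit words of the MAC address.  wm_nvm_read() does the
           * locking and dispatches to whichever access method the chip needs.
           *
           *	uint16_t myea[ETHER_ADDR_LEN / 2];
           *
           *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea), myea) != 0)
           *		... handle the failure ...
           */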
   12266 
   12267 /*
   12268  * Hardware semaphores.
    12269  * Very complex...
   12270  */
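
          /*
           * A rough map of the schemes used by wm_nvm_acquire() above: older
           * chips arbitrate through the EECD request/grant bits, 8257x class
           * chips through the SWSM and SW_FW_SYNC registers, and ICH/PCH
           * chips through EXTCNFCTR ownership bits guarded by a software
           * mutex.
           */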
   12271 
   12272 static int
   12273 wm_get_null(struct wm_softc *sc)
   12274 {
   12275 
   12276 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12277 		device_xname(sc->sc_dev), __func__));
   12278 	return 0;
   12279 }
   12280 
   12281 static void
   12282 wm_put_null(struct wm_softc *sc)
   12283 {
   12284 
   12285 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12286 		device_xname(sc->sc_dev), __func__));
   12287 	return;
   12288 }
   12289 
   12290 /*
   12291  * Get hardware semaphore.
   12292  * Same as e1000_get_hw_semaphore_generic()
   12293  */
   12294 static int
   12295 wm_get_swsm_semaphore(struct wm_softc *sc)
   12296 {
   12297 	int32_t timeout;
   12298 	uint32_t swsm;
   12299 
   12300 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12301 		device_xname(sc->sc_dev), __func__));
   12302 	KASSERT(sc->sc_nvm_wordsize > 0);
   12303 
   12304 	/* Get the SW semaphore. */
   12305 	timeout = sc->sc_nvm_wordsize + 1;
   12306 	while (timeout) {
   12307 		swsm = CSR_READ(sc, WMREG_SWSM);
   12308 
   12309 		if ((swsm & SWSM_SMBI) == 0)
   12310 			break;
   12311 
   12312 		delay(50);
   12313 		timeout--;
   12314 	}
   12315 
   12316 	if (timeout == 0) {
   12317 		aprint_error_dev(sc->sc_dev,
   12318 		    "could not acquire SWSM SMBI\n");
   12319 		return 1;
   12320 	}
   12321 
   12322 	/* Get the FW semaphore. */
   12323 	timeout = sc->sc_nvm_wordsize + 1;
   12324 	while (timeout) {
   12325 		swsm = CSR_READ(sc, WMREG_SWSM);
   12326 		swsm |= SWSM_SWESMBI;
   12327 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12328 		/* If we managed to set the bit we got the semaphore. */
   12329 		swsm = CSR_READ(sc, WMREG_SWSM);
   12330 		if (swsm & SWSM_SWESMBI)
   12331 			break;
   12332 
   12333 		delay(50);
   12334 		timeout--;
   12335 	}
   12336 
   12337 	if (timeout == 0) {
   12338 		aprint_error_dev(sc->sc_dev,
   12339 		    "could not acquire SWSM SWESMBI\n");
   12340 		/* Release semaphores */
   12341 		wm_put_swsm_semaphore(sc);
   12342 		return 1;
   12343 	}
   12344 	return 0;
   12345 }
   12346 
   12347 /*
   12348  * Put hardware semaphore.
   12349  * Same as e1000_put_hw_semaphore_generic()
   12350  */
   12351 static void
   12352 wm_put_swsm_semaphore(struct wm_softc *sc)
   12353 {
   12354 	uint32_t swsm;
   12355 
   12356 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12357 		device_xname(sc->sc_dev), __func__));
   12358 
   12359 	swsm = CSR_READ(sc, WMREG_SWSM);
   12360 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12361 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12362 }
   12363 
   12364 /*
   12365  * Get SW/FW semaphore.
   12366  * Same as e1000_acquire_swfw_sync_82575().
   12367  */
   12368 static int
   12369 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12370 {
   12371 	uint32_t swfw_sync;
   12372 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12373 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12374 	int timeout = 200;
   12375 
   12376 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12377 		device_xname(sc->sc_dev), __func__));
   12378 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12379 
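          	/*
          	 * The resource is free only when both the software and the
          	 * firmware bit for this mask are clear; claim it by setting the
          	 * software bit while holding the SWSM semaphore.
          	 */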
   12380 	for (timeout = 0; timeout < 200; timeout++) {
   12381 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12382 			if (wm_get_swsm_semaphore(sc)) {
   12383 				aprint_error_dev(sc->sc_dev,
   12384 				    "%s: failed to get semaphore\n",
   12385 				    __func__);
   12386 				return 1;
   12387 			}
   12388 		}
   12389 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12390 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12391 			swfw_sync |= swmask;
   12392 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12393 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12394 				wm_put_swsm_semaphore(sc);
   12395 			return 0;
   12396 		}
   12397 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12398 			wm_put_swsm_semaphore(sc);
   12399 		delay(5000);
   12400 	}
   12401 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12402 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12403 	return 1;
   12404 }
   12405 
   12406 static void
   12407 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12408 {
   12409 	uint32_t swfw_sync;
   12410 
   12411 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12412 		device_xname(sc->sc_dev), __func__));
   12413 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12414 
   12415 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12416 		while (wm_get_swsm_semaphore(sc) != 0)
   12417 			continue;
   12418 	}
   12419 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12420 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12421 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12422 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12423 		wm_put_swsm_semaphore(sc);
   12424 }
   12425 
   12426 static int
   12427 wm_get_phy_82575(struct wm_softc *sc)
   12428 {
   12429 
   12430 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12431 		device_xname(sc->sc_dev), __func__));
   12432 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12433 }
   12434 
   12435 static void
   12436 wm_put_phy_82575(struct wm_softc *sc)
   12437 {
   12438 
   12439 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12440 		device_xname(sc->sc_dev), __func__));
   12441 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12442 }
   12443 
   12444 static int
   12445 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12446 {
   12447 	uint32_t ext_ctrl;
   12448 	int timeout = 200;
   12449 
   12450 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12451 		device_xname(sc->sc_dev), __func__));
   12452 
   12453 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12454 	for (timeout = 0; timeout < 200; timeout++) {
   12455 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12456 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12457 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12458 
   12459 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12460 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12461 			return 0;
   12462 		delay(5000);
   12463 	}
   12464 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12465 	    device_xname(sc->sc_dev), ext_ctrl);
   12466 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12467 	return 1;
   12468 }
   12469 
   12470 static void
   12471 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12472 {
   12473 	uint32_t ext_ctrl;
   12474 
   12475 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12476 		device_xname(sc->sc_dev), __func__));
   12477 
   12478 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12479 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12480 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12481 
   12482 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12483 }
   12484 
   12485 static int
   12486 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12487 {
   12488 	uint32_t ext_ctrl;
   12489 	int timeout;
   12490 
   12491 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12492 		device_xname(sc->sc_dev), __func__));
   12493 	mutex_enter(sc->sc_ich_phymtx);
   12494 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12495 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12496 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12497 			break;
   12498 		delay(1000);
   12499 	}
   12500 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12501 		printf("%s: SW has already locked the resource\n",
   12502 		    device_xname(sc->sc_dev));
   12503 		goto out;
   12504 	}
   12505 
   12506 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12507 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12508 	for (timeout = 0; timeout < 1000; timeout++) {
   12509 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12510 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12511 			break;
   12512 		delay(1000);
   12513 	}
   12514 	if (timeout >= 1000) {
   12515 		printf("%s: failed to acquire semaphore\n",
   12516 		    device_xname(sc->sc_dev));
   12517 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12518 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12519 		goto out;
   12520 	}
   12521 	return 0;
   12522 
   12523 out:
   12524 	mutex_exit(sc->sc_ich_phymtx);
   12525 	return 1;
   12526 }
   12527 
   12528 static void
   12529 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12530 {
   12531 	uint32_t ext_ctrl;
   12532 
   12533 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12534 		device_xname(sc->sc_dev), __func__));
   12535 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12536 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12537 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12538 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12539 	} else {
   12540 		printf("%s: Semaphore unexpectedly released\n",
   12541 		    device_xname(sc->sc_dev));
   12542 	}
   12543 
   12544 	mutex_exit(sc->sc_ich_phymtx);
   12545 }
   12546 
   12547 static int
   12548 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12549 {
   12550 
   12551 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12552 		device_xname(sc->sc_dev), __func__));
   12553 	mutex_enter(sc->sc_ich_nvmmtx);
   12554 
   12555 	return 0;
   12556 }
   12557 
   12558 static void
   12559 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12560 {
   12561 
   12562 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12563 		device_xname(sc->sc_dev), __func__));
   12564 	mutex_exit(sc->sc_ich_nvmmtx);
   12565 }
   12566 
   12567 static int
   12568 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12569 {
   12570 	int i = 0;
   12571 	uint32_t reg;
   12572 
   12573 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12574 		device_xname(sc->sc_dev), __func__));
   12575 
   12576 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12577 	do {
   12578 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12579 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12580 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12581 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12582 			break;
   12583 		delay(2*1000);
   12584 		i++;
   12585 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12586 
   12587 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12588 		wm_put_hw_semaphore_82573(sc);
   12589 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12590 		    device_xname(sc->sc_dev));
   12591 		return -1;
   12592 	}
   12593 
   12594 	return 0;
   12595 }
   12596 
   12597 static void
   12598 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12599 {
   12600 	uint32_t reg;
   12601 
   12602 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12603 		device_xname(sc->sc_dev), __func__));
   12604 
   12605 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12606 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12607 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12608 }
   12609 
   12610 /*
   12611  * Management mode and power management related subroutines.
   12612  * BMC, AMT, suspend/resume and EEE.
   12613  */
   12614 
   12615 #ifdef WM_WOL
   12616 static int
   12617 wm_check_mng_mode(struct wm_softc *sc)
   12618 {
   12619 	int rv;
   12620 
   12621 	switch (sc->sc_type) {
   12622 	case WM_T_ICH8:
   12623 	case WM_T_ICH9:
   12624 	case WM_T_ICH10:
   12625 	case WM_T_PCH:
   12626 	case WM_T_PCH2:
   12627 	case WM_T_PCH_LPT:
   12628 	case WM_T_PCH_SPT:
   12629 		rv = wm_check_mng_mode_ich8lan(sc);
   12630 		break;
   12631 	case WM_T_82574:
   12632 	case WM_T_82583:
   12633 		rv = wm_check_mng_mode_82574(sc);
   12634 		break;
   12635 	case WM_T_82571:
   12636 	case WM_T_82572:
   12637 	case WM_T_82573:
   12638 	case WM_T_80003:
   12639 		rv = wm_check_mng_mode_generic(sc);
   12640 		break;
   12641 	default:
    12642 		/* nothing to do */
   12643 		rv = 0;
   12644 		break;
   12645 	}
   12646 
   12647 	return rv;
   12648 }
   12649 
   12650 static int
   12651 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12652 {
   12653 	uint32_t fwsm;
   12654 
   12655 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12656 
   12657 	if (((fwsm & FWSM_FW_VALID) != 0)
   12658 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12659 		return 1;
   12660 
   12661 	return 0;
   12662 }
   12663 
   12664 static int
   12665 wm_check_mng_mode_82574(struct wm_softc *sc)
   12666 {
   12667 	uint16_t data;
   12668 
   12669 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12670 
   12671 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12672 		return 1;
   12673 
   12674 	return 0;
   12675 }
   12676 
   12677 static int
   12678 wm_check_mng_mode_generic(struct wm_softc *sc)
   12679 {
   12680 	uint32_t fwsm;
   12681 
   12682 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12683 
   12684 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12685 		return 1;
   12686 
   12687 	return 0;
   12688 }
   12689 #endif /* WM_WOL */
   12690 
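          /*
           * Decide whether management packets should pass through to the host:
           * the TCO receive path must be enabled, and either the firmware is in
           * IAMT mode with manageability clock-gating off, the NVM (82574/82583)
           * selects pass-through mode, or plain SMBus management is enabled
           * without ASF.
           */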
   12691 static int
   12692 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12693 {
   12694 	uint32_t manc, fwsm, factps;
   12695 
   12696 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12697 		return 0;
   12698 
   12699 	manc = CSR_READ(sc, WMREG_MANC);
   12700 
   12701 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12702 		device_xname(sc->sc_dev), manc));
   12703 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12704 		return 0;
   12705 
   12706 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12707 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12708 		factps = CSR_READ(sc, WMREG_FACTPS);
   12709 		if (((factps & FACTPS_MNGCG) == 0)
   12710 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12711 			return 1;
   12712 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12713 		uint16_t data;
   12714 
   12715 		factps = CSR_READ(sc, WMREG_FACTPS);
   12716 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12717 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12718 			device_xname(sc->sc_dev), factps, data));
   12719 		if (((factps & FACTPS_MNGCG) == 0)
   12720 		    && ((data & NVM_CFG2_MNGM_MASK)
   12721 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12722 			return 1;
   12723 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12724 	    && ((manc & MANC_ASF_EN) == 0))
   12725 		return 1;
   12726 
   12727 	return 0;
   12728 }
   12729 
   12730 static bool
   12731 wm_phy_resetisblocked(struct wm_softc *sc)
   12732 {
   12733 	bool blocked = false;
   12734 	uint32_t reg;
   12735 	int i = 0;
   12736 
   12737 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12738 		device_xname(sc->sc_dev), __func__));
   12739 
   12740 	switch (sc->sc_type) {
   12741 	case WM_T_ICH8:
   12742 	case WM_T_ICH9:
   12743 	case WM_T_ICH10:
   12744 	case WM_T_PCH:
   12745 	case WM_T_PCH2:
   12746 	case WM_T_PCH_LPT:
   12747 	case WM_T_PCH_SPT:
   12748 		do {
   12749 			reg = CSR_READ(sc, WMREG_FWSM);
   12750 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12751 				blocked = true;
   12752 				delay(10*1000);
   12753 				continue;
   12754 			}
   12755 			blocked = false;
   12756 		} while (blocked && (i++ < 30));
   12757 		return blocked;
   12758 		break;
   12759 	case WM_T_82571:
   12760 	case WM_T_82572:
   12761 	case WM_T_82573:
   12762 	case WM_T_82574:
   12763 	case WM_T_82583:
   12764 	case WM_T_80003:
   12765 		reg = CSR_READ(sc, WMREG_MANC);
   12766 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12767 			return true;
   12768 		else
   12769 			return false;
   12770 		break;
   12771 	default:
   12772 		/* no problem */
   12773 		break;
   12774 	}
   12775 
   12776 	return false;
   12777 }
   12778 
   12779 static void
   12780 wm_get_hw_control(struct wm_softc *sc)
   12781 {
   12782 	uint32_t reg;
   12783 
   12784 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12785 		device_xname(sc->sc_dev), __func__));
   12786 
   12787 	if (sc->sc_type == WM_T_82573) {
   12788 		reg = CSR_READ(sc, WMREG_SWSM);
   12789 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12790 	} else if (sc->sc_type >= WM_T_82571) {
   12791 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12792 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12793 	}
   12794 }
   12795 
   12796 static void
   12797 wm_release_hw_control(struct wm_softc *sc)
   12798 {
   12799 	uint32_t reg;
   12800 
   12801 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12802 		device_xname(sc->sc_dev), __func__));
   12803 
   12804 	if (sc->sc_type == WM_T_82573) {
   12805 		reg = CSR_READ(sc, WMREG_SWSM);
   12806 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12807 	} else if (sc->sc_type >= WM_T_82571) {
   12808 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12809 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12810 	}
   12811 }
   12812 
   12813 static void
   12814 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12815 {
   12816 	uint32_t reg;
   12817 
   12818 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12819 		device_xname(sc->sc_dev), __func__));
   12820 
   12821 	if (sc->sc_type < WM_T_PCH2)
   12822 		return;
   12823 
   12824 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12825 
   12826 	if (gate)
   12827 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12828 	else
   12829 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12830 
   12831 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12832 }
   12833 
   12834 static void
   12835 wm_smbustopci(struct wm_softc *sc)
   12836 {
   12837 	uint32_t fwsm, reg;
   12838 	int rv = 0;
   12839 
   12840 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12841 		device_xname(sc->sc_dev), __func__));
   12842 
   12843 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12844 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12845 
   12846 	/* Disable ULP */
   12847 	wm_ulp_disable(sc);
   12848 
   12849 	/* Acquire PHY semaphore */
   12850 	sc->phy.acquire(sc);
   12851 
   12852 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12853 	switch (sc->sc_type) {
   12854 	case WM_T_PCH_LPT:
   12855 	case WM_T_PCH_SPT:
   12856 		if (wm_phy_is_accessible_pchlan(sc))
   12857 			break;
   12858 
   12859 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12860 		reg |= CTRL_EXT_FORCE_SMBUS;
   12861 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12862 #if 0
   12863 		/* XXX Isn't this required??? */
   12864 		CSR_WRITE_FLUSH(sc);
   12865 #endif
   12866 		delay(50 * 1000);
   12867 		/* FALLTHROUGH */
   12868 	case WM_T_PCH2:
   12869 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12870 			break;
   12871 		/* FALLTHROUGH */
   12872 	case WM_T_PCH:
   12873 		if (sc->sc_type == WM_T_PCH)
   12874 			if ((fwsm & FWSM_FW_VALID) != 0)
   12875 				break;
   12876 
   12877 		if (wm_phy_resetisblocked(sc) == true) {
   12878 			printf("XXX reset is blocked(3)\n");
   12879 			break;
   12880 		}
   12881 
   12882 		wm_toggle_lanphypc_pch_lpt(sc);
   12883 
   12884 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12885 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12886 				break;
   12887 
   12888 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12889 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12890 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12891 
   12892 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12893 				break;
   12894 			rv = -1;
   12895 		}
   12896 		break;
   12897 	default:
   12898 		break;
   12899 	}
   12900 
   12901 	/* Release semaphore */
   12902 	sc->phy.release(sc);
   12903 
   12904 	if (rv == 0) {
   12905 		if (wm_phy_resetisblocked(sc)) {
   12906 			printf("XXX reset is blocked(4)\n");
   12907 			goto out;
   12908 		}
   12909 		wm_reset_phy(sc);
   12910 		if (wm_phy_resetisblocked(sc))
   12911 			printf("XXX reset is blocked(4)\n");
   12912 	}
   12913 
   12914 out:
   12915 	/*
   12916 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12917 	 */
   12918 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12919 		delay(10*1000);
   12920 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12921 	}
   12922 }
   12923 
   12924 static void
   12925 wm_init_manageability(struct wm_softc *sc)
   12926 {
   12927 
   12928 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12929 		device_xname(sc->sc_dev), __func__));
   12930 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12931 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12932 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12933 
   12934 		/* Disable hardware interception of ARP */
   12935 		manc &= ~MANC_ARP_EN;
   12936 
   12937 		/* Enable receiving management packets to the host */
   12938 		if (sc->sc_type >= WM_T_82571) {
   12939 			manc |= MANC_EN_MNG2HOST;
    12940 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12941 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12942 		}
   12943 
   12944 		CSR_WRITE(sc, WMREG_MANC, manc);
   12945 	}
   12946 }
   12947 
   12948 static void
   12949 wm_release_manageability(struct wm_softc *sc)
   12950 {
   12951 
   12952 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12953 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12954 
   12955 		manc |= MANC_ARP_EN;
   12956 		if (sc->sc_type >= WM_T_82571)
   12957 			manc &= ~MANC_EN_MNG2HOST;
   12958 
   12959 		CSR_WRITE(sc, WMREG_MANC, manc);
   12960 	}
   12961 }
   12962 
   12963 static void
   12964 wm_get_wakeup(struct wm_softc *sc)
   12965 {
   12966 
   12967 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12968 	switch (sc->sc_type) {
   12969 	case WM_T_82573:
   12970 	case WM_T_82583:
   12971 		sc->sc_flags |= WM_F_HAS_AMT;
   12972 		/* FALLTHROUGH */
   12973 	case WM_T_80003:
   12974 	case WM_T_82575:
   12975 	case WM_T_82576:
   12976 	case WM_T_82580:
   12977 	case WM_T_I350:
   12978 	case WM_T_I354:
   12979 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12980 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12981 		/* FALLTHROUGH */
   12982 	case WM_T_82541:
   12983 	case WM_T_82541_2:
   12984 	case WM_T_82547:
   12985 	case WM_T_82547_2:
   12986 	case WM_T_82571:
   12987 	case WM_T_82572:
   12988 	case WM_T_82574:
   12989 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12990 		break;
   12991 	case WM_T_ICH8:
   12992 	case WM_T_ICH9:
   12993 	case WM_T_ICH10:
   12994 	case WM_T_PCH:
   12995 	case WM_T_PCH2:
   12996 	case WM_T_PCH_LPT:
   12997 	case WM_T_PCH_SPT:
   12998 		sc->sc_flags |= WM_F_HAS_AMT;
   12999 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13000 		break;
   13001 	default:
   13002 		break;
   13003 	}
   13004 
   13005 	/* 1: HAS_MANAGE */
   13006 	if (wm_enable_mng_pass_thru(sc) != 0)
   13007 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13008 
   13009 #ifdef WM_DEBUG
   13010 	printf("\n");
   13011 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   13012 		printf("HAS_AMT,");
   13013 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   13014 		printf("ARC_SUBSYS_VALID,");
   13015 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   13016 		printf("ASF_FIRMWARE_PRES,");
   13017 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   13018 		printf("HAS_MANAGE,");
   13019 	printf("\n");
   13020 #endif
   13021 	/*
    13022 	 * Note that the WOL flags are set after the EEPROM stuff
    13023 	 * is reset.
   13024 	 */
   13025 }
   13026 
   13027 /*
   13028  * Unconfigure Ultra Low Power mode.
   13029  * Only for I217 and newer (see below).
   13030  */
   13031 static void
   13032 wm_ulp_disable(struct wm_softc *sc)
   13033 {
   13034 	uint32_t reg;
   13035 	int i = 0;
   13036 
   13037 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13038 		device_xname(sc->sc_dev), __func__));
   13039 	/* Exclude old devices */
   13040 	if ((sc->sc_type < WM_T_PCH_LPT)
   13041 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13042 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13043 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13044 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13045 		return;
   13046 
   13047 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13048 		/* Request ME un-configure ULP mode in the PHY */
   13049 		reg = CSR_READ(sc, WMREG_H2ME);
   13050 		reg &= ~H2ME_ULP;
   13051 		reg |= H2ME_ENFORCE_SETTINGS;
   13052 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13053 
   13054 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13055 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13056 			if (i++ == 30) {
    13057 				printf("%s: timed out waiting for ME\n", __func__);
   13058 				return;
   13059 			}
   13060 			delay(10 * 1000);
   13061 		}
   13062 		reg = CSR_READ(sc, WMREG_H2ME);
   13063 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13064 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13065 
   13066 		return;
   13067 	}
   13068 
   13069 	/* Acquire semaphore */
   13070 	sc->phy.acquire(sc);
   13071 
   13072 	/* Toggle LANPHYPC */
   13073 	wm_toggle_lanphypc_pch_lpt(sc);
   13074 
   13075 	/* Unforce SMBus mode in PHY */
   13076 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13077 	if (reg == 0x0000 || reg == 0xffff) {
   13078 		uint32_t reg2;
   13079 
   13080 		printf("%s: Force SMBus first.\n", __func__);
   13081 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13082 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13084 		delay(50 * 1000);
   13085 
   13086 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13087 	}
   13088 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13089 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13090 
   13091 	/* Unforce SMBus mode in MAC */
   13092 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13093 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13094 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13095 
   13096 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13097 	reg |= HV_PM_CTRL_K1_ENA;
   13098 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13099 
   13100 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13101 	reg &= ~(I218_ULP_CONFIG1_IND
   13102 	    | I218_ULP_CONFIG1_STICKY_ULP
   13103 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13104 	    | I218_ULP_CONFIG1_WOL_HOST
   13105 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13106 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13107 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13108 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13109 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13110 	reg |= I218_ULP_CONFIG1_START;
   13111 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13112 
   13113 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13114 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13115 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13116 
   13117 	/* Release semaphore */
   13118 	sc->phy.release(sc);
   13119 	wm_gmii_reset(sc);
   13120 	delay(50 * 1000);
   13121 }
   13122 
   13123 /* WOL in the newer chipset interfaces (pchlan) */
   13124 static void
   13125 wm_enable_phy_wakeup(struct wm_softc *sc)
   13126 {
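          	/*
          	 * XXX Not implemented yet; the disabled block below only
          	 * sketches the intended sequence.
          	 */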
   13127 #if 0
   13128 	uint16_t preg;
   13129 
   13130 	/* Copy MAC RARs to PHY RARs */
   13131 
   13132 	/* Copy MAC MTA to PHY MTA */
   13133 
   13134 	/* Configure PHY Rx Control register */
   13135 
   13136 	/* Enable PHY wakeup in MAC register */
   13137 
   13138 	/* Configure and enable PHY wakeup in PHY registers */
   13139 
   13140 	/* Activate PHY wakeup */
   13141 
   13142 	/* XXX */
   13143 #endif
   13144 }
   13145 
   13146 /* Power down workaround on D3 */
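          /*
           * Before going to D3, try (at most twice) to put the IGP3 voltage
           * regulator into shutdown mode, resetting the PHY and retrying once
           * if the setting does not stick.
           */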
   13147 static void
   13148 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13149 {
   13150 	uint32_t reg;
   13151 	int i;
   13152 
   13153 	for (i = 0; i < 2; i++) {
   13154 		/* Disable link */
   13155 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13156 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13157 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13158 
   13159 		/*
   13160 		 * Call gig speed drop workaround on Gig disable before
   13161 		 * accessing any PHY registers
   13162 		 */
   13163 		if (sc->sc_type == WM_T_ICH8)
   13164 			wm_gig_downshift_workaround_ich8lan(sc);
   13165 
   13166 		/* Write VR power-down enable */
   13167 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13168 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13169 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13170 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13171 
   13172 		/* Read it back and test */
   13173 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13174 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13175 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13176 			break;
   13177 
   13178 		/* Issue PHY reset and repeat at most one more time */
   13179 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13180 	}
   13181 }
   13182 
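          /*
           * Arm the device for wakeup: advertise APM wakeup, apply the ICH/PCH
           * gig-disable and PHY power-down workarounds, program the wakeup
           * filters and finally set PME_EN in the PCI power management
           * registers.
           */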
   13183 static void
   13184 wm_enable_wakeup(struct wm_softc *sc)
   13185 {
   13186 	uint32_t reg, pmreg;
   13187 	pcireg_t pmode;
   13188 
   13189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13190 		device_xname(sc->sc_dev), __func__));
   13191 
   13192 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13193 		&pmreg, NULL) == 0)
   13194 		return;
   13195 
   13196 	/* Advertise the wakeup capability */
   13197 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13198 	    | CTRL_SWDPIN(3));
   13199 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13200 
   13201 	/* ICH workaround */
   13202 	switch (sc->sc_type) {
   13203 	case WM_T_ICH8:
   13204 	case WM_T_ICH9:
   13205 	case WM_T_ICH10:
   13206 	case WM_T_PCH:
   13207 	case WM_T_PCH2:
   13208 	case WM_T_PCH_LPT:
   13209 	case WM_T_PCH_SPT:
   13210 		/* Disable gig during WOL */
   13211 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13212 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13213 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13214 		if (sc->sc_type == WM_T_PCH)
   13215 			wm_gmii_reset(sc);
   13216 
   13217 		/* Power down workaround */
   13218 		if (sc->sc_phytype == WMPHY_82577) {
   13219 			struct mii_softc *child;
   13220 
   13221 			/* Assume that the PHY is copper */
   13222 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13223 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13224 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13225 				    (768 << 5) | 25, 0x0444); /* page 768 reg 25; magic value */
   13226 		}
   13227 		break;
   13228 	default:
   13229 		break;
   13230 	}
   13231 
   13232 	/* Keep the laser running on fiber adapters */
   13233 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13234 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13235 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13236 		reg |= CTRL_EXT_SWDPIN(3);
   13237 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13238 	}
   13239 
   13240 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13241 #if 0	/* for the multicast packet */
   13242 	reg |= WUFC_MC;
   13243 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13244 #endif
   13245 
   13246 	if (sc->sc_type >= WM_T_PCH)
   13247 		wm_enable_phy_wakeup(sc);
   13248 	else {
   13249 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13250 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13251 	}
   13252 
   13253 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13254 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13255 		|| (sc->sc_type == WM_T_PCH2))
   13256 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13257 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13258 
   13259 	/* Request PME */
   13260 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13261 #if 0
   13262 	/* Disable WOL */
   13263 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13264 #else
   13265 	/* For WOL */
   13266 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13267 #endif
   13268 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13269 }
   13270 
   13271 /* LPLU */
   13272 
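          /*
           * Clear the D0 Low Power Link Up (LPLU) and gigabit-disable bits so
           * the PHY may negotiate at full speed while the device is in D0.
           */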
   13273 static void
   13274 wm_lplu_d0_disable(struct wm_softc *sc)
   13275 {
   13276 	uint32_t reg;
   13277 
   13278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13279 		device_xname(sc->sc_dev), __func__));
   13280 
   13281 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13282 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13283 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13284 }
   13285 
   13286 static void
   13287 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13288 {
   13289 	uint32_t reg;
   13290 
   13291 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13292 		device_xname(sc->sc_dev), __func__));
   13293 
   13294 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13295 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13296 	reg |= HV_OEM_BITS_ANEGNOW;
   13297 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13298 }
   13299 
   13300 /* EEE */
   13301 
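          /*
           * Advertise or withdraw Energy Efficient Ethernet (EEE) for
           * 1000/100Mbps and enable/disable low power idle (LPI) according to
           * the WM_F_EEE flag.
           */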
   13302 static void
   13303 wm_set_eee_i350(struct wm_softc *sc)
   13304 {
   13305 	uint32_t ipcnfg, eeer;
   13306 
   13307 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13308 	eeer = CSR_READ(sc, WMREG_EEER);
   13309 
   13310 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13311 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13312 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13313 		    | EEER_LPI_FC);
   13314 	} else {
   13315 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13316 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13317 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13318 		    | EEER_LPI_FC);
   13319 	}
   13320 
   13321 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13322 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13323 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13324 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13325 }
   13326 
    13327 /*
    13328  * Workarounds (mainly PHY related).
    13329  * Most PHY workarounds live in the PHY drivers; the exceptions are here.
    13330  */
   13331 
   13332 /* Work-around for 82566 Kumeran PCS lock loss */
   13333 static void
   13334 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13335 {
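          	/* XXX Currently disabled; the intended logic is kept below. */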
   13336 #if 0
   13337 	int miistatus, active, i;
   13338 	int reg;
   13339 
   13340 	miistatus = sc->sc_mii.mii_media_status;
   13341 
   13342 	/* If the link is not up, do nothing */
   13343 	if ((miistatus & IFM_ACTIVE) == 0)
   13344 		return;
   13345 
   13346 	active = sc->sc_mii.mii_media_active;
   13347 
   13348 	/* Nothing to do if the link is other than 1Gbps */
   13349 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13350 		return;
   13351 
   13352 	for (i = 0; i < 10; i++) {
   13353 		/* read twice */
   13354 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13355 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13356 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13357 			goto out;	/* GOOD! */
   13358 
   13359 		/* Reset the PHY */
   13360 		wm_gmii_reset(sc);
   13361 		delay(5*1000);
   13362 	}
   13363 
   13364 	/* Disable GigE link negotiation */
   13365 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13366 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13367 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13368 
   13369 	/*
   13370 	 * Call gig speed drop workaround on Gig disable before accessing
   13371 	 * any PHY registers.
   13372 	 */
   13373 	wm_gig_downshift_workaround_ich8lan(sc);
   13374 
   13375 out:
   13376 	return;
   13377 #endif
   13378 }
   13379 
   13380 /* WOL from S5 stops working */
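          /*
           * Pulsing the Kumeran near-end loopback diagnostic bit on IGP3 PHYs
           * works around the problem.
           */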
   13381 static void
   13382 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13383 {
   13384 	uint16_t kmrn_reg;
   13385 
   13386 	/* Only for igp3 */
   13387 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13388 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13389 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13390 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13391 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13392 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13393 	}
   13394 }
   13395 
   13396 /*
   13397  * Workaround for pch's PHYs
   13398  * XXX should be moved to new PHY driver?
   13399  */
   13400 static void
   13401 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13402 {
   13403 
   13404 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13405 		device_xname(sc->sc_dev), __func__));
   13406 	KASSERT(sc->sc_type == WM_T_PCH);
   13407 
   13408 	if (sc->sc_phytype == WMPHY_82577)
   13409 		wm_set_mdio_slow_mode_hv(sc);
   13410 
    13411 	/* XXX TODO: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
    13412 
    13413 	/* XXX TODO: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13414 
   13415 	/* 82578 */
   13416 	if (sc->sc_phytype == WMPHY_82578) {
   13417 		struct mii_softc *child;
   13418 
   13419 		/*
   13420 		 * Return registers to default by doing a soft reset then
   13421 		 * writing 0x3140 to the control register
   13422 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13423 		 */
   13424 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13425 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13426 			PHY_RESET(child);
   13427 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13428 			    0x3140);
   13429 		}
   13430 	}
   13431 
   13432 	/* Select page 0 */
   13433 	sc->phy.acquire(sc);
   13434 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13435 	sc->phy.release(sc);
   13436 
   13437 	/*
   13438 	 * Configure the K1 Si workaround during phy reset assuming there is
   13439 	 * link so that it disables K1 if link is in 1Gbps.
   13440 	 */
   13441 	wm_k1_gig_workaround_hv(sc, 1);
   13442 }
   13443 
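          /*
           * For PCH2 (the 82579 PHY), only the slow MDIO mode part of the
           * workaround above is needed.
           */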
   13444 static void
   13445 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13446 {
   13447 
   13448 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13449 		device_xname(sc->sc_dev), __func__));
   13450 	KASSERT(sc->sc_type == WM_T_PCH2);
   13451 
   13452 	wm_set_mdio_slow_mode_hv(sc);
   13453 }
   13454 
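          /*
           * K1 is a power-saving state of the MAC/PHY interconnect.  It cannot
           * be used while a 1Gbps link is up, so K1 is left enabled (per the
           * NVM setting) only when there is no link.
           */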
   13455 static int
   13456 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13457 {
   13458 	int k1_enable = sc->sc_nvm_k1_enabled;
   13459 
   13460 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13461 		device_xname(sc->sc_dev), __func__));
   13462 
   13463 	if (sc->phy.acquire(sc) != 0)
   13464 		return -1;
   13465 
   13466 	if (link) {
   13467 		k1_enable = 0;
   13468 
   13469 		/* Link stall fix for link up */
   13470 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13471 	} else {
   13472 		/* Link stall fix for link down */
   13473 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13474 	}
   13475 
   13476 	wm_configure_k1_ich8lan(sc, k1_enable);
   13477 	sc->phy.release(sc);
   13478 
   13479 	return 0;
   13480 }
   13481 
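          /*
           * Switch the PHY's Kumeran interface to slow MDIO mode so that PHY
           * registers can be accessed reliably.
           */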
   13482 static void
   13483 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13484 {
   13485 	uint32_t reg;
   13486 
   13487 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13488 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13489 	    reg | HV_KMRN_MDIO_SLOW);
   13490 }
   13491 
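          /*
           * Write the new K1 setting to the Kumeran K1 config register, then
           * briefly force the MAC speed (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS)
           * before restoring the original CTRL/CTRL_EXT values.
           */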
   13492 static void
   13493 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13494 {
   13495 	uint32_t ctrl, ctrl_ext, tmp;
   13496 	uint16_t kmrn_reg;
   13497 
   13498 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13499 
   13500 	if (k1_enable)
   13501 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13502 	else
   13503 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13504 
   13505 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13506 
   13507 	delay(20);
   13508 
   13509 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13510 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13511 
   13512 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13513 	tmp |= CTRL_FRCSPD;
   13514 
   13515 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13516 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13517 	CSR_WRITE_FLUSH(sc);
   13518 	delay(20);
   13519 
   13520 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13521 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13522 	CSR_WRITE_FLUSH(sc);
   13523 	delay(20);
   13524 }
   13525 
    13526 /* Special case: the 82575 needs manual initialization ... */
   13527 static void
   13528 wm_reset_init_script_82575(struct wm_softc *sc)
   13529 {
    13530 	/*
    13531 	 * Remark: this is untested code - we have no board without EEPROM.
    13532 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13533 	 */
   13534 
   13535 	/* SerDes configuration via SERDESCTRL */
   13536 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13537 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13538 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13539 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13540 
   13541 	/* CCM configuration via CCMCTL register */
   13542 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13543 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13544 
   13545 	/* PCIe lanes configuration */
   13546 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13547 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13548 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13549 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13550 
   13551 	/* PCIe PLL Configuration */
   13552 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13553 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13554 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13555 }
   13556 
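          /*
           * On SGMII-capable 82580s the MDICNFG destination and com_mdio bits
           * are not restored from the NVM after a reset, so re-derive them
           * from the CFG3 word here.
           */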
   13557 static void
   13558 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13559 {
   13560 	uint32_t reg;
   13561 	uint16_t nvmword;
   13562 	int rv;
   13563 
   13564 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13565 		return;
   13566 
   13567 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13568 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13569 	if (rv != 0) {
   13570 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13571 		    __func__);
   13572 		return;
   13573 	}
   13574 
   13575 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13576 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13577 		reg |= MDICNFG_DEST;
   13578 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13579 		reg |= MDICNFG_COM_MDIO;
   13580 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13581 }
   13582 
   13583 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13584 
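          /*
           * Check whether the PHY answers on the management interface by
           * reading its ID registers, falling back to slow MDIO mode on
           * pre-LPT parts.  On LPT/SPT, additionally unforce SMBus mode when
           * the ME is inactive.
           */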
   13585 static bool
   13586 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13587 {
   13588 	int i;
   13589 	uint32_t reg;
   13590 	uint16_t id1, id2;
   13591 
   13592 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13593 		device_xname(sc->sc_dev), __func__));
   13594 	id1 = id2 = 0xffff;
   13595 	for (i = 0; i < 2; i++) {
   13596 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13597 		if (MII_INVALIDID(id1))
   13598 			continue;
   13599 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13600 		if (MII_INVALIDID(id2))
   13601 			continue;
   13602 		break;
   13603 	}
    13604 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    13605 		goto out;
   13607 
   13608 	if (sc->sc_type < WM_T_PCH_LPT) {
   13609 		sc->phy.release(sc);
   13610 		wm_set_mdio_slow_mode_hv(sc);
   13611 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13612 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13613 		sc->phy.acquire(sc);
   13614 	}
   13615 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    13616 		printf("XXX %s: PHY is not accessible\n", __func__);
   13617 		return false;
   13618 	}
   13619 out:
   13620 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13621 		/* Only unforce SMBus if ME is not active */
   13622 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13623 			/* Unforce SMBus mode in PHY */
   13624 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13625 			    CV_SMB_CTRL);
   13626 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13627 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13628 			    CV_SMB_CTRL, reg);
   13629 
   13630 			/* Unforce SMBus mode in MAC */
   13631 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13632 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13633 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13634 		}
   13635 	}
   13636 	return true;
   13637 }
   13638 
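          /*
           * Toggle the LANPHYPC pin value to power-cycle the PHY: shorten the
           * PHY config counter to 50msec, drive the pin low via the override
           * bits, then wait for the power cycle to complete (CTRL_EXT_LPCD on
           * LPT and newer).
           */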
   13639 static void
   13640 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13641 {
   13642 	uint32_t reg;
   13643 	int i;
   13644 
   13645 	/* Set PHY Config Counter to 50msec */
   13646 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13647 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13648 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13649 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13650 
   13651 	/* Toggle LANPHYPC */
   13652 	reg = CSR_READ(sc, WMREG_CTRL);
   13653 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13654 	reg &= ~CTRL_LANPHYPC_VALUE;
   13655 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13656 	CSR_WRITE_FLUSH(sc);
   13657 	delay(1000);
   13658 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13659 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13660 	CSR_WRITE_FLUSH(sc);
   13661 
   13662 	if (sc->sc_type < WM_T_PCH_LPT)
   13663 		delay(50 * 1000);
   13664 	else {
   13665 		i = 20;
   13666 
   13667 		do {
   13668 			delay(5 * 1000);
   13669 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13670 		    && i--);
   13671 
   13672 		delay(30 * 1000);
   13673 	}
   13674 }
   13675 
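          /*
           * Program the PCIe Latency Tolerance Reporting (LTR) value for the
           * current link state.  With link up, the tolerable latency is
           * estimated from the free Rx buffer space and the link speed and
           * clamped to the platform maximum read from PCI config space; with
           * link down, zero is reported.
           */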
   13676 static int
   13677 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13678 {
   13679 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13680 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13681 	uint32_t rxa;
   13682 	uint16_t scale = 0, lat_enc = 0;
   13683 	int64_t lat_ns, value;
   13684 
   13685 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13686 		device_xname(sc->sc_dev), __func__));
   13687 
   13688 	if (link) {
   13689 		pcireg_t preg;
   13690 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13691 
   13692 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13693 
   13694 		/*
   13695 		 * Determine the maximum latency tolerated by the device.
   13696 		 *
   13697 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13698 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13699 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13700 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13701 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13702 		 */
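          		/*
          		 * For illustration (assuming a 24KB Rx buffer, a
          		 * 1500-byte MTU and a 1Gbps link): lat_ns =
          		 * (24 * 1024 - 2 * 1500) * 8 * 1000 / 1000 = 172608ns,
          		 * which the loop below encodes as value 169 with
          		 * scale 2, i.e. 169 * 2^10ns = 173056ns.
          		 */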
   13703 		lat_ns = ((int64_t)rxa * 1024 -
   13704 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13705 		if (lat_ns < 0)
   13706 			lat_ns = 0;
   13707 		else {
   13708 			uint32_t status;
   13709 			uint16_t speed;
   13710 
   13711 			status = CSR_READ(sc, WMREG_STATUS);
   13712 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13713 			case STATUS_SPEED_10:
   13714 				speed = 10;
   13715 				break;
   13716 			case STATUS_SPEED_100:
   13717 				speed = 100;
   13718 				break;
   13719 			case STATUS_SPEED_1000:
   13720 				speed = 1000;
   13721 				break;
   13722 			default:
   13723 				printf("%s: Unknown speed (status = %08x)\n",
   13724 				    device_xname(sc->sc_dev), status);
   13725 				return -1;
   13726 			}
   13727 			lat_ns /= speed;
   13728 		}
   13729 		value = lat_ns;
   13730 
   13731 		while (value > LTRV_VALUE) {
    13732 			scale++;
   13733 			value = howmany(value, __BIT(5));
   13734 		}
   13735 		if (scale > LTRV_SCALE_MAX) {
   13736 			printf("%s: Invalid LTR latency scale %d\n",
   13737 			    device_xname(sc->sc_dev), scale);
   13738 			return -1;
   13739 		}
   13740 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13741 
   13742 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13743 		    WM_PCI_LTR_CAP_LPT);
   13744 		max_snoop = preg & 0xffff;
   13745 		max_nosnoop = preg >> 16;
   13746 
   13747 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13748 
    13749 		if (lat_enc > max_ltr_enc)
    13750 			lat_enc = max_ltr_enc;
   13752 	}
   13753 	/* Snoop and No-Snoop latencies the same */
   13754 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13755 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13756 
   13757 	return 0;
   13758 }
   13759 
   13760 /*
   13761  * I210 Errata 25 and I211 Errata 10
   13762  * Slow System Clock.
   13763  */
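          /*
           * The workaround below resets the internal PHY and bounces the
           * device through D3hot/D0 with a patched iNVM autoload word until
           * the PHY PLL no longer reads back as unconfigured.
           */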
   13764 static void
   13765 wm_pll_workaround_i210(struct wm_softc *sc)
   13766 {
   13767 	uint32_t mdicnfg, wuc;
   13768 	uint32_t reg;
   13769 	pcireg_t pcireg;
   13770 	uint32_t pmreg;
   13771 	uint16_t nvmword, tmp_nvmword;
   13772 	int phyval;
   13773 	bool wa_done = false;
   13774 	int i;
   13775 
   13776 	/* Save WUC and MDICNFG registers */
   13777 	wuc = CSR_READ(sc, WMREG_WUC);
   13778 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13779 
   13780 	reg = mdicnfg & ~MDICNFG_DEST;
   13781 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13782 
   13783 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13784 		nvmword = INVM_DEFAULT_AL;
   13785 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13786 
   13787 	/* Get Power Management cap offset */
   13788 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13789 		&pmreg, NULL) == 0)
   13790 		return;
   13791 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13792 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13793 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13794 
   13795 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13796 			break; /* OK */
   13797 		}
   13798 
   13799 		wa_done = true;
   13800 		/* Directly reset the internal PHY */
   13801 		reg = CSR_READ(sc, WMREG_CTRL);
   13802 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13803 
   13804 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13805 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13806 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13807 
   13808 		CSR_WRITE(sc, WMREG_WUC, 0);
   13809 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13810 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13811 
   13812 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13813 		    pmreg + PCI_PMCSR);
   13814 		pcireg |= PCI_PMCSR_STATE_D3;
   13815 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13816 		    pmreg + PCI_PMCSR, pcireg);
   13817 		delay(1000);
   13818 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13819 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13820 		    pmreg + PCI_PMCSR, pcireg);
   13821 
   13822 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13823 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13824 
   13825 		/* Restore WUC register */
   13826 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13827 	}
   13828 
   13829 	/* Restore MDICNFG setting */
   13830 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13831 	if (wa_done)
   13832 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13833 }
   13834