/*	$NetBSD: if_wm.c,v 1.444 2016/11/14 05:38:39 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.444 2016/11/14 05:38:39 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
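
/*
 * Usage sketch (illustrative; the message text here is an assumption,
 * not taken from this file): the second macro argument carries its own
 * parentheses, so DPRINTF() expands to a plain printf(9) call guarded
 * by the debug mask, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 *
 * When WM_DEBUG is not defined, the macro (and thus the whole call)
 * compiles away.
 */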

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts used by this device driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

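/*
 * Worked example (illustrative): a fully populated MSI-X setup thus
 * uses at most 16 Tx/Rx queue-pair vectors plus one extra vector,
 * which this driver dedicates to link state changes (cf.
 * wm_linkintr_msix() below), for a total of 17.
 */
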
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

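/*
 * Worked example (illustrative): because the ring sizes are powers of
 * two, advancing an index is a mask operation rather than a modulo.
 * With WM_NTXDESC(txq) == WM_NTXDESC_82544 == 4096, the mask is 0xfff,
 * so WM_NEXTTX(txq, 4095) == (4096 & 0xfff) == 0 and the ring wraps.
 */
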
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

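/*
 * Worked example (illustrative): a ~9000 byte jumbo frame needs
 * howmany(9000, MCLBYTES) == 5 clusters, so the 256 descriptors above
 * hold 256 / 5 == 51 of them, i.e. the "room for 50 jumbo packets"
 * quoted in the comment.
 */
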
typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */

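/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw) pastes
 * its arguments into
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * The ## operators inside the string literal are not expanded; the
 * literal merely sizes the buffer for "<qname><2-digit queue><evname>".
 * WM_Q_EVCNT_ATTACH() then snprintf()s a name such as "txq00txdw" into
 * that buffer and registers it via evcnt_attach_dynamic(9).
 */
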
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq intermediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * Legacy and MSI use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* Legacy and MSI use sc_intrs[0]. */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

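/*
 * Usage sketch (illustrative): rxq_tailp always points at the m_next
 * slot to fill next, so appending an mbuf is O(1) with no list walk:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head is now NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, rxq_tail == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */
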
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

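/*
 * Usage sketch (illustrative; CTRL_RST is named here only as an example
 * bit): PCI writes are posted, so a dummy read of STATUS is used to
 * push a preceding register write out to the device before a delay or
 * a dependent access:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */
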
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

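/*
 * Worked example (illustrative): with a 64-bit bus_addr_t, a descriptor
 * base address of 0x123456789 splits into WM_CDTXADDR_LO == 0x23456789
 * and WM_CDTXADDR_HI == 0x1, i.e. the two 32-bit halves the hardware
 * expects for a DMA address.
 */
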
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Locking, NVM type detection, checksum validation and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
   1349 	  WM_T_I210,		WMP_F_SERDES },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1352 	  "I210 Gigabit Ethernet (FLASH less)",
   1353 	  WM_T_I210,		WMP_F_SERDES },
   1354 
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1356 	  "I210 Gigabit Ethernet (SGMII)",
   1357 	  WM_T_I210,		WMP_F_COPPER },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1360 	  "I211 Ethernet (COPPER)",
   1361 	  WM_T_I211,		WMP_F_COPPER },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1363 	  "I217 V Ethernet Connection",
   1364 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1366 	  "I217 LM Ethernet Connection",
   1367 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1369 	  "I218 V Ethernet Connection",
   1370 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1372 	  "I218 V Ethernet Connection",
   1373 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1375 	  "I218 V Ethernet Connection",
   1376 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1378 	  "I218 LM Ethernet Connection",
   1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1381 	  "I218 LM Ethernet Connection",
   1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1384 	  "I218 LM Ethernet Connection",
   1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1386 #if 0
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1388 	  "I219 V Ethernet Connection",
   1389 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1391 	  "I219 V Ethernet Connection",
   1392 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1394 	  "I219 V Ethernet Connection",
   1395 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1397 	  "I219 V Ethernet Connection",
   1398 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1400 	  "I219 LM Ethernet Connection",
   1401 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1403 	  "I219 LM Ethernet Connection",
   1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1406 	  "I219 LM Ethernet Connection",
   1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1409 	  "I219 LM Ethernet Connection",
   1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1412 	  "I219 LM Ethernet Connection",
   1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1414 #endif
   1415 	{ 0,			0,
   1416 	  NULL,
   1417 	  0,			0 },
   1418 };
   1419 
   1420 /*
    1421  * Register read/write functions,
    1422  * other than CSR_{READ|WRITE}().
   1423  */
   1424 
   1425 #if 0 /* Not currently used */
   1426 static inline uint32_t
   1427 wm_io_read(struct wm_softc *sc, int reg)
   1428 {
   1429 
   1430 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1431 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1432 }
   1433 #endif
   1434 
   1435 static inline void
   1436 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1437 {
   1438 
   1439 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1440 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1441 }
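
/*
 * Note (illustrative): the I/O BAR used by the two functions above is
 * a small indirect window: the register index is written at BAR offset
 * 0 and the data is then read or written at BAR offset 4, so every
 * indirect register access costs a pair of bus_space operations.
 */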
   1442 
   1443 static inline void
   1444 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1445     uint32_t data)
   1446 {
   1447 	uint32_t regval;
   1448 	int i;
   1449 
   1450 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1451 
   1452 	CSR_WRITE(sc, reg, regval);
   1453 
   1454 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1455 		delay(5);
   1456 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1457 			break;
   1458 	}
   1459 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1460 		aprint_error("%s: WARNING:"
   1461 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1462 		    device_xname(sc->sc_dev), reg);
   1463 	}
   1464 }
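
/*
 * Usage sketch (illustrative only; the offset and data values below are
 * hypothetical).  Judging by the SCTL_CTL_* constants, this helper is
 * presumably meant for the SCTL serdes control register, writing one
 * byte of the 8-bit-addressed register bank per call:
 */
#if 0
	/* Write the byte 0x40 to 8-bit register offset 0x00. */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x40);
#endif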
   1465 
   1466 static inline void
   1467 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1468 {
   1469 	wa->wa_low = htole32(v & 0xffffffffU);
   1470 	if (sizeof(bus_addr_t) == 8)
   1471 		wa->wa_high = htole32((uint64_t) v >> 32);
   1472 	else
   1473 		wa->wa_high = 0;
   1474 }
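
/*
 * Worked example (illustrative, hypothetical address): on a platform
 * with a 64-bit bus_addr_t, wm_set_dma_addr(&wa, 0x123456780) stores
 * htole32(0x23456780) in wa_low and htole32(0x00000001) in wa_high;
 * with a 32-bit bus_addr_t the high word is always zero.
 */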
   1475 
   1476 /*
   1477  * Descriptor sync/init functions.
   1478  */
   1479 static inline void
   1480 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1481 {
   1482 	struct wm_softc *sc = txq->txq_sc;
   1483 
   1484 	/* If it will wrap around, sync to the end of the ring. */
   1485 	if ((start + num) > WM_NTXDESC(txq)) {
   1486 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1487 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1488 		    (WM_NTXDESC(txq) - start), ops);
   1489 		num -= (WM_NTXDESC(txq) - start);
   1490 		start = 0;
   1491 	}
   1492 
   1493 	/* Now sync whatever is left. */
   1494 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1495 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1496 }
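
/*
 * Worked example (illustrative): with a 256-descriptor ring,
 * wm_cdtxsync(txq, 250, 10, ops) first syncs descriptors 250..255 and
 * then descriptors 0..3, so a range that wraps past the end of the
 * ring always costs exactly two bus_dmamap_sync() calls.
 */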
   1497 
   1498 static inline void
   1499 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1500 {
   1501 	struct wm_softc *sc = rxq->rxq_sc;
   1502 
   1503 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1504 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1505 }
   1506 
   1507 static inline void
   1508 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1509 {
   1510 	struct wm_softc *sc = rxq->rxq_sc;
   1511 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1512 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1513 	struct mbuf *m = rxs->rxs_mbuf;
   1514 
   1515 	/*
   1516 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1517 	 * so that the payload after the Ethernet header is aligned
   1518 	 * to a 4-byte boundary.
    1519 	 *
   1520 	 * XXX BRAINDAMAGE ALERT!
   1521 	 * The stupid chip uses the same size for every buffer, which
   1522 	 * is set in the Receive Control register.  We are using the 2K
   1523 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1524 	 * reason, we can't "scoot" packets longer than the standard
   1525 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1526 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1527 	 * the upper layer copy the headers.
   1528 	 */
   1529 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1530 
   1531 	wm_set_dma_addr(&rxd->wrx_addr,
   1532 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1533 	rxd->wrx_len = 0;
   1534 	rxd->wrx_cksum = 0;
   1535 	rxd->wrx_status = 0;
   1536 	rxd->wrx_errors = 0;
   1537 	rxd->wrx_special = 0;
   1538 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1539 
   1540 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1541 }
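
/*
 * Worked example of the align_tweak logic above (illustrative): with
 * sc_align_tweak == 2, the 14-byte Ethernet header starts at buffer
 * offset 2, so the IP header lands at offset 16, a 4-byte boundary.
 * On strict-alignment platforms with frames that would exceed (2K - 2)
 * bytes, the tweak is 0 and the upper layer copies the headers
 * instead, as described in the BRAINDAMAGE comment above.
 */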
   1542 
   1543 /*
   1544  * Device driver interface functions and commonly used functions.
   1545  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1546  */
   1547 
   1548 /* Lookup supported device table */
   1549 static const struct wm_product *
   1550 wm_lookup(const struct pci_attach_args *pa)
   1551 {
   1552 	const struct wm_product *wmp;
   1553 
   1554 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1555 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1556 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1557 			return wmp;
   1558 	}
   1559 	return NULL;
   1560 }
   1561 
   1562 /* The match function (ca_match) */
   1563 static int
   1564 wm_match(device_t parent, cfdata_t cf, void *aux)
   1565 {
   1566 	struct pci_attach_args *pa = aux;
   1567 
   1568 	if (wm_lookup(pa) != NULL)
   1569 		return 1;
   1570 
   1571 	return 0;
   1572 }
   1573 
   1574 /* The attach function (ca_attach) */
   1575 static void
   1576 wm_attach(device_t parent, device_t self, void *aux)
   1577 {
   1578 	struct wm_softc *sc = device_private(self);
   1579 	struct pci_attach_args *pa = aux;
   1580 	prop_dictionary_t dict;
   1581 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1582 	pci_chipset_tag_t pc = pa->pa_pc;
   1583 	int counts[PCI_INTR_TYPE_SIZE];
   1584 	pci_intr_type_t max_type;
   1585 	const char *eetype, *xname;
   1586 	bus_space_tag_t memt;
   1587 	bus_space_handle_t memh;
   1588 	bus_size_t memsize;
   1589 	int memh_valid;
   1590 	int i, error;
   1591 	const struct wm_product *wmp;
   1592 	prop_data_t ea;
   1593 	prop_number_t pn;
   1594 	uint8_t enaddr[ETHER_ADDR_LEN];
   1595 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1596 	pcireg_t preg, memtype;
   1597 	uint16_t eeprom_data, apme_mask;
   1598 	bool force_clear_smbi;
   1599 	uint32_t link_mode;
   1600 	uint32_t reg;
   1601 
   1602 	sc->sc_dev = self;
   1603 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1604 	sc->sc_core_stopping = false;
   1605 
   1606 	wmp = wm_lookup(pa);
   1607 #ifdef DIAGNOSTIC
   1608 	if (wmp == NULL) {
   1609 		printf("\n");
   1610 		panic("wm_attach: impossible");
   1611 	}
   1612 #endif
   1613 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1614 
   1615 	sc->sc_pc = pa->pa_pc;
   1616 	sc->sc_pcitag = pa->pa_tag;
   1617 
   1618 	if (pci_dma64_available(pa))
   1619 		sc->sc_dmat = pa->pa_dmat64;
   1620 	else
   1621 		sc->sc_dmat = pa->pa_dmat;
   1622 
   1623 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1624 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1625 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1626 
   1627 	sc->sc_type = wmp->wmp_type;
   1628 
   1629 	/* Set default function pointers */
   1630 	sc->phy.acquire = wm_get_null;
   1631 	sc->phy.release = wm_put_null;
   1632 
   1633 	if (sc->sc_type < WM_T_82543) {
   1634 		if (sc->sc_rev < 2) {
   1635 			aprint_error_dev(sc->sc_dev,
   1636 			    "i82542 must be at least rev. 2\n");
   1637 			return;
   1638 		}
   1639 		if (sc->sc_rev < 3)
   1640 			sc->sc_type = WM_T_82542_2_0;
   1641 	}
   1642 
   1643 	/*
   1644 	 * Disable MSI for Errata:
   1645 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1646 	 *
   1647 	 *  82544: Errata 25
   1648 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1649 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1650 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1651 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1652 	 *
   1653 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1654 	 *
   1655 	 *  82571 & 82572: Errata 63
   1656 	 */
   1657 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1658 	    || (sc->sc_type == WM_T_82572))
   1659 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1660 
   1661 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1662 	    || (sc->sc_type == WM_T_82580)
   1663 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1664 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1665 		sc->sc_flags |= WM_F_NEWQUEUE;
   1666 
   1667 	/* Set device properties (mactype) */
   1668 	dict = device_properties(sc->sc_dev);
   1669 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1670 
   1671 	/*
    1672 	 * Map the device.  All devices support memory-mapped access,
   1673 	 * and it is really required for normal operation.
   1674 	 */
   1675 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1676 	switch (memtype) {
   1677 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1678 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1679 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1680 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1681 		break;
   1682 	default:
   1683 		memh_valid = 0;
   1684 		break;
   1685 	}
   1686 
   1687 	if (memh_valid) {
   1688 		sc->sc_st = memt;
   1689 		sc->sc_sh = memh;
   1690 		sc->sc_ss = memsize;
   1691 	} else {
   1692 		aprint_error_dev(sc->sc_dev,
   1693 		    "unable to map device registers\n");
   1694 		return;
   1695 	}
   1696 
   1697 	/*
   1698 	 * In addition, i82544 and later support I/O mapped indirect
   1699 	 * register access.  It is not desirable (nor supported in
   1700 	 * this driver) to use it for normal operation, though it is
   1701 	 * required to work around bugs in some chip versions.
   1702 	 */
   1703 	if (sc->sc_type >= WM_T_82544) {
   1704 		/* First we have to find the I/O BAR. */
   1705 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1706 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1707 			if (memtype == PCI_MAPREG_TYPE_IO)
   1708 				break;
   1709 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1710 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1711 				i += 4;	/* skip high bits, too */
   1712 		}
   1713 		if (i < PCI_MAPREG_END) {
   1714 			/*
    1715 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1716 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    1717 			 * That's no problem, because the newer chips don't
    1718 			 * have this bug.
   1719 			 *
   1720 			 * The i8254x doesn't apparently respond when the
   1721 			 * I/O BAR is 0, which looks somewhat like it's not
   1722 			 * been configured.
   1723 			 */
   1724 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1725 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1726 				aprint_error_dev(sc->sc_dev,
   1727 				    "WARNING: I/O BAR at zero.\n");
   1728 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1729 					0, &sc->sc_iot, &sc->sc_ioh,
   1730 					NULL, &sc->sc_ios) == 0) {
   1731 				sc->sc_flags |= WM_F_IOH_VALID;
   1732 			} else {
   1733 				aprint_error_dev(sc->sc_dev,
   1734 				    "WARNING: unable to map I/O space\n");
   1735 			}
   1736 		}
   1737 
   1738 	}
   1739 
   1740 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1741 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1742 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1743 	if (sc->sc_type < WM_T_82542_2_1)
   1744 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1745 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1746 
   1747 	/* power up chip */
   1748 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1749 	    NULL)) && error != EOPNOTSUPP) {
   1750 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1751 		return;
   1752 	}
   1753 
   1754 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1755 
   1756 	/* Allocation settings */
   1757 	max_type = PCI_INTR_TYPE_MSIX;
   1758 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1759 	counts[PCI_INTR_TYPE_MSI] = 1;
   1760 	counts[PCI_INTR_TYPE_INTX] = 1;
   1761 
   1762 alloc_retry:
   1763 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1764 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1765 		return;
   1766 	}
   1767 
   1768 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1769 		error = wm_setup_msix(sc);
   1770 		if (error) {
   1771 			pci_intr_release(pc, sc->sc_intrs,
   1772 			    counts[PCI_INTR_TYPE_MSIX]);
   1773 
   1774 			/* Setup for MSI: Disable MSI-X */
   1775 			max_type = PCI_INTR_TYPE_MSI;
   1776 			counts[PCI_INTR_TYPE_MSI] = 1;
   1777 			counts[PCI_INTR_TYPE_INTX] = 1;
   1778 			goto alloc_retry;
   1779 		}
    1780 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1781 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1782 		error = wm_setup_legacy(sc);
   1783 		if (error) {
   1784 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1785 			    counts[PCI_INTR_TYPE_MSI]);
   1786 
   1787 			/* The next try is for INTx: Disable MSI */
   1788 			max_type = PCI_INTR_TYPE_INTX;
   1789 			counts[PCI_INTR_TYPE_INTX] = 1;
   1790 			goto alloc_retry;
   1791 		}
   1792 	} else {
   1793 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1794 		error = wm_setup_legacy(sc);
   1795 		if (error) {
   1796 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1797 			    counts[PCI_INTR_TYPE_INTX]);
   1798 			return;
   1799 		}
   1800 	}
   1801 
   1802 	/*
   1803 	 * Check the function ID (unit number of the chip).
   1804 	 */
   1805 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1806 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1807 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1808 	    || (sc->sc_type == WM_T_82580)
   1809 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1810 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1811 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1812 	else
   1813 		sc->sc_funcid = 0;
   1814 
   1815 	/*
   1816 	 * Determine a few things about the bus we're connected to.
   1817 	 */
   1818 	if (sc->sc_type < WM_T_82543) {
   1819 		/* We don't really know the bus characteristics here. */
   1820 		sc->sc_bus_speed = 33;
   1821 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1822 		/*
    1823 		 * CSA (Communication Streaming Architecture) is about as fast
    1824 		 * as a 32-bit 66MHz PCI bus.
   1825 		 */
   1826 		sc->sc_flags |= WM_F_CSA;
   1827 		sc->sc_bus_speed = 66;
   1828 		aprint_verbose_dev(sc->sc_dev,
   1829 		    "Communication Streaming Architecture\n");
   1830 		if (sc->sc_type == WM_T_82547) {
   1831 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1832 			callout_setfunc(&sc->sc_txfifo_ch,
   1833 					wm_82547_txfifo_stall, sc);
   1834 			aprint_verbose_dev(sc->sc_dev,
   1835 			    "using 82547 Tx FIFO stall work-around\n");
   1836 		}
   1837 	} else if (sc->sc_type >= WM_T_82571) {
   1838 		sc->sc_flags |= WM_F_PCIE;
   1839 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1840 		    && (sc->sc_type != WM_T_ICH10)
   1841 		    && (sc->sc_type != WM_T_PCH)
   1842 		    && (sc->sc_type != WM_T_PCH2)
   1843 		    && (sc->sc_type != WM_T_PCH_LPT)
   1844 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1845 			/* ICH* and PCH* have no PCIe capability registers */
   1846 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1847 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1848 				NULL) == 0)
   1849 				aprint_error_dev(sc->sc_dev,
   1850 				    "unable to find PCIe capability\n");
   1851 		}
   1852 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1853 	} else {
   1854 		reg = CSR_READ(sc, WMREG_STATUS);
   1855 		if (reg & STATUS_BUS64)
   1856 			sc->sc_flags |= WM_F_BUS64;
   1857 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1858 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1859 
   1860 			sc->sc_flags |= WM_F_PCIX;
   1861 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1862 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1863 				aprint_error_dev(sc->sc_dev,
   1864 				    "unable to find PCIX capability\n");
   1865 			else if (sc->sc_type != WM_T_82545_3 &&
   1866 				 sc->sc_type != WM_T_82546_3) {
   1867 				/*
   1868 				 * Work around a problem caused by the BIOS
   1869 				 * setting the max memory read byte count
   1870 				 * incorrectly.
   1871 				 */
   1872 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1873 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1874 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1875 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1876 
   1877 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1878 				    PCIX_CMD_BYTECNT_SHIFT;
   1879 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1880 				    PCIX_STATUS_MAXB_SHIFT;
   1881 				if (bytecnt > maxb) {
   1882 					aprint_verbose_dev(sc->sc_dev,
   1883 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1884 					    512 << bytecnt, 512 << maxb);
   1885 					pcix_cmd = (pcix_cmd &
   1886 					    ~PCIX_CMD_BYTECNT_MASK) |
   1887 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1888 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1889 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1890 					    pcix_cmd);
   1891 				}
   1892 			}
   1893 		}
   1894 		/*
   1895 		 * The quad port adapter is special; it has a PCIX-PCIX
   1896 		 * bridge on the board, and can run the secondary bus at
   1897 		 * a higher speed.
   1898 		 */
   1899 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1900 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1901 								      : 66;
   1902 		} else if (sc->sc_flags & WM_F_PCIX) {
   1903 			switch (reg & STATUS_PCIXSPD_MASK) {
   1904 			case STATUS_PCIXSPD_50_66:
   1905 				sc->sc_bus_speed = 66;
   1906 				break;
   1907 			case STATUS_PCIXSPD_66_100:
   1908 				sc->sc_bus_speed = 100;
   1909 				break;
   1910 			case STATUS_PCIXSPD_100_133:
   1911 				sc->sc_bus_speed = 133;
   1912 				break;
   1913 			default:
   1914 				aprint_error_dev(sc->sc_dev,
   1915 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1916 				    reg & STATUS_PCIXSPD_MASK);
   1917 				sc->sc_bus_speed = 66;
   1918 				break;
   1919 			}
   1920 		} else
   1921 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1922 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1923 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1924 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1925 	}
   1926 
   1927 	/* clear interesting stat counters */
   1928 	CSR_READ(sc, WMREG_COLC);
   1929 	CSR_READ(sc, WMREG_RXERRC);
   1930 
   1931 	/* get PHY control from SMBus to PCIe */
   1932 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1933 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1934 		wm_smbustopci(sc);
   1935 
   1936 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1937 	    || (sc->sc_type >= WM_T_ICH8))
   1938 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1939 	if (sc->sc_type >= WM_T_ICH8)
   1940 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1941 
   1942 	/* Set PHY, NVM mutex related stuff */
   1943 	switch (sc->sc_type) {
   1944 	case WM_T_82542_2_0:
   1945 	case WM_T_82542_2_1:
   1946 	case WM_T_82543:
   1947 	case WM_T_82544:
   1948 		/* Microwire */
   1949 		sc->sc_nvm_wordsize = 64;
   1950 		sc->sc_nvm_addrbits = 6;
   1951 		break;
   1952 	case WM_T_82540:
   1953 	case WM_T_82545:
   1954 	case WM_T_82545_3:
   1955 	case WM_T_82546:
   1956 	case WM_T_82546_3:
   1957 		/* Microwire */
   1958 		reg = CSR_READ(sc, WMREG_EECD);
   1959 		if (reg & EECD_EE_SIZE) {
   1960 			sc->sc_nvm_wordsize = 256;
   1961 			sc->sc_nvm_addrbits = 8;
   1962 		} else {
   1963 			sc->sc_nvm_wordsize = 64;
   1964 			sc->sc_nvm_addrbits = 6;
   1965 		}
   1966 		sc->sc_flags |= WM_F_LOCK_EECD;
   1967 		break;
   1968 	case WM_T_82541:
   1969 	case WM_T_82541_2:
   1970 	case WM_T_82547:
   1971 	case WM_T_82547_2:
   1972 		sc->sc_flags |= WM_F_LOCK_EECD;
   1973 		reg = CSR_READ(sc, WMREG_EECD);
   1974 		if (reg & EECD_EE_TYPE) {
   1975 			/* SPI */
   1976 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1977 			wm_nvm_set_addrbits_size_eecd(sc);
   1978 		} else {
   1979 			/* Microwire */
   1980 			if ((reg & EECD_EE_ABITS) != 0) {
   1981 				sc->sc_nvm_wordsize = 256;
   1982 				sc->sc_nvm_addrbits = 8;
   1983 			} else {
   1984 				sc->sc_nvm_wordsize = 64;
   1985 				sc->sc_nvm_addrbits = 6;
   1986 			}
   1987 		}
   1988 		break;
   1989 	case WM_T_82571:
   1990 	case WM_T_82572:
   1991 		/* SPI */
   1992 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1993 		wm_nvm_set_addrbits_size_eecd(sc);
   1994 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1995 		sc->phy.acquire = wm_get_swsm_semaphore;
   1996 		sc->phy.release = wm_put_swsm_semaphore;
   1997 		break;
   1998 	case WM_T_82573:
   1999 	case WM_T_82574:
   2000 	case WM_T_82583:
   2001 		if (sc->sc_type == WM_T_82573) {
   2002 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2003 			sc->phy.acquire = wm_get_swsm_semaphore;
   2004 			sc->phy.release = wm_put_swsm_semaphore;
   2005 		} else {
   2006 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2007 			/* Both PHY and NVM use the same semaphore. */
   2008 			sc->phy.acquire
   2009 			    = wm_get_swfwhw_semaphore;
   2010 			sc->phy.release
   2011 			    = wm_put_swfwhw_semaphore;
   2012 		}
   2013 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2014 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2015 			sc->sc_nvm_wordsize = 2048;
   2016 		} else {
   2017 			/* SPI */
   2018 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2019 			wm_nvm_set_addrbits_size_eecd(sc);
   2020 		}
   2021 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2022 		break;
   2023 	case WM_T_82575:
   2024 	case WM_T_82576:
   2025 	case WM_T_82580:
   2026 	case WM_T_I350:
   2027 	case WM_T_I354:
   2028 	case WM_T_80003:
   2029 		/* SPI */
   2030 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2031 		wm_nvm_set_addrbits_size_eecd(sc);
   2032 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2033 		    | WM_F_LOCK_SWSM;
   2034 		sc->phy.acquire = wm_get_phy_82575;
   2035 		sc->phy.release = wm_put_phy_82575;
   2036 		break;
   2037 	case WM_T_ICH8:
   2038 	case WM_T_ICH9:
   2039 	case WM_T_ICH10:
   2040 	case WM_T_PCH:
   2041 	case WM_T_PCH2:
   2042 	case WM_T_PCH_LPT:
   2043 		/* FLASH */
   2044 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2045 		sc->sc_nvm_wordsize = 2048;
   2046 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2047 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2048 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2049 			aprint_error_dev(sc->sc_dev,
   2050 			    "can't map FLASH registers\n");
   2051 			goto out;
   2052 		}
   2053 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2054 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2055 		    ICH_FLASH_SECTOR_SIZE;
   2056 		sc->sc_ich8_flash_bank_size =
   2057 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2058 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2059 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2060 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2061 		sc->sc_flashreg_offset = 0;
   2062 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2063 		sc->phy.release = wm_put_swflag_ich8lan;
   2064 		break;
   2065 	case WM_T_PCH_SPT:
   2066 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2067 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2068 		sc->sc_flasht = sc->sc_st;
   2069 		sc->sc_flashh = sc->sc_sh;
   2070 		sc->sc_ich8_flash_base = 0;
   2071 		sc->sc_nvm_wordsize =
   2072 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2073 			* NVM_SIZE_MULTIPLIER;
    2074 		/* That's the size in bytes; we want it in words */
   2075 		sc->sc_nvm_wordsize /= 2;
   2076 		/* assume 2 banks */
   2077 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2078 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2079 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2080 		sc->phy.release = wm_put_swflag_ich8lan;
   2081 		break;
   2082 	case WM_T_I210:
   2083 	case WM_T_I211:
   2084 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2085 			wm_nvm_set_addrbits_size_eecd(sc);
   2086 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2087 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2088 		} else {
   2089 			sc->sc_nvm_wordsize = INVM_SIZE;
   2090 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2091 		}
   2092 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2093 		sc->phy.acquire = wm_get_phy_82575;
   2094 		sc->phy.release = wm_put_phy_82575;
   2095 		break;
   2096 	default:
   2097 		break;
   2098 	}
   2099 
   2100 	/* Reset the chip to a known state. */
   2101 	wm_reset(sc);
   2102 
   2103 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2104 	switch (sc->sc_type) {
   2105 	case WM_T_82571:
   2106 	case WM_T_82572:
   2107 		reg = CSR_READ(sc, WMREG_SWSM2);
   2108 		if ((reg & SWSM2_LOCK) == 0) {
   2109 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2110 			force_clear_smbi = true;
   2111 		} else
   2112 			force_clear_smbi = false;
   2113 		break;
   2114 	case WM_T_82573:
   2115 	case WM_T_82574:
   2116 	case WM_T_82583:
   2117 		force_clear_smbi = true;
   2118 		break;
   2119 	default:
   2120 		force_clear_smbi = false;
   2121 		break;
   2122 	}
   2123 	if (force_clear_smbi) {
   2124 		reg = CSR_READ(sc, WMREG_SWSM);
   2125 		if ((reg & SWSM_SMBI) != 0)
   2126 			aprint_error_dev(sc->sc_dev,
   2127 			    "Please update the Bootagent\n");
   2128 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2129 	}
   2130 
   2131 	/*
    2132 	 * Defer printing the EEPROM type until after verifying the checksum.
   2133 	 * This allows the EEPROM type to be printed correctly in the case
   2134 	 * that no EEPROM is attached.
   2135 	 */
   2136 	/*
   2137 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2138 	 * this for later, so we can fail future reads from the EEPROM.
   2139 	 */
   2140 	if (wm_nvm_validate_checksum(sc)) {
   2141 		/*
    2142 		 * Check again, because some PCIe parts fail the
    2143 		 * first check due to the link being in a sleep state.
   2144 		 */
   2145 		if (wm_nvm_validate_checksum(sc))
   2146 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2147 	}
   2148 
   2149 	/* Set device properties (macflags) */
   2150 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2151 
   2152 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2153 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2154 	else {
   2155 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2156 		    sc->sc_nvm_wordsize);
   2157 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2158 			aprint_verbose("iNVM");
   2159 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2160 			aprint_verbose("FLASH(HW)");
   2161 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2162 			aprint_verbose("FLASH");
   2163 		else {
   2164 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2165 				eetype = "SPI";
   2166 			else
   2167 				eetype = "MicroWire";
   2168 			aprint_verbose("(%d address bits) %s EEPROM",
   2169 			    sc->sc_nvm_addrbits, eetype);
   2170 		}
   2171 	}
   2172 	wm_nvm_version(sc);
   2173 	aprint_verbose("\n");
   2174 
   2175 	/* Check for I21[01] PLL workaround */
   2176 	if (sc->sc_type == WM_T_I210)
   2177 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2178 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2179 		/* NVM image release 3.25 has a workaround */
   2180 		if ((sc->sc_nvm_ver_major < 3)
   2181 		    || ((sc->sc_nvm_ver_major == 3)
   2182 			&& (sc->sc_nvm_ver_minor < 25))) {
   2183 			aprint_verbose_dev(sc->sc_dev,
   2184 			    "ROM image version %d.%d is older than 3.25\n",
   2185 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2186 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2187 		}
   2188 	}
   2189 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2190 		wm_pll_workaround_i210(sc);
   2191 
   2192 	wm_get_wakeup(sc);
   2193 	switch (sc->sc_type) {
   2194 	case WM_T_82571:
   2195 	case WM_T_82572:
   2196 	case WM_T_82573:
   2197 	case WM_T_82574:
   2198 	case WM_T_82583:
   2199 	case WM_T_80003:
   2200 	case WM_T_ICH8:
   2201 	case WM_T_ICH9:
   2202 	case WM_T_ICH10:
   2203 	case WM_T_PCH:
   2204 	case WM_T_PCH2:
   2205 	case WM_T_PCH_LPT:
   2206 	case WM_T_PCH_SPT:
   2207 		/* Non-AMT based hardware can now take control from firmware */
   2208 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2209 			wm_get_hw_control(sc);
   2210 		break;
   2211 	default:
   2212 		break;
   2213 	}
   2214 
   2215 	/*
   2216 	 * Read the Ethernet address from the EEPROM, if not first found
   2217 	 * in device properties.
   2218 	 */
   2219 	ea = prop_dictionary_get(dict, "mac-address");
   2220 	if (ea != NULL) {
   2221 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2222 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2223 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2224 	} else {
   2225 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2226 			aprint_error_dev(sc->sc_dev,
   2227 			    "unable to read Ethernet address\n");
   2228 			goto out;
   2229 		}
   2230 	}
   2231 
   2232 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2233 	    ether_sprintf(enaddr));
   2234 
   2235 	/*
   2236 	 * Read the config info from the EEPROM, and set up various
   2237 	 * bits in the control registers based on their contents.
   2238 	 */
   2239 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2240 	if (pn != NULL) {
   2241 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2242 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2243 	} else {
   2244 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2245 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2246 			goto out;
   2247 		}
   2248 	}
   2249 
   2250 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2251 	if (pn != NULL) {
   2252 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2253 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2254 	} else {
   2255 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2256 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2257 			goto out;
   2258 		}
   2259 	}
   2260 
   2261 	/* check for WM_F_WOL */
   2262 	switch (sc->sc_type) {
   2263 	case WM_T_82542_2_0:
   2264 	case WM_T_82542_2_1:
   2265 	case WM_T_82543:
   2266 		/* dummy? */
   2267 		eeprom_data = 0;
   2268 		apme_mask = NVM_CFG3_APME;
   2269 		break;
   2270 	case WM_T_82544:
   2271 		apme_mask = NVM_CFG2_82544_APM_EN;
   2272 		eeprom_data = cfg2;
   2273 		break;
   2274 	case WM_T_82546:
   2275 	case WM_T_82546_3:
   2276 	case WM_T_82571:
   2277 	case WM_T_82572:
   2278 	case WM_T_82573:
   2279 	case WM_T_82574:
   2280 	case WM_T_82583:
   2281 	case WM_T_80003:
   2282 	default:
   2283 		apme_mask = NVM_CFG3_APME;
   2284 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2285 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2286 		break;
   2287 	case WM_T_82575:
   2288 	case WM_T_82576:
   2289 	case WM_T_82580:
   2290 	case WM_T_I350:
   2291 	case WM_T_I354: /* XXX ok? */
   2292 	case WM_T_ICH8:
   2293 	case WM_T_ICH9:
   2294 	case WM_T_ICH10:
   2295 	case WM_T_PCH:
   2296 	case WM_T_PCH2:
   2297 	case WM_T_PCH_LPT:
   2298 	case WM_T_PCH_SPT:
   2299 		/* XXX The funcid should be checked on some devices */
   2300 		apme_mask = WUC_APME;
   2301 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2302 		break;
   2303 	}
   2304 
   2305 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2306 	if ((eeprom_data & apme_mask) != 0)
   2307 		sc->sc_flags |= WM_F_WOL;
   2308 #ifdef WM_DEBUG
   2309 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2310 		printf("WOL\n");
   2311 #endif
   2312 
   2313 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2314 		/* Check NVM for autonegotiation */
   2315 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2316 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2317 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2318 		}
   2319 	}
   2320 
   2321 	/*
    2322 	 * XXX need special handling for some multiple-port cards
    2323 	 * to disable a particular port.
   2324 	 */
   2325 
   2326 	if (sc->sc_type >= WM_T_82544) {
   2327 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2328 		if (pn != NULL) {
   2329 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2330 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2331 		} else {
   2332 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2333 				aprint_error_dev(sc->sc_dev,
   2334 				    "unable to read SWDPIN\n");
   2335 				goto out;
   2336 			}
   2337 		}
   2338 	}
   2339 
   2340 	if (cfg1 & NVM_CFG1_ILOS)
   2341 		sc->sc_ctrl |= CTRL_ILOS;
   2342 
   2343 	/*
   2344 	 * XXX
    2345 	 * This code isn't correct because pins 2 and 3 are located
    2346 	 * in different positions on newer chips. Check all the datasheets.
    2347 	 *
    2348 	 * Until this is resolved, only do this for chips up to the 82580.
   2349 	 */
   2350 	if (sc->sc_type <= WM_T_82580) {
   2351 		if (sc->sc_type >= WM_T_82544) {
   2352 			sc->sc_ctrl |=
   2353 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2354 			    CTRL_SWDPIO_SHIFT;
   2355 			sc->sc_ctrl |=
   2356 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2357 			    CTRL_SWDPINS_SHIFT;
   2358 		} else {
   2359 			sc->sc_ctrl |=
   2360 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2361 			    CTRL_SWDPIO_SHIFT;
   2362 		}
   2363 	}
   2364 
   2365 	/* XXX For other than 82580? */
   2366 	if (sc->sc_type == WM_T_82580) {
   2367 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2368 		if (nvmword & __BIT(13))
   2369 			sc->sc_ctrl |= CTRL_ILOS;
   2370 	}
   2371 
   2372 #if 0
   2373 	if (sc->sc_type >= WM_T_82544) {
   2374 		if (cfg1 & NVM_CFG1_IPS0)
   2375 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2376 		if (cfg1 & NVM_CFG1_IPS1)
   2377 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2378 		sc->sc_ctrl_ext |=
   2379 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2380 		    CTRL_EXT_SWDPIO_SHIFT;
   2381 		sc->sc_ctrl_ext |=
   2382 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2383 		    CTRL_EXT_SWDPINS_SHIFT;
   2384 	} else {
   2385 		sc->sc_ctrl_ext |=
   2386 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2387 		    CTRL_EXT_SWDPIO_SHIFT;
   2388 	}
   2389 #endif
   2390 
   2391 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2392 #if 0
   2393 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2394 #endif
   2395 
   2396 	if (sc->sc_type == WM_T_PCH) {
   2397 		uint16_t val;
   2398 
   2399 		/* Save the NVM K1 bit setting */
   2400 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2401 
   2402 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2403 			sc->sc_nvm_k1_enabled = 1;
   2404 		else
   2405 			sc->sc_nvm_k1_enabled = 0;
   2406 	}
   2407 
   2408 	/*
    2409 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2410 	 * media structures accordingly.
   2411 	 */
   2412 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2413 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2414 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2415 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2416 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2417 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2418 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2419 	} else if (sc->sc_type < WM_T_82543 ||
   2420 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2421 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2422 			aprint_error_dev(sc->sc_dev,
   2423 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2424 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2425 		}
   2426 		wm_tbi_mediainit(sc);
   2427 	} else {
   2428 		switch (sc->sc_type) {
   2429 		case WM_T_82575:
   2430 		case WM_T_82576:
   2431 		case WM_T_82580:
   2432 		case WM_T_I350:
   2433 		case WM_T_I354:
   2434 		case WM_T_I210:
   2435 		case WM_T_I211:
   2436 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2437 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2438 			switch (link_mode) {
   2439 			case CTRL_EXT_LINK_MODE_1000KX:
   2440 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2441 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2442 				break;
   2443 			case CTRL_EXT_LINK_MODE_SGMII:
   2444 				if (wm_sgmii_uses_mdio(sc)) {
   2445 					aprint_verbose_dev(sc->sc_dev,
   2446 					    "SGMII(MDIO)\n");
   2447 					sc->sc_flags |= WM_F_SGMII;
   2448 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2449 					break;
   2450 				}
   2451 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2452 				/*FALLTHROUGH*/
   2453 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2454 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2455 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2456 					if (link_mode
   2457 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2458 						sc->sc_mediatype
   2459 						    = WM_MEDIATYPE_COPPER;
   2460 						sc->sc_flags |= WM_F_SGMII;
   2461 					} else {
   2462 						sc->sc_mediatype
   2463 						    = WM_MEDIATYPE_SERDES;
   2464 						aprint_verbose_dev(sc->sc_dev,
   2465 						    "SERDES\n");
   2466 					}
   2467 					break;
   2468 				}
   2469 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2470 					aprint_verbose_dev(sc->sc_dev,
   2471 					    "SERDES\n");
   2472 
   2473 				/* Change current link mode setting */
   2474 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2475 				switch (sc->sc_mediatype) {
   2476 				case WM_MEDIATYPE_COPPER:
   2477 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2478 					break;
   2479 				case WM_MEDIATYPE_SERDES:
   2480 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2481 					break;
   2482 				default:
   2483 					break;
   2484 				}
   2485 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2486 				break;
   2487 			case CTRL_EXT_LINK_MODE_GMII:
   2488 			default:
   2489 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2490 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2491 				break;
   2492 			}
   2493 
    2494 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2495 				reg |= CTRL_EXT_I2C_ENA;
    2496 			else
    2497 				reg &= ~CTRL_EXT_I2C_ENA;
   2499 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2500 
   2501 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2502 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2503 			else
   2504 				wm_tbi_mediainit(sc);
   2505 			break;
   2506 		default:
   2507 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2508 				aprint_error_dev(sc->sc_dev,
   2509 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2510 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2511 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2512 		}
   2513 	}
   2514 
   2515 	ifp = &sc->sc_ethercom.ec_if;
   2516 	xname = device_xname(sc->sc_dev);
   2517 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2518 	ifp->if_softc = sc;
   2519 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2520 	ifp->if_extflags = IFEF_START_MPSAFE;
   2521 	ifp->if_ioctl = wm_ioctl;
   2522 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2523 		ifp->if_start = wm_nq_start;
   2524 		if (sc->sc_nqueues > 1)
   2525 			ifp->if_transmit = wm_nq_transmit;
   2526 	} else
   2527 		ifp->if_start = wm_start;
   2528 	ifp->if_watchdog = wm_watchdog;
   2529 	ifp->if_init = wm_init;
   2530 	ifp->if_stop = wm_stop;
   2531 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2532 	IFQ_SET_READY(&ifp->if_snd);
   2533 
   2534 	/* Check for jumbo frame */
   2535 	switch (sc->sc_type) {
   2536 	case WM_T_82573:
   2537 		/* XXX limited to 9234 if ASPM is disabled */
   2538 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2539 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2540 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2541 		break;
   2542 	case WM_T_82571:
   2543 	case WM_T_82572:
   2544 	case WM_T_82574:
   2545 	case WM_T_82575:
   2546 	case WM_T_82576:
   2547 	case WM_T_82580:
   2548 	case WM_T_I350:
   2549 	case WM_T_I354: /* XXXX ok? */
   2550 	case WM_T_I210:
   2551 	case WM_T_I211:
   2552 	case WM_T_80003:
   2553 	case WM_T_ICH9:
   2554 	case WM_T_ICH10:
   2555 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2556 	case WM_T_PCH_LPT:
   2557 	case WM_T_PCH_SPT:
   2558 		/* XXX limited to 9234 */
   2559 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2560 		break;
   2561 	case WM_T_PCH:
   2562 		/* XXX limited to 4096 */
   2563 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2564 		break;
   2565 	case WM_T_82542_2_0:
   2566 	case WM_T_82542_2_1:
   2567 	case WM_T_82583:
   2568 	case WM_T_ICH8:
   2569 		/* No support for jumbo frame */
   2570 		break;
   2571 	default:
   2572 		/* ETHER_MAX_LEN_JUMBO */
   2573 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2574 		break;
   2575 	}
   2576 
    2577 	/* If we're an i82543 or greater, we can support VLANs. */
   2578 	if (sc->sc_type >= WM_T_82543)
   2579 		sc->sc_ethercom.ec_capabilities |=
   2580 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2581 
   2582 	/*
    2583 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2584 	 * on the i82543 and later.
   2585 	 */
   2586 	if (sc->sc_type >= WM_T_82543) {
   2587 		ifp->if_capabilities |=
   2588 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2589 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2590 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2591 		    IFCAP_CSUM_TCPv6_Tx |
   2592 		    IFCAP_CSUM_UDPv6_Tx;
   2593 	}
   2594 
   2595 	/*
    2596 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2597 	 *
   2598 	 *	82541GI (8086:1076) ... no
   2599 	 *	82572EI (8086:10b9) ... yes
   2600 	 */
   2601 	if (sc->sc_type >= WM_T_82571) {
   2602 		ifp->if_capabilities |=
   2603 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2604 	}
   2605 
   2606 	/*
    2607 	 * If we're an i82544 or greater (except the i82547), we can do
   2608 	 * TCP segmentation offload.
   2609 	 */
   2610 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2611 		ifp->if_capabilities |= IFCAP_TSOv4;
   2612 	}
   2613 
   2614 	if (sc->sc_type >= WM_T_82571) {
   2615 		ifp->if_capabilities |= IFCAP_TSOv6;
   2616 	}
   2617 
   2618 #ifdef WM_MPSAFE
   2619 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2620 #else
   2621 	sc->sc_core_lock = NULL;
   2622 #endif
   2623 
   2624 	/* Attach the interface. */
   2625 	if_initialize(ifp);
   2626 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2627 	ether_ifattach(ifp, enaddr);
   2628 	if_register(ifp);
   2629 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2630 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2631 			  RND_FLAG_DEFAULT);
   2632 
   2633 #ifdef WM_EVENT_COUNTERS
   2634 	/* Attach event counters. */
   2635 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2636 	    NULL, xname, "linkintr");
   2637 
   2638 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2639 	    NULL, xname, "tx_xoff");
   2640 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2641 	    NULL, xname, "tx_xon");
   2642 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2643 	    NULL, xname, "rx_xoff");
   2644 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2645 	    NULL, xname, "rx_xon");
   2646 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2647 	    NULL, xname, "rx_macctl");
   2648 #endif /* WM_EVENT_COUNTERS */
   2649 
   2650 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2651 		pmf_class_network_register(self, ifp);
   2652 	else
   2653 		aprint_error_dev(self, "couldn't establish power handler\n");
   2654 
   2655 	sc->sc_flags |= WM_F_ATTACHED;
   2656  out:
   2657 	return;
   2658 }
   2659 
   2660 /* The detach function (ca_detach) */
   2661 static int
   2662 wm_detach(device_t self, int flags __unused)
   2663 {
   2664 	struct wm_softc *sc = device_private(self);
   2665 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2666 	int i;
   2667 
   2668 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2669 		return 0;
   2670 
   2671 	/* Stop the interface. Callouts are stopped in it. */
   2672 	wm_stop(ifp, 1);
   2673 
   2674 	pmf_device_deregister(self);
   2675 
   2676 	/* Tell the firmware about the release */
   2677 	WM_CORE_LOCK(sc);
   2678 	wm_release_manageability(sc);
   2679 	wm_release_hw_control(sc);
   2680 	wm_enable_wakeup(sc);
   2681 	WM_CORE_UNLOCK(sc);
   2682 
   2683 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2684 
   2685 	/* Delete all remaining media. */
   2686 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2687 
   2688 	ether_ifdetach(ifp);
   2689 	if_detach(ifp);
   2690 	if_percpuq_destroy(sc->sc_ipq);
   2691 
   2692 	/* Unload RX dmamaps and free mbufs */
   2693 	for (i = 0; i < sc->sc_nqueues; i++) {
   2694 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2695 		mutex_enter(rxq->rxq_lock);
   2696 		wm_rxdrain(rxq);
   2697 		mutex_exit(rxq->rxq_lock);
   2698 	}
   2699 	/* Must unlock here */
   2700 
   2701 	/* Disestablish the interrupt handler */
   2702 	for (i = 0; i < sc->sc_nintrs; i++) {
   2703 		if (sc->sc_ihs[i] != NULL) {
   2704 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2705 			sc->sc_ihs[i] = NULL;
   2706 		}
   2707 	}
   2708 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2709 
   2710 	wm_free_txrx_queues(sc);
   2711 
   2712 	/* Unmap the registers */
   2713 	if (sc->sc_ss) {
   2714 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2715 		sc->sc_ss = 0;
   2716 	}
   2717 	if (sc->sc_ios) {
   2718 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2719 		sc->sc_ios = 0;
   2720 	}
   2721 	if (sc->sc_flashs) {
   2722 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2723 		sc->sc_flashs = 0;
   2724 	}
   2725 
   2726 	if (sc->sc_core_lock)
   2727 		mutex_obj_free(sc->sc_core_lock);
   2728 	if (sc->sc_ich_phymtx)
   2729 		mutex_obj_free(sc->sc_ich_phymtx);
   2730 	if (sc->sc_ich_nvmmtx)
   2731 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2732 
   2733 	return 0;
   2734 }
   2735 
   2736 static bool
   2737 wm_suspend(device_t self, const pmf_qual_t *qual)
   2738 {
   2739 	struct wm_softc *sc = device_private(self);
   2740 
   2741 	wm_release_manageability(sc);
   2742 	wm_release_hw_control(sc);
   2743 	wm_enable_wakeup(sc);
   2744 
   2745 	return true;
   2746 }
   2747 
   2748 static bool
   2749 wm_resume(device_t self, const pmf_qual_t *qual)
   2750 {
   2751 	struct wm_softc *sc = device_private(self);
   2752 
   2753 	wm_init_manageability(sc);
   2754 
   2755 	return true;
   2756 }
   2757 
   2758 /*
   2759  * wm_watchdog:		[ifnet interface function]
   2760  *
   2761  *	Watchdog timer handler.
   2762  */
   2763 static void
   2764 wm_watchdog(struct ifnet *ifp)
   2765 {
   2766 	int qid;
   2767 	struct wm_softc *sc = ifp->if_softc;
   2768 
   2769 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2770 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2771 
   2772 		wm_watchdog_txq(ifp, txq);
   2773 	}
   2774 
   2775 	/* Reset the interface. */
   2776 	(void) wm_init(ifp);
   2777 
   2778 	/*
    2779 	 * There is still some upper-layer processing which calls
    2780 	 * ifp->if_start(), e.g. ALTQ.
   2781 	 */
   2782 	/* Try to get more packets going. */
   2783 	ifp->if_start(ifp);
   2784 }
   2785 
   2786 static void
   2787 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2788 {
   2789 	struct wm_softc *sc = ifp->if_softc;
   2790 
   2791 	/*
   2792 	 * Since we're using delayed interrupts, sweep up
   2793 	 * before we report an error.
   2794 	 */
   2795 	mutex_enter(txq->txq_lock);
   2796 	wm_txeof(sc, txq);
   2797 	mutex_exit(txq->txq_lock);
   2798 
   2799 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2800 #ifdef WM_DEBUG
   2801 		int i, j;
   2802 		struct wm_txsoft *txs;
   2803 #endif
   2804 		log(LOG_ERR,
   2805 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2806 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2807 		    txq->txq_next);
   2808 		ifp->if_oerrors++;
   2809 #ifdef WM_DEBUG
   2810 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2811 		    i = WM_NEXTTXS(txq, i)) {
   2812 		    txs = &txq->txq_soft[i];
   2813 		    printf("txs %d tx %d -> %d\n",
   2814 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2815 		    for (j = txs->txs_firstdesc; ;
   2816 			j = WM_NEXTTX(txq, j)) {
   2817 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2818 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2819 			printf("\t %#08x%08x\n",
   2820 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2821 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2822 			if (j == txs->txs_lastdesc)
   2823 				break;
   2824 			}
   2825 		}
   2826 #endif
   2827 	}
   2828 }
   2829 
   2830 /*
   2831  * wm_tick:
   2832  *
   2833  *	One second timer, used to check link status, sweep up
   2834  *	completed transmit jobs, etc.
   2835  */
   2836 static void
   2837 wm_tick(void *arg)
   2838 {
   2839 	struct wm_softc *sc = arg;
   2840 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2841 #ifndef WM_MPSAFE
   2842 	int s = splnet();
   2843 #endif
   2844 
   2845 	WM_CORE_LOCK(sc);
   2846 
   2847 	if (sc->sc_core_stopping)
   2848 		goto out;
   2849 
   2850 	if (sc->sc_type >= WM_T_82542_2_1) {
   2851 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2852 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2853 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2854 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2855 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2856 	}
   2857 
   2858 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2859 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2860 	    + CSR_READ(sc, WMREG_CRCERRS)
   2861 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2862 	    + CSR_READ(sc, WMREG_SYMERRC)
   2863 	    + CSR_READ(sc, WMREG_RXERRC)
   2864 	    + CSR_READ(sc, WMREG_SEC)
   2865 	    + CSR_READ(sc, WMREG_CEXTERR)
   2866 	    + CSR_READ(sc, WMREG_RLEC);
	/*
	 * WMREG_RNBC is incremented when no receive buffers are available
	 * in host memory.  It is not a count of dropped packets, because
	 * the Ethernet controller can still receive packets in that case
	 * as long as there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead
	 * of if_iqdrops.
	 */
   2876 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2877 
   2878 	if (sc->sc_flags & WM_F_HAS_MII)
   2879 		mii_tick(&sc->sc_mii);
   2880 	else if ((sc->sc_type >= WM_T_82575)
   2881 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2882 		wm_serdes_tick(sc);
   2883 	else
   2884 		wm_tbi_tick(sc);
   2885 
   2886 out:
   2887 	WM_CORE_UNLOCK(sc);
   2888 #ifndef WM_MPSAFE
   2889 	splx(s);
   2890 #endif
   2891 
   2892 	if (!sc->sc_core_stopping)
   2893 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2894 }
   2895 
   2896 static int
   2897 wm_ifflags_cb(struct ethercom *ec)
   2898 {
   2899 	struct ifnet *ifp = &ec->ec_if;
   2900 	struct wm_softc *sc = ifp->if_softc;
   2901 	int rc = 0;
   2902 
   2903 	WM_CORE_LOCK(sc);
   2904 
   2905 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2906 	sc->sc_if_flags = ifp->if_flags;
   2907 
   2908 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2909 		rc = ENETRESET;
   2910 		goto out;
   2911 	}
   2912 
   2913 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2914 		wm_set_filter(sc);
   2915 
   2916 	wm_set_vlan(sc);
   2917 
   2918 out:
   2919 	WM_CORE_UNLOCK(sc);
   2920 
   2921 	return rc;
   2922 }
   2923 
   2924 /*
   2925  * wm_ioctl:		[ifnet interface function]
   2926  *
   2927  *	Handle control requests from the operator.
   2928  */
   2929 static int
   2930 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2931 {
   2932 	struct wm_softc *sc = ifp->if_softc;
   2933 	struct ifreq *ifr = (struct ifreq *) data;
   2934 	struct ifaddr *ifa = (struct ifaddr *)data;
   2935 	struct sockaddr_dl *sdl;
   2936 	int s, error;
   2937 
   2938 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2939 		device_xname(sc->sc_dev), __func__));
   2940 
   2941 #ifndef WM_MPSAFE
   2942 	s = splnet();
   2943 #endif
   2944 	switch (cmd) {
   2945 	case SIOCSIFMEDIA:
   2946 	case SIOCGIFMEDIA:
   2947 		WM_CORE_LOCK(sc);
   2948 		/* Flow control requires full-duplex mode. */
   2949 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2950 		    (ifr->ifr_media & IFM_FDX) == 0)
   2951 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2952 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2953 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2954 				/* We can do both TXPAUSE and RXPAUSE. */
   2955 				ifr->ifr_media |=
   2956 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2957 			}
   2958 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2959 		}
   2960 		WM_CORE_UNLOCK(sc);
   2961 #ifdef WM_MPSAFE
   2962 		s = splnet();
   2963 #endif
   2964 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2965 #ifdef WM_MPSAFE
   2966 		splx(s);
   2967 #endif
   2968 		break;
   2969 	case SIOCINITIFADDR:
   2970 		WM_CORE_LOCK(sc);
   2971 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2972 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2973 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2974 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2975 			/* unicast address is first multicast entry */
   2976 			wm_set_filter(sc);
   2977 			error = 0;
   2978 			WM_CORE_UNLOCK(sc);
   2979 			break;
   2980 		}
   2981 		WM_CORE_UNLOCK(sc);
   2982 		/*FALLTHROUGH*/
   2983 	default:
   2984 #ifdef WM_MPSAFE
   2985 		s = splnet();
   2986 #endif
   2987 		/* It may call wm_start, so unlock here */
   2988 		error = ether_ioctl(ifp, cmd, data);
   2989 #ifdef WM_MPSAFE
   2990 		splx(s);
   2991 #endif
   2992 		if (error != ENETRESET)
   2993 			break;
   2994 
   2995 		error = 0;
   2996 
   2997 		if (cmd == SIOCSIFCAP) {
   2998 			error = (*ifp->if_init)(ifp);
   2999 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3000 			;
   3001 		else if (ifp->if_flags & IFF_RUNNING) {
   3002 			/*
   3003 			 * Multicast list has changed; set the hardware filter
   3004 			 * accordingly.
   3005 			 */
   3006 			WM_CORE_LOCK(sc);
   3007 			wm_set_filter(sc);
   3008 			WM_CORE_UNLOCK(sc);
   3009 		}
   3010 		break;
   3011 	}
   3012 
   3013 #ifndef WM_MPSAFE
   3014 	splx(s);
   3015 #endif
   3016 	return error;
   3017 }
   3018 
   3019 /* MAC address related */
   3020 
   3021 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, use offset 0.
   3024  */
   3025 static uint16_t
   3026 wm_check_alt_mac_addr(struct wm_softc *sc)
   3027 {
   3028 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3029 	uint16_t offset = NVM_OFF_MACADDR;
   3030 
   3031 	/* Try to read alternative MAC address pointer */
   3032 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3033 		return 0;
   3034 
	/* Check whether the pointer is valid. */
   3036 	if ((offset == 0x0000) || (offset == 0xffff))
   3037 		return 0;
   3038 
   3039 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
	/*
	 * Check whether the alternative MAC address is valid.  Some cards
	 * have a non-0xffff pointer but don't actually use an alternative
	 * MAC address.
	 *
	 * A valid address must not have the broadcast/multicast bit set.
	 */
   3047 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3048 		if (((myea[0] & 0xff) & 0x01) == 0)
   3049 			return offset; /* Found */
   3050 
   3051 	/* Not found */
   3052 	return 0;
   3053 }
   3054 
   3055 static int
   3056 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3057 {
   3058 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3059 	uint16_t offset = NVM_OFF_MACADDR;
   3060 	int do_invert = 0;
   3061 
   3062 	switch (sc->sc_type) {
   3063 	case WM_T_82580:
   3064 	case WM_T_I350:
   3065 	case WM_T_I354:
   3066 		/* EEPROM Top Level Partitioning */
   3067 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3068 		break;
   3069 	case WM_T_82571:
   3070 	case WM_T_82575:
   3071 	case WM_T_82576:
   3072 	case WM_T_80003:
   3073 	case WM_T_I210:
   3074 	case WM_T_I211:
   3075 		offset = wm_check_alt_mac_addr(sc);
   3076 		if (offset == 0)
   3077 			if ((sc->sc_funcid & 0x01) == 1)
   3078 				do_invert = 1;
   3079 		break;
   3080 	default:
   3081 		if ((sc->sc_funcid & 0x01) == 1)
   3082 			do_invert = 1;
   3083 		break;
   3084 	}
   3085 
   3086 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3087 		goto bad;
   3088 
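	/* NVM words are little-endian: low byte first. */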
   3089 	enaddr[0] = myea[0] & 0xff;
   3090 	enaddr[1] = myea[0] >> 8;
   3091 	enaddr[2] = myea[1] & 0xff;
   3092 	enaddr[3] = myea[1] >> 8;
   3093 	enaddr[4] = myea[2] & 0xff;
   3094 	enaddr[5] = myea[2] >> 8;
   3095 
   3096 	/*
   3097 	 * Toggle the LSB of the MAC address on the second port
   3098 	 * of some dual port cards.
   3099 	 */
   3100 	if (do_invert != 0)
   3101 		enaddr[5] ^= 1;
   3102 
   3103 	return 0;
   3104 
   3105  bad:
   3106 	return -1;
   3107 }
   3108 
   3109 /*
   3110  * wm_set_ral:
   3111  *
 *	Set an entry in the receive address list.
   3113  */
   3114 static void
   3115 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3116 {
   3117 	uint32_t ral_lo, ral_hi;
   3118 
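	/*
	 * Pack the six address bytes into the low/high register pair;
	 * RAL_AV in the high word marks the entry as valid.
	 */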
   3119 	if (enaddr != NULL) {
   3120 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3121 		    (enaddr[3] << 24);
   3122 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3123 		ral_hi |= RAL_AV;
   3124 	} else {
   3125 		ral_lo = 0;
   3126 		ral_hi = 0;
   3127 	}
   3128 
   3129 	if (sc->sc_type >= WM_T_82544) {
   3130 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3131 		    ral_lo);
   3132 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3133 		    ral_hi);
   3134 	} else {
   3135 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3136 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3137 	}
   3138 }
   3139 
   3140 /*
   3141  * wm_mchash:
   3142  *
   3143  *	Compute the hash of the multicast address for the 4096-bit
   3144  *	multicast filter.
   3145  */
   3146 static uint32_t
   3147 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3148 {
   3149 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3150 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3151 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3152 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3153 	uint32_t hash;
   3154 
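	/*
	 * The hash is built from bits of the last two bytes of the address;
	 * sc_mchash_type selects the bit window.  ICH/PCH variants use a
	 * 10-bit hash, all others a 12-bit hash.
	 */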
   3155 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3156 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3157 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3158 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3159 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3160 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3161 		return (hash & 0x3ff);
   3162 	}
   3163 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3164 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3165 
   3166 	return (hash & 0xfff);
   3167 }
   3168 
   3169 /*
   3170  * wm_set_filter:
   3171  *
   3172  *	Set up the receive filter.
   3173  */
   3174 static void
   3175 wm_set_filter(struct wm_softc *sc)
   3176 {
   3177 	struct ethercom *ec = &sc->sc_ethercom;
   3178 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3179 	struct ether_multi *enm;
   3180 	struct ether_multistep step;
   3181 	bus_addr_t mta_reg;
   3182 	uint32_t hash, reg, bit;
   3183 	int i, size, ralmax;
   3184 
   3185 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3186 		device_xname(sc->sc_dev), __func__));
   3187 
   3188 	if (sc->sc_type >= WM_T_82544)
   3189 		mta_reg = WMREG_CORDOVA_MTA;
   3190 	else
   3191 		mta_reg = WMREG_MTA;
   3192 
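	/* Start with broadcast accept and both promiscuous modes off. */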
   3193 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3194 
   3195 	if (ifp->if_flags & IFF_BROADCAST)
   3196 		sc->sc_rctl |= RCTL_BAM;
   3197 	if (ifp->if_flags & IFF_PROMISC) {
   3198 		sc->sc_rctl |= RCTL_UPE;
   3199 		goto allmulti;
   3200 	}
   3201 
   3202 	/*
   3203 	 * Set the station address in the first RAL slot, and
   3204 	 * clear the remaining slots.
   3205 	 */
   3206 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3208 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3209 	    || (sc->sc_type == WM_T_PCH))
   3210 		size = WM_RAL_TABSIZE_ICH8;
   3211 	else if (sc->sc_type == WM_T_PCH2)
   3212 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT)
	    || (sc->sc_type == WM_T_PCH_SPT))
   3214 		size = WM_RAL_TABSIZE_PCH_LPT;
   3215 	else if (sc->sc_type == WM_T_82575)
   3216 		size = WM_RAL_TABSIZE_82575;
   3217 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3218 		size = WM_RAL_TABSIZE_82576;
   3219 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3220 		size = WM_RAL_TABSIZE_I350;
   3221 	else
   3222 		size = WM_RAL_TABSIZE;
   3223 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3224 
   3225 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3226 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3227 		switch (i) {
   3228 		case 0:
   3229 			/* We can use all entries */
   3230 			ralmax = size;
   3231 			break;
   3232 		case 1:
   3233 			/* Only RAR[0] */
   3234 			ralmax = 1;
   3235 			break;
   3236 		default:
   3237 			/* available SHRA + RAR[0] */
   3238 			ralmax = i + 1;
   3239 		}
   3240 	} else
   3241 		ralmax = size;
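	/*
	 * Leave entries at or above ralmax untouched; on PCH_LPT/PCH_SPT
	 * they may be write-locked by firmware (FWSM_WLOCK_MAC).
	 */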
   3242 	for (i = 1; i < size; i++) {
   3243 		if (i < ralmax)
   3244 			wm_set_ral(sc, NULL, i);
   3245 	}
   3246 
   3247 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3248 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3249 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3250 	    || (sc->sc_type == WM_T_PCH_SPT))
   3251 		size = WM_ICH8_MC_TABSIZE;
   3252 	else
   3253 		size = WM_MC_TABSIZE;
   3254 	/* Clear out the multicast table. */
   3255 	for (i = 0; i < size; i++)
   3256 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3257 
   3258 	ETHER_FIRST_MULTI(step, ec, enm);
   3259 	while (enm != NULL) {
   3260 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3261 			/*
   3262 			 * We must listen to a range of multicast addresses.
   3263 			 * For now, just accept all multicasts, rather than
   3264 			 * trying to set only those filter bits needed to match
   3265 			 * the range.  (At this time, the only use of address
   3266 			 * ranges is for IP multicast routing, for which the
   3267 			 * range is big enough to require all bits set.)
   3268 			 */
   3269 			goto allmulti;
   3270 		}
   3271 
   3272 		hash = wm_mchash(sc, enm->enm_addrlo);
   3273 
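		/*
		 * The upper hash bits select one 32-bit MTA register;
		 * the low five bits select the bit within it.
		 */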
   3274 		reg = (hash >> 5);
   3275 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3276 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3277 		    || (sc->sc_type == WM_T_PCH2)
   3278 		    || (sc->sc_type == WM_T_PCH_LPT)
   3279 		    || (sc->sc_type == WM_T_PCH_SPT))
   3280 			reg &= 0x1f;
   3281 		else
   3282 			reg &= 0x7f;
   3283 		bit = hash & 0x1f;
   3284 
   3285 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3286 		hash |= 1U << bit;
   3287 
   3288 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3289 			/*
			 * 82544 Errata 9: Certain registers cannot be
			 * written with particular alignments in PCI-X bus
			 * operation (FCAH, MTA and VFTA).
   3293 			 */
   3294 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3295 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3296 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3297 		} else
   3298 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3299 
   3300 		ETHER_NEXT_MULTI(step, enm);
   3301 	}
   3302 
   3303 	ifp->if_flags &= ~IFF_ALLMULTI;
   3304 	goto setit;
   3305 
   3306  allmulti:
   3307 	ifp->if_flags |= IFF_ALLMULTI;
   3308 	sc->sc_rctl |= RCTL_MPE;
   3309 
   3310  setit:
   3311 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3312 }
   3313 
   3314 /* Reset and init related */
   3315 
   3316 static void
   3317 wm_set_vlan(struct wm_softc *sc)
   3318 {
   3319 
   3320 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3321 		device_xname(sc->sc_dev), __func__));
   3322 
   3323 	/* Deal with VLAN enables. */
   3324 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3325 		sc->sc_ctrl |= CTRL_VME;
   3326 	else
   3327 		sc->sc_ctrl &= ~CTRL_VME;
   3328 
   3329 	/* Write the control registers. */
   3330 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3331 }
   3332 
   3333 static void
   3334 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3335 {
   3336 	uint32_t gcr;
   3337 	pcireg_t ctrl2;
   3338 
   3339 	gcr = CSR_READ(sc, WMREG_GCR);
   3340 
	/* Only take action if the timeout value is at its default of 0 */
   3342 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3343 		goto out;
   3344 
   3345 	if ((gcr & GCR_CAP_VER2) == 0) {
   3346 		gcr |= GCR_CMPL_TMOUT_10MS;
   3347 		goto out;
   3348 	}
   3349 
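	/*
	 * Devices with PCIe capability version 2 set the timeout via
	 * config space (Device Control/Status Register 2) instead.
	 */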
   3350 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3351 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3352 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3353 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3354 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3355 
   3356 out:
   3357 	/* Disable completion timeout resend */
   3358 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3359 
   3360 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3361 }
   3362 
   3363 void
   3364 wm_get_auto_rd_done(struct wm_softc *sc)
   3365 {
   3366 	int i;
   3367 
	/* Wait for EEPROM to reload */
   3369 	switch (sc->sc_type) {
   3370 	case WM_T_82571:
   3371 	case WM_T_82572:
   3372 	case WM_T_82573:
   3373 	case WM_T_82574:
   3374 	case WM_T_82583:
   3375 	case WM_T_82575:
   3376 	case WM_T_82576:
   3377 	case WM_T_82580:
   3378 	case WM_T_I350:
   3379 	case WM_T_I354:
   3380 	case WM_T_I210:
   3381 	case WM_T_I211:
   3382 	case WM_T_80003:
   3383 	case WM_T_ICH8:
   3384 	case WM_T_ICH9:
   3385 		for (i = 0; i < 10; i++) {
   3386 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3387 				break;
   3388 			delay(1000);
   3389 		}
   3390 		if (i == 10) {
   3391 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3392 			    "complete\n", device_xname(sc->sc_dev));
   3393 		}
   3394 		break;
   3395 	default:
   3396 		break;
   3397 	}
   3398 }
   3399 
   3400 void
   3401 wm_lan_init_done(struct wm_softc *sc)
   3402 {
   3403 	uint32_t reg = 0;
   3404 	int i;
   3405 
   3406 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3407 		device_xname(sc->sc_dev), __func__));
   3408 
   3409 	/* Wait for eeprom to reload */
   3410 	switch (sc->sc_type) {
   3411 	case WM_T_ICH10:
   3412 	case WM_T_PCH:
   3413 	case WM_T_PCH2:
   3414 	case WM_T_PCH_LPT:
   3415 	case WM_T_PCH_SPT:
   3416 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3417 			reg = CSR_READ(sc, WMREG_STATUS);
   3418 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3419 				break;
   3420 			delay(100);
   3421 		}
   3422 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3423 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3424 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3425 		}
   3426 		break;
   3427 	default:
   3428 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3429 		    __func__);
   3430 		break;
   3431 	}
   3432 
   3433 	reg &= ~STATUS_LAN_INIT_DONE;
   3434 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3435 }
   3436 
   3437 void
   3438 wm_get_cfg_done(struct wm_softc *sc)
   3439 {
   3440 	int mask;
   3441 	uint32_t reg;
   3442 	int i;
   3443 
   3444 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3445 		device_xname(sc->sc_dev), __func__));
   3446 
   3447 	/* Wait for eeprom to reload */
   3448 	switch (sc->sc_type) {
   3449 	case WM_T_82542_2_0:
   3450 	case WM_T_82542_2_1:
   3451 		/* null */
   3452 		break;
   3453 	case WM_T_82543:
   3454 	case WM_T_82544:
   3455 	case WM_T_82540:
   3456 	case WM_T_82545:
   3457 	case WM_T_82545_3:
   3458 	case WM_T_82546:
   3459 	case WM_T_82546_3:
   3460 	case WM_T_82541:
   3461 	case WM_T_82541_2:
   3462 	case WM_T_82547:
   3463 	case WM_T_82547_2:
   3464 	case WM_T_82573:
   3465 	case WM_T_82574:
   3466 	case WM_T_82583:
   3467 		/* generic */
   3468 		delay(10*1000);
   3469 		break;
   3470 	case WM_T_80003:
   3471 	case WM_T_82571:
   3472 	case WM_T_82572:
   3473 	case WM_T_82575:
   3474 	case WM_T_82576:
   3475 	case WM_T_82580:
   3476 	case WM_T_I350:
   3477 	case WM_T_I354:
   3478 	case WM_T_I210:
   3479 	case WM_T_I211:
   3480 		if (sc->sc_type == WM_T_82571) {
   3481 			/* Only 82571 shares port 0 */
   3482 			mask = EEMNGCTL_CFGDONE_0;
   3483 		} else
   3484 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3485 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3486 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3487 				break;
   3488 			delay(1000);
   3489 		}
   3490 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3491 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3492 				device_xname(sc->sc_dev), __func__));
   3493 		}
   3494 		break;
   3495 	case WM_T_ICH8:
   3496 	case WM_T_ICH9:
   3497 	case WM_T_ICH10:
   3498 	case WM_T_PCH:
   3499 	case WM_T_PCH2:
   3500 	case WM_T_PCH_LPT:
   3501 	case WM_T_PCH_SPT:
   3502 		delay(10*1000);
   3503 		if (sc->sc_type >= WM_T_ICH10)
   3504 			wm_lan_init_done(sc);
   3505 		else
   3506 			wm_get_auto_rd_done(sc);
   3507 
   3508 		reg = CSR_READ(sc, WMREG_STATUS);
   3509 		if ((reg & STATUS_PHYRA) != 0)
   3510 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3511 		break;
   3512 	default:
   3513 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3514 		    __func__);
   3515 		break;
   3516 	}
   3517 }
   3518 
   3519 /* Init hardware bits */
   3520 void
   3521 wm_initialize_hardware_bits(struct wm_softc *sc)
   3522 {
   3523 	uint32_t tarc0, tarc1, reg;
   3524 
   3525 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3526 		device_xname(sc->sc_dev), __func__));
   3527 
   3528 	/* For 82571 variant, 80003 and ICHs */
   3529 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3530 	    || (sc->sc_type >= WM_T_80003)) {
   3531 
   3532 		/* Transmit Descriptor Control 0 */
   3533 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3534 		reg |= TXDCTL_COUNT_DESC;
   3535 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3536 
   3537 		/* Transmit Descriptor Control 1 */
   3538 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3539 		reg |= TXDCTL_COUNT_DESC;
   3540 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3541 
   3542 		/* TARC0 */
   3543 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3544 		switch (sc->sc_type) {
   3545 		case WM_T_82571:
   3546 		case WM_T_82572:
   3547 		case WM_T_82573:
   3548 		case WM_T_82574:
   3549 		case WM_T_82583:
   3550 		case WM_T_80003:
   3551 			/* Clear bits 30..27 */
   3552 			tarc0 &= ~__BITS(30, 27);
   3553 			break;
   3554 		default:
   3555 			break;
   3556 		}
   3557 
   3558 		switch (sc->sc_type) {
   3559 		case WM_T_82571:
   3560 		case WM_T_82572:
   3561 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3562 
   3563 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3564 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3565 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3566 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3568 
   3569 			/* TARC1 bit 28 */
   3570 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3571 				tarc1 &= ~__BIT(28);
   3572 			else
   3573 				tarc1 |= __BIT(28);
   3574 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3575 
   3576 			/*
   3577 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3579 			 */
   3580 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3581 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3582 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3583 			break;
   3584 		case WM_T_82573:
   3585 		case WM_T_82574:
   3586 		case WM_T_82583:
   3587 			if ((sc->sc_type == WM_T_82574)
   3588 			    || (sc->sc_type == WM_T_82583))
   3589 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3590 
   3591 			/* Extended Device Control */
   3592 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3593 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3594 			reg |= __BIT(22);	/* Set bit 22 */
   3595 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3596 
   3597 			/* Device Control */
   3598 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3599 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3600 
   3601 			/* PCIe Control Register */
   3602 			/*
   3603 			 * 82573 Errata (unknown).
   3604 			 *
   3605 			 * 82574 Errata 25 and 82583 Errata 12
   3606 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer does not
			 *   have this bug.
   3608 			 */
   3609 			reg = CSR_READ(sc, WMREG_GCR);
   3610 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3611 			CSR_WRITE(sc, WMREG_GCR, reg);
   3612 
   3613 			if ((sc->sc_type == WM_T_82574)
   3614 			    || (sc->sc_type == WM_T_82583)) {
   3615 				/*
				 * The documentation says this bit must be
				 * set for proper operation.
   3618 				 */
   3619 				reg = CSR_READ(sc, WMREG_GCR);
   3620 				reg |= __BIT(22);
   3621 				CSR_WRITE(sc, WMREG_GCR, reg);
   3622 
				/*
				 * Apply a workaround for a documented
				 * hardware erratum: some unreliable PCIe
				 * completions can occur, particularly with
				 * ASPM enabled.  Without the fix, the
				 * problem can cause Tx timeouts.
				 */
   3631 				reg = CSR_READ(sc, WMREG_GCR2);
   3632 				reg |= __BIT(0);
   3633 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3634 			}
   3635 			break;
   3636 		case WM_T_80003:
   3637 			/* TARC0 */
   3638 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3639 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3641 
   3642 			/* TARC1 bit 28 */
   3643 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3644 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3645 				tarc1 &= ~__BIT(28);
   3646 			else
   3647 				tarc1 |= __BIT(28);
   3648 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3649 			break;
   3650 		case WM_T_ICH8:
   3651 		case WM_T_ICH9:
   3652 		case WM_T_ICH10:
   3653 		case WM_T_PCH:
   3654 		case WM_T_PCH2:
   3655 		case WM_T_PCH_LPT:
   3656 		case WM_T_PCH_SPT:
   3657 			/* TARC0 */
   3658 			if ((sc->sc_type == WM_T_ICH8)
   3659 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3660 				/* Set TARC0 bits 29 and 28 */
   3661 				tarc0 |= __BITS(29, 28);
   3662 			}
   3663 			/* Set TARC0 bits 23,24,26,27 */
   3664 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3665 
   3666 			/* CTRL_EXT */
   3667 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3668 			reg |= __BIT(22);	/* Set bit 22 */
   3669 			/*
   3670 			 * Enable PHY low-power state when MAC is at D3
   3671 			 * w/o WoL
   3672 			 */
   3673 			if (sc->sc_type >= WM_T_PCH)
   3674 				reg |= CTRL_EXT_PHYPDEN;
   3675 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3676 
   3677 			/* TARC1 */
   3678 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3679 			/* bit 28 */
   3680 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3681 				tarc1 &= ~__BIT(28);
   3682 			else
   3683 				tarc1 |= __BIT(28);
   3684 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3685 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3686 
   3687 			/* Device Status */
   3688 			if (sc->sc_type == WM_T_ICH8) {
   3689 				reg = CSR_READ(sc, WMREG_STATUS);
   3690 				reg &= ~__BIT(31);
   3691 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3692 
   3693 			}
   3694 
   3695 			/* IOSFPC */
   3696 			if (sc->sc_type == WM_T_PCH_SPT) {
   3697 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3699 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3700 			}
			/*
			 * Work around a descriptor data corruption issue
			 * during NFS v2 UDP traffic by simply disabling
			 * the NFS filtering capability.
			 */
   3706 			reg = CSR_READ(sc, WMREG_RFCTL);
   3707 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3708 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3709 			break;
   3710 		default:
   3711 			break;
   3712 		}
   3713 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3714 
   3715 		/*
   3716 		 * 8257[12] Errata No.52 and some others.
   3717 		 * Avoid RSS Hash Value bug.
   3718 		 */
   3719 		switch (sc->sc_type) {
   3720 		case WM_T_82571:
   3721 		case WM_T_82572:
   3722 		case WM_T_82573:
   3723 		case WM_T_80003:
   3724 		case WM_T_ICH8:
   3725 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS |
			    WMREG_RFCTL_IPV6EXDIS;
   3727 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3728 			break;
   3729 		default:
   3730 			break;
   3731 		}
   3732 	}
   3733 }
   3734 
   3735 static uint32_t
   3736 wm_rxpbs_adjust_82580(uint32_t val)
   3737 {
   3738 	uint32_t rv = 0;
   3739 
   3740 	if (val < __arraycount(wm_82580_rxpbs_table))
   3741 		rv = wm_82580_rxpbs_table[val];
   3742 
   3743 	return rv;
   3744 }
   3745 
   3746 static void
   3747 wm_flush_desc_rings(struct wm_softc *sc)
   3748 {
   3749 	pcireg_t preg;
   3750 	uint32_t reg;
   3751 	int nexttx;
   3752 
   3753 	/* First, disable MULR fix in FEXTNVM11 */
   3754 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3755 	reg |= FEXTNVM11_DIS_MULRFIX;
   3756 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3757 
   3758 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3759 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3760 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3761 		struct wm_txqueue *txq;
   3762 		wiseman_txdesc_t *txd;
   3763 
   3764 		/* TX */
   3765 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3766 		    device_xname(sc->sc_dev), preg, reg);
   3767 		reg = CSR_READ(sc, WMREG_TCTL);
   3768 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3769 
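		/*
		 * Queue a single dummy 512-byte descriptor and advance
		 * the tail pointer so the hardware drains its internal
		 * descriptor cache.
		 */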
   3770 		txq = &sc->sc_queue[0].wmq_txq;
   3771 		nexttx = txq->txq_next;
   3772 		txd = &txq->txq_descs[nexttx];
   3773 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3775 		txd->wtx_fields.wtxu_status = 0;
   3776 		txd->wtx_fields.wtxu_options = 0;
   3777 		txd->wtx_fields.wtxu_vlan = 0;
   3778 
   3779 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3780 			BUS_SPACE_BARRIER_WRITE);
   3781 
   3782 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3783 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3784 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3785 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3786 		delay(250);
   3787 	}
   3788 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3789 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3790 		uint32_t rctl;
   3791 
   3792 		/* RX */
   3793 		printf("%s: Need RX flush (reg = %08x)\n",
   3794 		    device_xname(sc->sc_dev), preg);
   3795 		rctl = CSR_READ(sc, WMREG_RCTL);
   3796 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3797 		CSR_WRITE_FLUSH(sc);
   3798 		delay(150);
   3799 
   3800 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3801 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3802 		reg &= 0xffffc000;
		/*
		 * Update thresholds: set the prefetch threshold to 31 and
		 * the host threshold to 1, and make sure the granularity is
		 * "descriptors", not "cache lines".
		 */
   3808 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3809 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3810 
   3811 		/*
		 * Momentarily enable the RX ring for the changes to take
		 * effect.
   3814 		 */
   3815 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3816 		CSR_WRITE_FLUSH(sc);
   3817 		delay(150);
   3818 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3819 	}
   3820 }
   3821 
   3822 /*
   3823  * wm_reset:
   3824  *
   3825  *	Reset the i82542 chip.
   3826  */
   3827 static void
   3828 wm_reset(struct wm_softc *sc)
   3829 {
   3830 	int phy_reset = 0;
   3831 	int i, error = 0;
   3832 	uint32_t reg;
   3833 
   3834 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3835 		device_xname(sc->sc_dev), __func__));
   3836 	KASSERT(sc->sc_type != 0);
   3837 
   3838 	/*
   3839 	 * Allocate on-chip memory according to the MTU size.
   3840 	 * The Packet Buffer Allocation register must be written
   3841 	 * before the chip is reset.
   3842 	 */
   3843 	switch (sc->sc_type) {
   3844 	case WM_T_82547:
   3845 	case WM_T_82547_2:
   3846 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3847 		    PBA_22K : PBA_30K;
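		/*
		 * Record the Tx FIFO geometry (the on-chip packet buffer
		 * space above the Rx allocation); the 82547 Tx FIFO stall
		 * workaround uses it.
		 */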
   3848 		for (i = 0; i < sc->sc_nqueues; i++) {
   3849 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3850 			txq->txq_fifo_head = 0;
   3851 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3852 			txq->txq_fifo_size =
   3853 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3854 			txq->txq_fifo_stall = 0;
   3855 		}
   3856 		break;
   3857 	case WM_T_82571:
   3858 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3860 	case WM_T_80003:
   3861 		sc->sc_pba = PBA_32K;
   3862 		break;
   3863 	case WM_T_82573:
   3864 		sc->sc_pba = PBA_12K;
   3865 		break;
   3866 	case WM_T_82574:
   3867 	case WM_T_82583:
   3868 		sc->sc_pba = PBA_20K;
   3869 		break;
   3870 	case WM_T_82576:
   3871 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3872 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3873 		break;
   3874 	case WM_T_82580:
   3875 	case WM_T_I350:
   3876 	case WM_T_I354:
   3877 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3878 		break;
   3879 	case WM_T_I210:
   3880 	case WM_T_I211:
   3881 		sc->sc_pba = PBA_34K;
   3882 		break;
   3883 	case WM_T_ICH8:
   3884 		/* Workaround for a bit corruption issue in FIFO memory */
   3885 		sc->sc_pba = PBA_8K;
   3886 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3887 		break;
   3888 	case WM_T_ICH9:
   3889 	case WM_T_ICH10:
   3890 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3891 		    PBA_14K : PBA_10K;
   3892 		break;
   3893 	case WM_T_PCH:
   3894 	case WM_T_PCH2:
   3895 	case WM_T_PCH_LPT:
   3896 	case WM_T_PCH_SPT:
   3897 		sc->sc_pba = PBA_26K;
   3898 		break;
   3899 	default:
   3900 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3901 		    PBA_40K : PBA_48K;
   3902 		break;
   3903 	}
   3904 	/*
	 * Only old or non-multiqueue devices have the PBA register.
   3906 	 * XXX Need special handling for 82575.
   3907 	 */
   3908 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3909 	    || (sc->sc_type == WM_T_82575))
   3910 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3911 
   3912 	/* Prevent the PCI-E bus from sticking */
   3913 	if (sc->sc_flags & WM_F_PCIE) {
   3914 		int timeout = 800;
   3915 
   3916 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3917 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3918 
   3919 		while (timeout--) {
   3920 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3921 			    == 0)
   3922 				break;
   3923 			delay(100);
   3924 		}
   3925 	}
   3926 
	/* Set the completion timeout for the interface */
   3928 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3929 	    || (sc->sc_type == WM_T_82580)
   3930 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3931 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3932 		wm_set_pcie_completion_timeout(sc);
   3933 
   3934 	/* Clear interrupt */
   3935 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3936 	if (sc->sc_nintrs > 1) {
   3937 		if (sc->sc_type != WM_T_82574) {
   3938 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3939 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3940 		} else {
   3941 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3942 		}
   3943 	}
   3944 
   3945 	/* Stop the transmit and receive processes. */
   3946 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3947 	sc->sc_rctl &= ~RCTL_EN;
   3948 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3949 	CSR_WRITE_FLUSH(sc);
   3950 
   3951 	/* XXX set_tbi_sbp_82543() */
   3952 
   3953 	delay(10*1000);
   3954 
   3955 	/* Must acquire the MDIO ownership before MAC reset */
   3956 	switch (sc->sc_type) {
   3957 	case WM_T_82573:
   3958 	case WM_T_82574:
   3959 	case WM_T_82583:
   3960 		error = wm_get_hw_semaphore_82573(sc);
   3961 		break;
   3962 	default:
   3963 		break;
   3964 	}
   3965 
   3966 	/*
   3967 	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description of the PHY_RST bit in the CTRL register
	 * in 8254x_GBe_SDM.pdf.
   3970 	 */
   3971 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3972 		CSR_WRITE(sc, WMREG_CTRL,
   3973 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3974 		CSR_WRITE_FLUSH(sc);
   3975 		delay(5000);
   3976 	}
   3977 
   3978 	switch (sc->sc_type) {
   3979 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3980 	case WM_T_82541:
   3981 	case WM_T_82541_2:
   3982 	case WM_T_82547:
   3983 	case WM_T_82547_2:
   3984 		/*
   3985 		 * On some chipsets, a reset through a memory-mapped write
   3986 		 * cycle can cause the chip to reset before completing the
   3987 		 * write cycle.  This causes major headache that can be
   3988 		 * avoided by issuing the reset via indirect register writes
   3989 		 * through I/O space.
   3990 		 *
   3991 		 * So, if we successfully mapped the I/O BAR at attach time,
   3992 		 * use that.  Otherwise, try our luck with a memory-mapped
   3993 		 * reset.
   3994 		 */
   3995 		if (sc->sc_flags & WM_F_IOH_VALID)
   3996 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3997 		else
   3998 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3999 		break;
   4000 	case WM_T_82545_3:
   4001 	case WM_T_82546_3:
   4002 		/* Use the shadow control register on these chips. */
   4003 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4004 		break;
   4005 	case WM_T_80003:
   4006 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4007 		sc->phy.acquire(sc);
   4008 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4009 		sc->phy.release(sc);
   4010 		break;
   4011 	case WM_T_ICH8:
   4012 	case WM_T_ICH9:
   4013 	case WM_T_ICH10:
   4014 	case WM_T_PCH:
   4015 	case WM_T_PCH2:
   4016 	case WM_T_PCH_LPT:
   4017 	case WM_T_PCH_SPT:
   4018 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4019 		if (wm_phy_resetisblocked(sc) == false) {
   4020 			/*
   4021 			 * Gate automatic PHY configuration by hardware on
   4022 			 * non-managed 82579
   4023 			 */
   4024 			if ((sc->sc_type == WM_T_PCH2)
   4025 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4026 				== 0))
   4027 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4028 
   4029 			reg |= CTRL_PHY_RESET;
   4030 			phy_reset = 1;
   4031 		} else
   4032 			printf("XXX reset is blocked!!!\n");
   4033 		sc->phy.acquire(sc);
   4034 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
   4036 		delay(20*1000);
   4037 		mutex_exit(sc->sc_ich_phymtx);
   4038 		break;
   4039 	case WM_T_82580:
   4040 	case WM_T_I350:
   4041 	case WM_T_I354:
   4042 	case WM_T_I210:
   4043 	case WM_T_I211:
   4044 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4045 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4046 			CSR_WRITE_FLUSH(sc);
   4047 		delay(5000);
   4048 		break;
   4049 	case WM_T_82542_2_0:
   4050 	case WM_T_82542_2_1:
   4051 	case WM_T_82543:
   4052 	case WM_T_82540:
   4053 	case WM_T_82545:
   4054 	case WM_T_82546:
   4055 	case WM_T_82571:
   4056 	case WM_T_82572:
   4057 	case WM_T_82573:
   4058 	case WM_T_82574:
   4059 	case WM_T_82575:
   4060 	case WM_T_82576:
   4061 	case WM_T_82583:
   4062 	default:
   4063 		/* Everything else can safely use the documented method. */
   4064 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4065 		break;
   4066 	}
   4067 
   4068 	/* Must release the MDIO ownership after MAC reset */
   4069 	switch (sc->sc_type) {
   4070 	case WM_T_82573:
   4071 	case WM_T_82574:
   4072 	case WM_T_82583:
   4073 		if (error == 0)
   4074 			wm_put_hw_semaphore_82573(sc);
   4075 		break;
   4076 	default:
   4077 		break;
   4078 	}
   4079 
   4080 	if (phy_reset != 0)
   4081 		wm_get_cfg_done(sc);
   4082 
   4083 	/* reload EEPROM */
   4084 	switch (sc->sc_type) {
   4085 	case WM_T_82542_2_0:
   4086 	case WM_T_82542_2_1:
   4087 	case WM_T_82543:
   4088 	case WM_T_82544:
   4089 		delay(10);
   4090 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4091 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4092 		CSR_WRITE_FLUSH(sc);
   4093 		delay(2000);
   4094 		break;
   4095 	case WM_T_82540:
   4096 	case WM_T_82545:
   4097 	case WM_T_82545_3:
   4098 	case WM_T_82546:
   4099 	case WM_T_82546_3:
   4100 		delay(5*1000);
   4101 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4102 		break;
   4103 	case WM_T_82541:
   4104 	case WM_T_82541_2:
   4105 	case WM_T_82547:
   4106 	case WM_T_82547_2:
   4107 		delay(20000);
   4108 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4109 		break;
   4110 	case WM_T_82571:
   4111 	case WM_T_82572:
   4112 	case WM_T_82573:
   4113 	case WM_T_82574:
   4114 	case WM_T_82583:
   4115 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4116 			delay(10);
   4117 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4118 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4119 			CSR_WRITE_FLUSH(sc);
   4120 		}
   4121 		/* check EECD_EE_AUTORD */
   4122 		wm_get_auto_rd_done(sc);
   4123 		/*
		 * PHY configuration from the NVM starts only after
		 * EECD_AUTO_RD is set.
   4126 		 */
   4127 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4128 		    || (sc->sc_type == WM_T_82583))
   4129 			delay(25*1000);
   4130 		break;
   4131 	case WM_T_82575:
   4132 	case WM_T_82576:
   4133 	case WM_T_82580:
   4134 	case WM_T_I350:
   4135 	case WM_T_I354:
   4136 	case WM_T_I210:
   4137 	case WM_T_I211:
   4138 	case WM_T_80003:
   4139 		/* check EECD_EE_AUTORD */
   4140 		wm_get_auto_rd_done(sc);
   4141 		break;
   4142 	case WM_T_ICH8:
   4143 	case WM_T_ICH9:
   4144 	case WM_T_ICH10:
   4145 	case WM_T_PCH:
   4146 	case WM_T_PCH2:
   4147 	case WM_T_PCH_LPT:
   4148 	case WM_T_PCH_SPT:
   4149 		break;
   4150 	default:
   4151 		panic("%s: unknown type\n", __func__);
   4152 	}
   4153 
	/* Check whether the EEPROM is present */
   4155 	switch (sc->sc_type) {
   4156 	case WM_T_82575:
   4157 	case WM_T_82576:
   4158 	case WM_T_82580:
   4159 	case WM_T_I350:
   4160 	case WM_T_I354:
   4161 	case WM_T_ICH8:
   4162 	case WM_T_ICH9:
   4163 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4164 			/* Not found */
   4165 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4166 			if (sc->sc_type == WM_T_82575)
   4167 				wm_reset_init_script_82575(sc);
   4168 		}
   4169 		break;
   4170 	default:
   4171 		break;
   4172 	}
   4173 
   4174 	if ((sc->sc_type == WM_T_82580)
   4175 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4176 		/* clear global device reset status bit */
   4177 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4178 	}
   4179 
   4180 	/* Clear any pending interrupt events. */
   4181 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4182 	reg = CSR_READ(sc, WMREG_ICR);
   4183 	if (sc->sc_nintrs > 1) {
   4184 		if (sc->sc_type != WM_T_82574) {
   4185 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4186 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4187 		} else
   4188 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4189 	}
   4190 
   4191 	/* reload sc_ctrl */
   4192 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4193 
   4194 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4195 		wm_set_eee_i350(sc);
   4196 
	/* Clear the host wakeup bit after LCD reset */
   4198 	if (sc->sc_type >= WM_T_PCH) {
   4199 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4200 		    BM_PORT_GEN_CFG);
   4201 		reg &= ~BM_WUC_HOST_WU_BIT;
   4202 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4203 		    BM_PORT_GEN_CFG, reg);
   4204 	}
   4205 
   4206 	/*
	 * For PCH, this write makes sure that any noise is detected as a
	 * CRC error and dropped, rather than showing up as a bad packet
	 * to the DMA engine.
   4210 	 */
   4211 	if (sc->sc_type == WM_T_PCH)
   4212 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4213 
   4214 	if (sc->sc_type >= WM_T_82544)
   4215 		CSR_WRITE(sc, WMREG_WUC, 0);
   4216 
   4217 	wm_reset_mdicnfg_82580(sc);
   4218 
   4219 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4220 		wm_pll_workaround_i210(sc);
   4221 }
   4222 
   4223 /*
   4224  * wm_add_rxbuf:
   4225  *
 *	Add a receive buffer to the indicated descriptor.
   4227  */
   4228 static int
   4229 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4230 {
   4231 	struct wm_softc *sc = rxq->rxq_sc;
   4232 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4233 	struct mbuf *m;
   4234 	int error;
   4235 
   4236 	KASSERT(mutex_owned(rxq->rxq_lock));
   4237 
   4238 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4239 	if (m == NULL)
   4240 		return ENOBUFS;
   4241 
   4242 	MCLGET(m, M_DONTWAIT);
   4243 	if ((m->m_flags & M_EXT) == 0) {
   4244 		m_freem(m);
   4245 		return ENOBUFS;
   4246 	}
   4247 
   4248 	if (rxs->rxs_mbuf != NULL)
   4249 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4250 
   4251 	rxs->rxs_mbuf = m;
   4252 
   4253 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4254 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4255 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4256 	if (error) {
   4257 		/* XXX XXX XXX */
   4258 		aprint_error_dev(sc->sc_dev,
   4259 		    "unable to load rx DMA map %d, error = %d\n",
   4260 		    idx, error);
   4261 		panic("wm_add_rxbuf");
   4262 	}
   4263 
   4264 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4265 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4266 
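	/*
	 * On NEWQUEUE devices, write the descriptor only while the
	 * receiver is enabled; otherwise always initialize it.
	 */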
   4267 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4268 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4269 			wm_init_rxdesc(rxq, idx);
   4270 	} else
   4271 		wm_init_rxdesc(rxq, idx);
   4272 
   4273 	return 0;
   4274 }
   4275 
   4276 /*
   4277  * wm_rxdrain:
   4278  *
   4279  *	Drain the receive queue.
   4280  */
   4281 static void
   4282 wm_rxdrain(struct wm_rxqueue *rxq)
   4283 {
   4284 	struct wm_softc *sc = rxq->rxq_sc;
   4285 	struct wm_rxsoft *rxs;
   4286 	int i;
   4287 
   4288 	KASSERT(mutex_owned(rxq->rxq_lock));
   4289 
   4290 	for (i = 0; i < WM_NRXDESC; i++) {
   4291 		rxs = &rxq->rxq_soft[i];
   4292 		if (rxs->rxs_mbuf != NULL) {
   4293 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4294 			m_freem(rxs->rxs_mbuf);
   4295 			rxs->rxs_mbuf = NULL;
   4296 		}
   4297 	}
   4298 }
   4299 
   4300 
   4301 /*
   4302  * XXX copy from FreeBSD's sys/net/rss_config.c
   4303  */
   4304 /*
   4305  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4306  * effectiveness may be limited by algorithm choice and available entropy
   4307  * during the boot.
   4308  *
   4309  * XXXRW: And that we don't randomize it yet!
   4310  *
   4311  * This is the default Microsoft RSS specification key which is also
   4312  * the Chelsio T5 firmware default key.
   4313  */
   4314 #define RSS_KEYSIZE 40
   4315 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4316 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4317 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4318 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4319 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4320 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4321 };
   4322 
   4323 /*
   4324  * Caller must pass an array of size sizeof(rss_key).
   4325  *
   4326  * XXX
   4327  * As if_ixgbe may use this function, this function should not be
   4328  * if_wm specific function.
   4329  */
   4330 static void
   4331 wm_rss_getkey(uint8_t *key)
   4332 {
   4333 
   4334 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4335 }
   4336 
   4337 /*
   4338  * Setup registers for RSS.
   4339  *
   4340  * XXX not yet VMDq support
   4341  */
   4342 static void
   4343 wm_init_rss(struct wm_softc *sc)
   4344 {
   4345 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4346 	int i;
   4347 
   4348 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4349 
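	/*
	 * Fill the redirection table round-robin: entry i steers its hash
	 * bucket to queue (i % sc_nqueues).
	 */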
   4350 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4351 		int qid, reta_ent;
   4352 
   4353 		qid  = i % sc->sc_nqueues;
		switch (sc->sc_type) {
   4355 		case WM_T_82574:
   4356 			reta_ent = __SHIFTIN(qid,
   4357 			    RETA_ENT_QINDEX_MASK_82574);
   4358 			break;
   4359 		case WM_T_82575:
   4360 			reta_ent = __SHIFTIN(qid,
   4361 			    RETA_ENT_QINDEX1_MASK_82575);
   4362 			break;
   4363 		default:
   4364 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4365 			break;
   4366 		}
   4367 
   4368 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4369 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4370 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4371 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4372 	}
   4373 
   4374 	wm_rss_getkey((uint8_t *)rss_key);
   4375 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4376 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4377 
   4378 	if (sc->sc_type == WM_T_82574)
   4379 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4380 	else
   4381 		mrqc = MRQC_ENABLE_RSS_MQ;
   4382 
	/*
	 * XXX The same as FreeBSD's igb.
	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
	 */
   4387 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4388 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4389 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4390 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4391 
   4392 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4393 }
   4394 
   4395 /*
 * Adjust the number of TX and RX queues which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
   4400  *     - The number of MSI-X vectors (= "nvectors" argument)
   4401  *     - ncpu
   4402  */
   4403 static void
   4404 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4405 {
   4406 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4407 
   4408 	if (nvectors < 2) {
   4409 		sc->sc_nqueues = 1;
   4410 		return;
	switch (sc->sc_type) {
   4412 
   4413 	switch(sc->sc_type) {
   4414 	case WM_T_82572:
   4415 		hw_ntxqueues = 2;
   4416 		hw_nrxqueues = 2;
   4417 		break;
   4418 	case WM_T_82574:
   4419 		hw_ntxqueues = 2;
   4420 		hw_nrxqueues = 2;
   4421 		break;
   4422 	case WM_T_82575:
   4423 		hw_ntxqueues = 4;
   4424 		hw_nrxqueues = 4;
   4425 		break;
   4426 	case WM_T_82576:
   4427 		hw_ntxqueues = 16;
   4428 		hw_nrxqueues = 16;
   4429 		break;
   4430 	case WM_T_82580:
   4431 	case WM_T_I350:
   4432 	case WM_T_I354:
   4433 		hw_ntxqueues = 8;
   4434 		hw_nrxqueues = 8;
   4435 		break;
   4436 	case WM_T_I210:
   4437 		hw_ntxqueues = 4;
   4438 		hw_nrxqueues = 4;
   4439 		break;
   4440 	case WM_T_I211:
   4441 		hw_ntxqueues = 2;
   4442 		hw_nrxqueues = 2;
   4443 		break;
   4444 		/*
		 * As the Ethernet controllers below do not support MSI-X,
		 * this driver does not use multiqueue on them.
   4447 		 *     - WM_T_80003
   4448 		 *     - WM_T_ICH8
   4449 		 *     - WM_T_ICH9
   4450 		 *     - WM_T_ICH10
   4451 		 *     - WM_T_PCH
   4452 		 *     - WM_T_PCH2
   4453 		 *     - WM_T_PCH_LPT
   4454 		 */
   4455 	default:
   4456 		hw_ntxqueues = 1;
   4457 		hw_nrxqueues = 1;
   4458 		break;
   4459 	}
   4460 
   4461 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4462 
   4463 	/*
	 * As using more queues than MSI-X vectors cannot improve scaling,
	 * we limit the number of queues actually used.
   4466 	 */
	if (nvectors < hw_nqueues + 1)
		sc->sc_nqueues = nvectors - 1;
	else
		sc->sc_nqueues = hw_nqueues;
   4472 
   4473 	/*
	 * As using more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
   4476 	 */
   4477 	if (ncpu < sc->sc_nqueues)
   4478 		sc->sc_nqueues = ncpu;
   4479 }
   4480 
   4481 /*
 * Both single-interrupt MSI and INTx can use this function.
   4483  */
   4484 static int
   4485 wm_setup_legacy(struct wm_softc *sc)
   4486 {
   4487 	pci_chipset_tag_t pc = sc->sc_pc;
   4488 	const char *intrstr = NULL;
   4489 	char intrbuf[PCI_INTRSTR_LEN];
   4490 	int error;
   4491 
   4492 	error = wm_alloc_txrx_queues(sc);
   4493 	if (error) {
   4494 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4495 		    error);
   4496 		return ENOMEM;
   4497 	}
   4498 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4499 	    sizeof(intrbuf));
   4500 #ifdef WM_MPSAFE
   4501 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4502 #endif
   4503 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4504 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4505 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4507 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4508 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4509 		return ENOMEM;
   4510 	}
   4511 
   4512 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4513 	sc->sc_nintrs = 1;
   4514 	return 0;
   4515 }
   4516 
   4517 static int
   4518 wm_setup_msix(struct wm_softc *sc)
   4519 {
   4520 	void *vih;
   4521 	kcpuset_t *affinity;
   4522 	int qidx, error, intr_idx, txrx_established;
   4523 	pci_chipset_tag_t pc = sc->sc_pc;
   4524 	const char *intrstr = NULL;
   4525 	char intrbuf[PCI_INTRSTR_LEN];
   4526 	char intr_xname[INTRDEVNAMEBUF];
   4527 
   4528 	if (sc->sc_nqueues < ncpu) {
   4529 		/*
		 * To avoid other devices' interrupts, the affinity of the
		 * Tx/Rx interrupts starts from CPU#1.
   4532 		 */
   4533 		sc->sc_affinity_offset = 1;
   4534 	} else {
   4535 		/*
		 * In this case, this device uses all CPUs, so we make the
		 * affinity cpu_index match the MSI-X vector number for
		 * readability.
   4538 		 */
   4539 		sc->sc_affinity_offset = 0;
   4540 	}
   4541 
   4542 	error = wm_alloc_txrx_queues(sc);
   4543 	if (error) {
   4544 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4545 		    error);
   4546 		return ENOMEM;
   4547 	}
   4548 
   4549 	kcpuset_create(&affinity, false);
   4550 	intr_idx = 0;
   4551 
   4552 	/*
   4553 	 * TX and RX
   4554 	 */
   4555 	txrx_established = 0;
   4556 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4557 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4558 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4559 
   4560 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4561 		    sizeof(intrbuf));
   4562 #ifdef WM_MPSAFE
   4563 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4564 		    PCI_INTR_MPSAFE, true);
   4565 #endif
   4566 		memset(intr_xname, 0, sizeof(intr_xname));
   4567 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4568 		    device_xname(sc->sc_dev), qidx);
   4569 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4570 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4571 		if (vih == NULL) {
   4572 			aprint_error_dev(sc->sc_dev,
   4573 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4574 			    intrstr ? " at " : "",
   4575 			    intrstr ? intrstr : "");
   4576 
   4577 			goto fail;
   4578 		}
   4579 		kcpuset_zero(affinity);
   4580 		/* Round-robin affinity */
   4581 		kcpuset_set(affinity, affinity_to);
   4582 		error = interrupt_distribute(vih, affinity, NULL);
   4583 		if (error == 0) {
   4584 			aprint_normal_dev(sc->sc_dev,
   4585 			    "for TX and RX interrupting at %s affinity to %u\n",
   4586 			    intrstr, affinity_to);
   4587 		} else {
   4588 			aprint_normal_dev(sc->sc_dev,
   4589 			    "for TX and RX interrupting at %s\n", intrstr);
   4590 		}
   4591 		sc->sc_ihs[intr_idx] = vih;
		wmq->wmq_id = qidx;
   4593 		wmq->wmq_intr_idx = intr_idx;
   4594 
   4595 		txrx_established++;
   4596 		intr_idx++;
   4597 	}
   4598 
   4599 	/*
   4600 	 * LINK
   4601 	 */
   4602 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4603 	    sizeof(intrbuf));
   4604 #ifdef WM_MPSAFE
   4605 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4606 #endif
   4607 	memset(intr_xname, 0, sizeof(intr_xname));
   4608 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4609 	    device_xname(sc->sc_dev));
   4610 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4611 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4612 	if (vih == NULL) {
   4613 		aprint_error_dev(sc->sc_dev,
   4614 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4615 		    intrstr ? " at " : "",
   4616 		    intrstr ? intrstr : "");
   4617 
   4618 		goto fail;
   4619 	}
	/* Keep the default affinity for the LINK interrupt */
   4621 	aprint_normal_dev(sc->sc_dev,
   4622 	    "for LINK interrupting at %s\n", intrstr);
   4623 	sc->sc_ihs[intr_idx] = vih;
   4624 	sc->sc_link_intr_idx = intr_idx;
   4625 
   4626 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4627 	kcpuset_destroy(affinity);
   4628 	return 0;
   4629 
   4630  fail:
   4631 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4632 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4633 		pci_intr_disestablish(sc->sc_pc,
         		    sc->sc_ihs[wmq->wmq_intr_idx]);
   4634 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4635 	}
   4636 
   4637 	kcpuset_destroy(affinity);
   4638 	return ENOMEM;
   4639 }
   4640 
   4641 static void
   4642 wm_turnon(struct wm_softc *sc)
   4643 {
   4644 	int i;
   4645 
   4646 	KASSERT(WM_CORE_LOCKED(sc));
   4647 
    4648 	for (i = 0; i < sc->sc_nqueues; i++) {
   4649 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4650 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4651 
   4652 		mutex_enter(txq->txq_lock);
   4653 		txq->txq_stopping = false;
   4654 		mutex_exit(txq->txq_lock);
   4655 
   4656 		mutex_enter(rxq->rxq_lock);
   4657 		rxq->rxq_stopping = false;
   4658 		mutex_exit(rxq->rxq_lock);
   4659 	}
   4660 
   4661 	sc->sc_core_stopping = false;
   4662 }
   4663 
   4664 static void
   4665 wm_turnoff(struct wm_softc *sc)
   4666 {
   4667 	int i;
   4668 
   4669 	KASSERT(WM_CORE_LOCKED(sc));
   4670 
   4671 	sc->sc_core_stopping = true;
   4672 
    4673 	for (i = 0; i < sc->sc_nqueues; i++) {
   4674 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4675 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4676 
   4677 		mutex_enter(rxq->rxq_lock);
   4678 		rxq->rxq_stopping = true;
   4679 		mutex_exit(rxq->rxq_lock);
   4680 
   4681 		mutex_enter(txq->txq_lock);
   4682 		txq->txq_stopping = true;
   4683 		mutex_exit(txq->txq_lock);
   4684 	}
   4685 }
   4686 
   4687 /*
   4688  * wm_init:		[ifnet interface function]
   4689  *
   4690  *	Initialize the interface.
   4691  */
   4692 static int
   4693 wm_init(struct ifnet *ifp)
   4694 {
   4695 	struct wm_softc *sc = ifp->if_softc;
   4696 	int ret;
   4697 
   4698 	WM_CORE_LOCK(sc);
   4699 	ret = wm_init_locked(ifp);
   4700 	WM_CORE_UNLOCK(sc);
   4701 
   4702 	return ret;
   4703 }
   4704 
   4705 static int
   4706 wm_init_locked(struct ifnet *ifp)
   4707 {
   4708 	struct wm_softc *sc = ifp->if_softc;
   4709 	int i, j, trynum, error = 0;
   4710 	uint32_t reg;
   4711 
   4712 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4713 		device_xname(sc->sc_dev), __func__));
   4714 	KASSERT(WM_CORE_LOCKED(sc));
   4715 
   4716 	/*
    4717 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4718 	 * There is a small but measurable benefit to avoiding the adjustment
   4719 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4720 	 * on such platforms.  One possibility is that the DMA itself is
   4721 	 * slightly more efficient if the front of the entire packet (instead
   4722 	 * of the front of the headers) is aligned.
   4723 	 *
   4724 	 * Note we must always set align_tweak to 0 if we are using
   4725 	 * jumbo frames.
   4726 	 */
   4727 #ifdef __NO_STRICT_ALIGNMENT
   4728 	sc->sc_align_tweak = 0;
   4729 #else
   4730 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4731 		sc->sc_align_tweak = 0;
   4732 	else
   4733 		sc->sc_align_tweak = 2;
   4734 #endif /* __NO_STRICT_ALIGNMENT */
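         	/*
         	 * Illustration: sc_align_tweak = 2 offsets the received frame
         	 * by two bytes, so the 14-byte Ethernet header ends on a
         	 * 4-byte boundary and the IP header that follows is aligned
         	 * on strict-alignment platforms.
         	 */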
   4735 
   4736 	/* Cancel any pending I/O. */
   4737 	wm_stop_locked(ifp, 0);
   4738 
    4739 	/* Update statistics before reset */
   4740 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4741 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4742 
   4743 	/* PCH_SPT hardware workaround */
   4744 	if (sc->sc_type == WM_T_PCH_SPT)
   4745 		wm_flush_desc_rings(sc);
   4746 
   4747 	/* Reset the chip to a known state. */
   4748 	wm_reset(sc);
   4749 
   4750 	switch (sc->sc_type) {
   4751 	case WM_T_82571:
   4752 	case WM_T_82572:
   4753 	case WM_T_82573:
   4754 	case WM_T_82574:
   4755 	case WM_T_82583:
   4756 	case WM_T_80003:
   4757 	case WM_T_ICH8:
   4758 	case WM_T_ICH9:
   4759 	case WM_T_ICH10:
   4760 	case WM_T_PCH:
   4761 	case WM_T_PCH2:
   4762 	case WM_T_PCH_LPT:
   4763 	case WM_T_PCH_SPT:
   4764 		/* AMT based hardware can now take control from firmware */
   4765 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4766 			wm_get_hw_control(sc);
   4767 		break;
   4768 	default:
   4769 		break;
   4770 	}
   4771 
   4772 	/* Init hardware bits */
   4773 	wm_initialize_hardware_bits(sc);
   4774 
   4775 	/* Reset the PHY. */
   4776 	if (sc->sc_flags & WM_F_HAS_MII)
   4777 		wm_gmii_reset(sc);
   4778 
   4779 	/* Calculate (E)ITR value */
   4780 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4781 		sc->sc_itr = 450;	/* For EITR */
   4782 	} else if (sc->sc_type >= WM_T_82543) {
   4783 		/*
   4784 		 * Set up the interrupt throttling register (units of 256ns)
   4785 		 * Note that a footnote in Intel's documentation says this
   4786 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4787 		 * or 10Mbit mode.  Empirically, this also appears to hold
    4788 		 * for the 1024ns units of the other interrupt-related timer
    4789 		 * registers -- so, really, we ought to divide this value
    4790 		 * by 4 when the link speed is low.
   4791 		 *
   4792 		 * XXX implement this division at link speed change!
   4793 		 */
   4794 
   4795 		/*
   4796 		 * For N interrupts/sec, set this value to:
   4797 		 * 1000000000 / (N * 256).  Note that we set the
   4798 		 * absolute and packet timer values to this value
   4799 		 * divided by 4 to get "simple timer" behavior.
   4800 		 */
   4801 
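         		/*
         		 * Worked example: sc_itr = 1500 gives
         		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and
         		 * the "simple timer" registers are later written with
         		 * sc_itr / 4 = 375.
         		 */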
   4802 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4803 	}
   4804 
   4805 	error = wm_init_txrx_queues(sc);
   4806 	if (error)
   4807 		goto out;
   4808 
   4809 	/*
   4810 	 * Clear out the VLAN table -- we don't use it (yet).
   4811 	 */
   4812 	CSR_WRITE(sc, WMREG_VET, 0);
   4813 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4814 		trynum = 10; /* Due to hw errata */
   4815 	else
   4816 		trynum = 1;
   4817 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4818 		for (j = 0; j < trynum; j++)
   4819 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4820 
   4821 	/*
   4822 	 * Set up flow-control parameters.
   4823 	 *
   4824 	 * XXX Values could probably stand some tuning.
   4825 	 */
   4826 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4827 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4828 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4829 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4830 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4831 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4832 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4833 	}
   4834 
   4835 	sc->sc_fcrtl = FCRTL_DFLT;
   4836 	if (sc->sc_type < WM_T_82543) {
   4837 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4838 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4839 	} else {
   4840 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4841 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4842 	}
   4843 
   4844 	if (sc->sc_type == WM_T_80003)
   4845 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4846 	else
   4847 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4848 
   4849 	/* Writes the control register. */
   4850 	wm_set_vlan(sc);
   4851 
   4852 	if (sc->sc_flags & WM_F_HAS_MII) {
   4853 		int val;
   4854 
   4855 		switch (sc->sc_type) {
   4856 		case WM_T_80003:
   4857 		case WM_T_ICH8:
   4858 		case WM_T_ICH9:
   4859 		case WM_T_ICH10:
   4860 		case WM_T_PCH:
   4861 		case WM_T_PCH2:
   4862 		case WM_T_PCH_LPT:
   4863 		case WM_T_PCH_SPT:
   4864 			/*
   4865 			 * Set the mac to wait the maximum time between each
   4866 			 * iteration and increase the max iterations when
   4867 			 * polling the phy; this fixes erroneous timeouts at
   4868 			 * 10Mbps.
   4869 			 */
   4870 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4871 			    0xFFFF);
   4872 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4873 			val |= 0x3F;
   4874 			wm_kmrn_writereg(sc,
   4875 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4876 			break;
   4877 		default:
   4878 			break;
   4879 		}
   4880 
   4881 		if (sc->sc_type == WM_T_80003) {
   4882 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4883 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4884 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4885 
   4886 			/* Bypass RX and TX FIFO's */
   4887 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4888 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4889 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4890 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4891 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4892 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4893 		}
   4894 	}
   4895 #if 0
   4896 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4897 #endif
   4898 
   4899 	/* Set up checksum offload parameters. */
   4900 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4901 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4902 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4903 		reg |= RXCSUM_IPOFL;
   4904 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4905 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4906 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4907 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4908 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4909 
   4910 	/* Set up MSI-X */
   4911 	if (sc->sc_nintrs > 1) {
   4912 		uint32_t ivar;
   4913 		struct wm_queue *wmq;
   4914 		int qid, qintr_idx;
   4915 
   4916 		if (sc->sc_type == WM_T_82575) {
   4917 			/* Interrupt control */
   4918 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4919 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4920 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4921 
   4922 			/* TX and RX */
   4923 			for (i = 0; i < sc->sc_nqueues; i++) {
   4924 				wmq = &sc->sc_queue[i];
   4925 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4926 				    EITR_TX_QUEUE(wmq->wmq_id)
   4927 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4928 			}
   4929 			/* Link status */
   4930 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4931 			    EITR_OTHER);
   4932 		} else if (sc->sc_type == WM_T_82574) {
   4933 			/* Interrupt control */
   4934 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4935 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4936 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4937 
   4938 			ivar = 0;
   4939 			/* TX and RX */
   4940 			for (i = 0; i < sc->sc_nqueues; i++) {
   4941 				wmq = &sc->sc_queue[i];
   4942 				qid = wmq->wmq_id;
   4943 				qintr_idx = wmq->wmq_intr_idx;
   4944 
   4945 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4946 				    IVAR_TX_MASK_Q_82574(qid));
   4947 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4948 				    IVAR_RX_MASK_Q_82574(qid));
   4949 			}
   4950 			/* Link status */
   4951 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4952 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4953 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4954 		} else {
   4955 			/* Interrupt control */
   4956 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4957 			    | GPIE_EIAME | GPIE_PBA);
   4958 
   4959 			switch (sc->sc_type) {
   4960 			case WM_T_82580:
   4961 			case WM_T_I350:
   4962 			case WM_T_I354:
   4963 			case WM_T_I210:
   4964 			case WM_T_I211:
   4965 				/* TX and RX */
   4966 				for (i = 0; i < sc->sc_nqueues; i++) {
   4967 					wmq = &sc->sc_queue[i];
   4968 					qid = wmq->wmq_id;
   4969 					qintr_idx = wmq->wmq_intr_idx;
   4970 
   4971 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4972 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4973 					ivar |= __SHIFTIN((qintr_idx
   4974 						| IVAR_VALID),
   4975 					    IVAR_TX_MASK_Q(qid));
   4976 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4977 					ivar |= __SHIFTIN((qintr_idx
   4978 						| IVAR_VALID),
   4979 					    IVAR_RX_MASK_Q(qid));
   4980 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4981 				}
   4982 				break;
   4983 			case WM_T_82576:
   4984 				/* TX and RX */
   4985 				for (i = 0; i < sc->sc_nqueues; i++) {
   4986 					wmq = &sc->sc_queue[i];
   4987 					qid = wmq->wmq_id;
   4988 					qintr_idx = wmq->wmq_intr_idx;
   4989 
   4990 					ivar = CSR_READ(sc,
   4991 					    WMREG_IVAR_Q_82576(qid));
   4992 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4993 					ivar |= __SHIFTIN((qintr_idx
   4994 						| IVAR_VALID),
   4995 					    IVAR_TX_MASK_Q_82576(qid));
   4996 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4997 					ivar |= __SHIFTIN((qintr_idx
   4998 						| IVAR_VALID),
   4999 					    IVAR_RX_MASK_Q_82576(qid));
   5000 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5001 					    ivar);
   5002 				}
   5003 				break;
   5004 			default:
   5005 				break;
   5006 			}
   5007 
   5008 			/* Link status */
   5009 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5010 			    IVAR_MISC_OTHER);
   5011 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5012 		}
   5013 
   5014 		if (sc->sc_nqueues > 1) {
   5015 			wm_init_rss(sc);
   5016 
   5017 			/*
    5018 			 * NOTE: Receive Full-Packet Checksum Offload is
    5019 			 * mutually exclusive with Multiqueue.  However, this
    5020 			 * is not the same as TCP/IP checksums, which still
    5021 			 * work.
   5022 			*/
   5023 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5024 			reg |= RXCSUM_PCSD;
   5025 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5026 		}
   5027 	}
   5028 
   5029 	/* Set up the interrupt registers. */
   5030 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5031 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5032 	    ICR_RXO | ICR_RXT0;
   5033 	if (sc->sc_nintrs > 1) {
   5034 		uint32_t mask;
   5035 		struct wm_queue *wmq;
   5036 
   5037 		switch (sc->sc_type) {
   5038 		case WM_T_82574:
   5039 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5040 			    WMREG_EIAC_82574_MSIX_MASK);
   5041 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5042 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5043 			break;
   5044 		default:
   5045 			if (sc->sc_type == WM_T_82575) {
   5046 				mask = 0;
   5047 				for (i = 0; i < sc->sc_nqueues; i++) {
   5048 					wmq = &sc->sc_queue[i];
   5049 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5050 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5051 				}
   5052 				mask |= EITR_OTHER;
   5053 			} else {
   5054 				mask = 0;
   5055 				for (i = 0; i < sc->sc_nqueues; i++) {
   5056 					wmq = &sc->sc_queue[i];
   5057 					mask |= 1 << wmq->wmq_intr_idx;
   5058 				}
   5059 				mask |= 1 << sc->sc_link_intr_idx;
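         				/*
         				 * e.g. 4 queue vectors (0-3) plus the
         				 * link vector (4) yield mask = 0x1f.
         				 */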
   5060 			}
   5061 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5062 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5063 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5064 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5065 			break;
   5066 		}
   5067 	} else
   5068 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5069 
   5070 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5071 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5072 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5073 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5074 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5075 		reg |= KABGTXD_BGSQLBIAS;
   5076 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5077 	}
   5078 
   5079 	/* Set up the inter-packet gap. */
   5080 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5081 
   5082 	if (sc->sc_type >= WM_T_82543) {
   5083 		/*
    5084 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
   5085 		 * the multi queue function with MSI-X.
   5086 		 */
   5087 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5088 			int qidx;
   5089 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5090 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5091 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5092 				    sc->sc_itr);
   5093 			}
   5094 			/*
    5095 			 * Link interrupts occur much less frequently than
    5096 			 * TX and RX interrupts, so we don't tune the
    5097 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5098 			 * if_igb does.
   5099 			 */
   5100 		} else
   5101 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5102 	}
   5103 
   5104 	/* Set the VLAN ethernetype. */
   5105 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5106 
   5107 	/*
   5108 	 * Set up the transmit control register; we start out with
    5109 	 * a collision distance suitable for FDX, but update it when
   5110 	 * we resolve the media type.
   5111 	 */
   5112 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5113 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5114 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5115 	if (sc->sc_type >= WM_T_82571)
   5116 		sc->sc_tctl |= TCTL_MULR;
   5117 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5118 
   5119 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5120 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   5121 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5122 	}
   5123 
   5124 	if (sc->sc_type == WM_T_80003) {
   5125 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5126 		reg &= ~TCTL_EXT_GCEX_MASK;
   5127 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5128 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5129 	}
   5130 
   5131 	/* Set the media. */
   5132 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5133 		goto out;
   5134 
   5135 	/* Configure for OS presence */
   5136 	wm_init_manageability(sc);
   5137 
   5138 	/*
   5139 	 * Set up the receive control register; we actually program
   5140 	 * the register when we set the receive filter.  Use multicast
   5141 	 * address offset type 0.
   5142 	 *
   5143 	 * Only the i82544 has the ability to strip the incoming
   5144 	 * CRC, so we don't enable that feature.
   5145 	 */
   5146 	sc->sc_mchash_type = 0;
   5147 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5148 	    | RCTL_MO(sc->sc_mchash_type);
   5149 
   5150 	/*
   5151 	 * The I350 has a bug where it always strips the CRC whether
    5152 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   5153 	 */
   5154 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5155 	    || (sc->sc_type == WM_T_I210))
   5156 		sc->sc_rctl |= RCTL_SECRC;
   5157 
   5158 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5159 	    && (ifp->if_mtu > ETHERMTU)) {
   5160 		sc->sc_rctl |= RCTL_LPE;
   5161 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5162 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5163 	}
   5164 
   5165 	if (MCLBYTES == 2048) {
   5166 		sc->sc_rctl |= RCTL_2k;
   5167 	} else {
   5168 		if (sc->sc_type >= WM_T_82543) {
   5169 			switch (MCLBYTES) {
   5170 			case 4096:
   5171 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5172 				break;
   5173 			case 8192:
   5174 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5175 				break;
   5176 			case 16384:
   5177 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5178 				break;
   5179 			default:
   5180 				panic("wm_init: MCLBYTES %d unsupported",
   5181 				    MCLBYTES);
   5182 				break;
   5183 			}
    5184 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5185 	}
   5186 
   5187 	/* Set the receive filter. */
   5188 	wm_set_filter(sc);
   5189 
   5190 	/* Enable ECC */
   5191 	switch (sc->sc_type) {
   5192 	case WM_T_82571:
   5193 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5194 		reg |= PBA_ECC_CORR_EN;
   5195 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5196 		break;
   5197 	case WM_T_PCH_LPT:
   5198 	case WM_T_PCH_SPT:
   5199 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5200 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5201 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5202 
   5203 		sc->sc_ctrl |= CTRL_MEHE;
   5204 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5205 		break;
   5206 	default:
   5207 		break;
   5208 	}
   5209 
    5210 	/* On 82575 and later, set RDT only if RX is enabled */
   5211 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5212 		int qidx;
   5213 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5214 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5215 			for (i = 0; i < WM_NRXDESC; i++) {
   5216 				mutex_enter(rxq->rxq_lock);
   5217 				wm_init_rxdesc(rxq, i);
   5218 				mutex_exit(rxq->rxq_lock);
    5220 			}
   5221 		}
   5222 	}
   5223 
   5224 	wm_turnon(sc);
   5225 
   5226 	/* Start the one second link check clock. */
   5227 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5228 
   5229 	/* ...all done! */
   5230 	ifp->if_flags |= IFF_RUNNING;
   5231 	ifp->if_flags &= ~IFF_OACTIVE;
   5232 
   5233  out:
   5234 	sc->sc_if_flags = ifp->if_flags;
   5235 	if (error)
   5236 		log(LOG_ERR, "%s: interface not running\n",
   5237 		    device_xname(sc->sc_dev));
   5238 	return error;
   5239 }
   5240 
   5241 /*
   5242  * wm_stop:		[ifnet interface function]
   5243  *
   5244  *	Stop transmission on the interface.
   5245  */
   5246 static void
   5247 wm_stop(struct ifnet *ifp, int disable)
   5248 {
   5249 	struct wm_softc *sc = ifp->if_softc;
   5250 
   5251 	WM_CORE_LOCK(sc);
   5252 	wm_stop_locked(ifp, disable);
   5253 	WM_CORE_UNLOCK(sc);
   5254 }
   5255 
   5256 static void
   5257 wm_stop_locked(struct ifnet *ifp, int disable)
   5258 {
   5259 	struct wm_softc *sc = ifp->if_softc;
   5260 	struct wm_txsoft *txs;
   5261 	int i, qidx;
   5262 
   5263 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5264 		device_xname(sc->sc_dev), __func__));
   5265 	KASSERT(WM_CORE_LOCKED(sc));
   5266 
   5267 	wm_turnoff(sc);
   5268 
   5269 	/* Stop the one second clock. */
   5270 	callout_stop(&sc->sc_tick_ch);
   5271 
   5272 	/* Stop the 82547 Tx FIFO stall check timer. */
   5273 	if (sc->sc_type == WM_T_82547)
   5274 		callout_stop(&sc->sc_txfifo_ch);
   5275 
   5276 	if (sc->sc_flags & WM_F_HAS_MII) {
   5277 		/* Down the MII. */
   5278 		mii_down(&sc->sc_mii);
   5279 	} else {
   5280 #if 0
   5281 		/* Should we clear PHY's status properly? */
   5282 		wm_reset(sc);
   5283 #endif
   5284 	}
   5285 
   5286 	/* Stop the transmit and receive processes. */
   5287 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5288 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5289 	sc->sc_rctl &= ~RCTL_EN;
   5290 
   5291 	/*
   5292 	 * Clear the interrupt mask to ensure the device cannot assert its
   5293 	 * interrupt line.
   5294 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5295 	 * service any currently pending or shared interrupt.
   5296 	 */
   5297 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5298 	sc->sc_icr = 0;
   5299 	if (sc->sc_nintrs > 1) {
   5300 		if (sc->sc_type != WM_T_82574) {
   5301 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5302 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5303 		} else
   5304 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5305 	}
   5306 
   5307 	/* Release any queued transmit buffers. */
   5308 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5309 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5310 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5311 		mutex_enter(txq->txq_lock);
   5312 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5313 			txs = &txq->txq_soft[i];
   5314 			if (txs->txs_mbuf != NULL) {
    5315 				bus_dmamap_unload(sc->sc_dmat,
         				    txs->txs_dmamap);
   5316 				m_freem(txs->txs_mbuf);
   5317 				txs->txs_mbuf = NULL;
   5318 			}
   5319 		}
   5320 		mutex_exit(txq->txq_lock);
   5321 	}
   5322 
   5323 	/* Mark the interface as down and cancel the watchdog timer. */
   5324 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5325 	ifp->if_timer = 0;
   5326 
   5327 	if (disable) {
   5328 		for (i = 0; i < sc->sc_nqueues; i++) {
   5329 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5330 			mutex_enter(rxq->rxq_lock);
   5331 			wm_rxdrain(rxq);
   5332 			mutex_exit(rxq->rxq_lock);
   5333 		}
   5334 	}
   5335 
   5336 #if 0 /* notyet */
   5337 	if (sc->sc_type >= WM_T_82544)
   5338 		CSR_WRITE(sc, WMREG_WUC, 0);
   5339 #endif
   5340 }
   5341 
   5342 static void
   5343 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5344 {
   5345 	struct mbuf *m;
   5346 	int i;
   5347 
   5348 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5349 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5350 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5351 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5352 		    m->m_data, m->m_len, m->m_flags);
   5353 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5354 	    i, i == 1 ? "" : "s");
   5355 }
   5356 
   5357 /*
   5358  * wm_82547_txfifo_stall:
   5359  *
   5360  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5361  *	reset the FIFO pointers, and restart packet transmission.
   5362  */
   5363 static void
   5364 wm_82547_txfifo_stall(void *arg)
   5365 {
   5366 	struct wm_softc *sc = arg;
   5367 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5368 
   5369 	mutex_enter(txq->txq_lock);
   5370 
   5371 	if (txq->txq_stopping)
   5372 		goto out;
   5373 
   5374 	if (txq->txq_fifo_stall) {
   5375 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5376 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5377 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5378 			/*
   5379 			 * Packets have drained.  Stop transmitter, reset
   5380 			 * FIFO pointers, restart transmitter, and kick
   5381 			 * the packet queue.
   5382 			 */
   5383 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5384 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5385 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5386 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5387 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5388 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5389 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5390 			CSR_WRITE_FLUSH(sc);
   5391 
   5392 			txq->txq_fifo_head = 0;
   5393 			txq->txq_fifo_stall = 0;
   5394 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5395 		} else {
   5396 			/*
   5397 			 * Still waiting for packets to drain; try again in
   5398 			 * another tick.
   5399 			 */
   5400 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5401 		}
   5402 	}
   5403 
   5404 out:
   5405 	mutex_exit(txq->txq_lock);
   5406 }
   5407 
   5408 /*
   5409  * wm_82547_txfifo_bugchk:
   5410  *
   5411  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5412  *	prevent enqueueing a packet that would wrap around the end
    5413  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5414  *
   5415  *	We do this by checking the amount of space before the end
   5416  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5417  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5418  *	the internal FIFO pointers to the beginning, and restart
   5419  *	transmission on the interface.
   5420  */
   5421 #define	WM_FIFO_HDR		0x10
   5422 #define	WM_82547_PAD_LEN	0x3e0
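         /*
          * Each packet occupies roundup(pktlen + WM_FIFO_HDR, WM_FIFO_HDR)
          * bytes of FIFO space; e.g. a 1000-byte packet consumes
          * roundup(1000 + 0x10, 0x10) = 0x400 bytes.
          */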
   5423 static int
   5424 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5425 {
   5426 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5427 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5428 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5429 
   5430 	/* Just return if already stalled. */
   5431 	if (txq->txq_fifo_stall)
   5432 		return 1;
   5433 
   5434 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5435 		/* Stall only occurs in half-duplex mode. */
   5436 		goto send_packet;
   5437 	}
   5438 
   5439 	if (len >= WM_82547_PAD_LEN + space) {
   5440 		txq->txq_fifo_stall = 1;
   5441 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5442 		return 1;
   5443 	}
   5444 
   5445  send_packet:
   5446 	txq->txq_fifo_head += len;
   5447 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5448 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5449 
   5450 	return 0;
   5451 }
   5452 
   5453 static int
   5454 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5455 {
   5456 	int error;
   5457 
   5458 	/*
   5459 	 * Allocate the control data structures, and create and load the
   5460 	 * DMA map for it.
   5461 	 *
   5462 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5463 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5464 	 * both sets within the same 4G segment.
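         	 *
         	 * The constraint is enforced by the 4G boundary argument
         	 * (0x100000000ULL) passed to bus_dmamem_alloc() below, which
         	 * prevents the allocation from crossing a 4 GB line.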
   5465 	 */
   5466 	if (sc->sc_type < WM_T_82544)
   5467 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5468 	else
   5469 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5470 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5471 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5472 	else
   5473 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5474 
   5475 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5476 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5477 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5478 		aprint_error_dev(sc->sc_dev,
   5479 		    "unable to allocate TX control data, error = %d\n",
   5480 		    error);
   5481 		goto fail_0;
   5482 	}
   5483 
   5484 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5485 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5486 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5487 		aprint_error_dev(sc->sc_dev,
   5488 		    "unable to map TX control data, error = %d\n", error);
   5489 		goto fail_1;
   5490 	}
   5491 
   5492 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5493 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5494 		aprint_error_dev(sc->sc_dev,
   5495 		    "unable to create TX control data DMA map, error = %d\n",
   5496 		    error);
   5497 		goto fail_2;
   5498 	}
   5499 
   5500 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5501 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5502 		aprint_error_dev(sc->sc_dev,
   5503 		    "unable to load TX control data DMA map, error = %d\n",
   5504 		    error);
   5505 		goto fail_3;
   5506 	}
   5507 
   5508 	return 0;
   5509 
   5510  fail_3:
   5511 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5512  fail_2:
   5513 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5514 	    WM_TXDESCS_SIZE(txq));
   5515  fail_1:
   5516 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5517  fail_0:
   5518 	return error;
   5519 }
   5520 
   5521 static void
   5522 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5523 {
   5524 
   5525 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5526 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5527 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5528 	    WM_TXDESCS_SIZE(txq));
   5529 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5530 }
   5531 
   5532 static int
   5533 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5534 {
   5535 	int error;
   5536 
   5537 	/*
   5538 	 * Allocate the control data structures, and create and load the
   5539 	 * DMA map for it.
   5540 	 *
   5541 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5542 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5543 	 * both sets within the same 4G segment.
   5544 	 */
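         	/*
         	 * e.g. with 256 16-byte legacy descriptors the ring below is
         	 * exactly one 4 KB page.
         	 */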
   5545 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5546 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5547 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5548 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5549 		aprint_error_dev(sc->sc_dev,
   5550 		    "unable to allocate RX control data, error = %d\n",
   5551 		    error);
   5552 		goto fail_0;
   5553 	}
   5554 
   5555 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5556 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5557 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5558 		aprint_error_dev(sc->sc_dev,
   5559 		    "unable to map RX control data, error = %d\n", error);
   5560 		goto fail_1;
   5561 	}
   5562 
   5563 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5564 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5565 		aprint_error_dev(sc->sc_dev,
   5566 		    "unable to create RX control data DMA map, error = %d\n",
   5567 		    error);
   5568 		goto fail_2;
   5569 	}
   5570 
   5571 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5572 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5573 		aprint_error_dev(sc->sc_dev,
   5574 		    "unable to load RX control data DMA map, error = %d\n",
   5575 		    error);
   5576 		goto fail_3;
   5577 	}
   5578 
   5579 	return 0;
   5580 
   5581  fail_3:
   5582 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5583  fail_2:
   5584 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5585 	    rxq->rxq_desc_size);
   5586  fail_1:
   5587 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5588  fail_0:
   5589 	return error;
   5590 }
   5591 
   5592 static void
   5593 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5594 {
   5595 
   5596 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5597 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5598 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5599 	    rxq->rxq_desc_size);
   5600 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5601 }
   5602 
   5603 
   5604 static int
   5605 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5606 {
   5607 	int i, error;
   5608 
   5609 	/* Create the transmit buffer DMA maps. */
   5610 	WM_TXQUEUELEN(txq) =
   5611 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5612 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5613 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5614 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5615 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5616 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5617 			aprint_error_dev(sc->sc_dev,
   5618 			    "unable to create Tx DMA map %d, error = %d\n",
   5619 			    i, error);
   5620 			goto fail;
   5621 		}
   5622 	}
   5623 
   5624 	return 0;
   5625 
   5626  fail:
   5627 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5628 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5629 			bus_dmamap_destroy(sc->sc_dmat,
   5630 			    txq->txq_soft[i].txs_dmamap);
   5631 	}
   5632 	return error;
   5633 }
   5634 
   5635 static void
   5636 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5637 {
   5638 	int i;
   5639 
   5640 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5641 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5642 			bus_dmamap_destroy(sc->sc_dmat,
   5643 			    txq->txq_soft[i].txs_dmamap);
   5644 	}
   5645 }
   5646 
   5647 static int
   5648 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5649 {
   5650 	int i, error;
   5651 
   5652 	/* Create the receive buffer DMA maps. */
   5653 	for (i = 0; i < WM_NRXDESC; i++) {
   5654 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5655 			    MCLBYTES, 0, 0,
   5656 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5657 			aprint_error_dev(sc->sc_dev,
   5658 			    "unable to create Rx DMA map %d error = %d\n",
   5659 			    i, error);
   5660 			goto fail;
   5661 		}
   5662 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5663 	}
   5664 
   5665 	return 0;
   5666 
   5667  fail:
   5668 	for (i = 0; i < WM_NRXDESC; i++) {
   5669 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5670 			bus_dmamap_destroy(sc->sc_dmat,
   5671 			    rxq->rxq_soft[i].rxs_dmamap);
   5672 	}
   5673 	return error;
   5674 }
   5675 
   5676 static void
   5677 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5678 {
   5679 	int i;
   5680 
   5681 	for (i = 0; i < WM_NRXDESC; i++) {
   5682 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5683 			bus_dmamap_destroy(sc->sc_dmat,
   5684 			    rxq->rxq_soft[i].rxs_dmamap);
   5685 	}
   5686 }
   5687 
   5688 /*
    5689  * wm_alloc_txrx_queues:
   5690  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5691  */
   5692 static int
   5693 wm_alloc_txrx_queues(struct wm_softc *sc)
   5694 {
   5695 	int i, error, tx_done, rx_done;
   5696 
   5697 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5698 	    KM_SLEEP);
   5699 	if (sc->sc_queue == NULL) {
    5700 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5701 		error = ENOMEM;
   5702 		goto fail_0;
   5703 	}
   5704 
   5705 	/*
   5706 	 * For transmission
   5707 	 */
   5708 	error = 0;
   5709 	tx_done = 0;
   5710 	for (i = 0; i < sc->sc_nqueues; i++) {
   5711 #ifdef WM_EVENT_COUNTERS
   5712 		int j;
   5713 		const char *xname;
   5714 #endif
   5715 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5716 		txq->txq_sc = sc;
   5717 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5718 
   5719 		error = wm_alloc_tx_descs(sc, txq);
   5720 		if (error)
   5721 			break;
   5722 		error = wm_alloc_tx_buffer(sc, txq);
   5723 		if (error) {
   5724 			wm_free_tx_descs(sc, txq);
   5725 			break;
   5726 		}
   5727 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5728 		if (txq->txq_interq == NULL) {
   5729 			wm_free_tx_descs(sc, txq);
   5730 			wm_free_tx_buffer(sc, txq);
   5731 			error = ENOMEM;
   5732 			break;
   5733 		}
   5734 
   5735 #ifdef WM_EVENT_COUNTERS
   5736 		xname = device_xname(sc->sc_dev);
   5737 
   5738 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5739 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5740 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5741 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5742 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5743 
   5744 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5745 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5746 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5747 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5748 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5749 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5750 
   5751 		for (j = 0; j < WM_NTXSEGS; j++) {
   5752 			snprintf(txq->txq_txseg_evcnt_names[j],
    5753 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
   5754 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5755 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5756 		}
   5757 
   5758 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5759 
   5760 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5761 #endif /* WM_EVENT_COUNTERS */
   5762 
   5763 		tx_done++;
   5764 	}
   5765 	if (error)
   5766 		goto fail_1;
   5767 
   5768 	/*
    5769 	 * For receive
   5770 	 */
   5771 	error = 0;
   5772 	rx_done = 0;
   5773 	for (i = 0; i < sc->sc_nqueues; i++) {
   5774 #ifdef WM_EVENT_COUNTERS
   5775 		const char *xname;
   5776 #endif
   5777 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5778 		rxq->rxq_sc = sc;
   5779 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5780 
   5781 		error = wm_alloc_rx_descs(sc, rxq);
   5782 		if (error)
   5783 			break;
   5784 
   5785 		error = wm_alloc_rx_buffer(sc, rxq);
   5786 		if (error) {
   5787 			wm_free_rx_descs(sc, rxq);
   5788 			break;
   5789 		}
   5790 
   5791 #ifdef WM_EVENT_COUNTERS
   5792 		xname = device_xname(sc->sc_dev);
   5793 
   5794 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5795 
   5796 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5797 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5798 #endif /* WM_EVENT_COUNTERS */
   5799 
   5800 		rx_done++;
   5801 	}
   5802 	if (error)
   5803 		goto fail_2;
   5804 
   5805 	return 0;
   5806 
   5807  fail_2:
   5808 	for (i = 0; i < rx_done; i++) {
   5809 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5810 		wm_free_rx_buffer(sc, rxq);
   5811 		wm_free_rx_descs(sc, rxq);
   5812 		if (rxq->rxq_lock)
   5813 			mutex_obj_free(rxq->rxq_lock);
   5814 	}
   5815  fail_1:
   5816 	for (i = 0; i < tx_done; i++) {
   5817 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5818 		pcq_destroy(txq->txq_interq);
   5819 		wm_free_tx_buffer(sc, txq);
   5820 		wm_free_tx_descs(sc, txq);
   5821 		if (txq->txq_lock)
   5822 			mutex_obj_free(txq->txq_lock);
   5823 	}
   5824 
   5825 	kmem_free(sc->sc_queue,
   5826 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5827  fail_0:
   5828 	return error;
   5829 }
   5830 
   5831 /*
    5832  * wm_free_txrx_queues:
   5833  *	Free {tx,rx}descs and {tx,rx} buffers
   5834  */
   5835 static void
   5836 wm_free_txrx_queues(struct wm_softc *sc)
   5837 {
   5838 	int i;
   5839 
   5840 	for (i = 0; i < sc->sc_nqueues; i++) {
   5841 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5842 		wm_free_rx_buffer(sc, rxq);
   5843 		wm_free_rx_descs(sc, rxq);
   5844 		if (rxq->rxq_lock)
   5845 			mutex_obj_free(rxq->rxq_lock);
   5846 	}
   5847 
   5848 	for (i = 0; i < sc->sc_nqueues; i++) {
   5849 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5850 		wm_free_tx_buffer(sc, txq);
   5851 		wm_free_tx_descs(sc, txq);
   5852 		if (txq->txq_lock)
   5853 			mutex_obj_free(txq->txq_lock);
   5854 	}
   5855 
   5856 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5857 }
   5858 
   5859 static void
   5860 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5861 {
   5862 
   5863 	KASSERT(mutex_owned(txq->txq_lock));
   5864 
   5865 	/* Initialize the transmit descriptor ring. */
   5866 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5867 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5868 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5869 	txq->txq_free = WM_NTXDESC(txq);
   5870 	txq->txq_next = 0;
   5871 }
   5872 
   5873 static void
   5874 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5875     struct wm_txqueue *txq)
   5876 {
   5877 
   5878 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5879 		device_xname(sc->sc_dev), __func__));
   5880 	KASSERT(mutex_owned(txq->txq_lock));
   5881 
   5882 	if (sc->sc_type < WM_T_82543) {
   5883 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5884 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5885 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5886 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5887 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5888 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5889 	} else {
   5890 		int qid = wmq->wmq_id;
   5891 
   5892 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5893 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5894 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5895 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5896 
   5897 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5898 			/*
   5899 			 * Don't write TDT before TCTL.EN is set.
    5900 			 * See the datasheet.
   5901 			 */
   5902 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5903 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5904 			    | TXDCTL_WTHRESH(0));
   5905 		else {
   5906 			/* ITR / 4 */
   5907 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5908 			if (sc->sc_type >= WM_T_82540) {
    5909 				/* Should be the same value as TIDV */
   5910 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5911 			}
   5912 
   5913 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5914 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5915 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5916 		}
   5917 	}
   5918 }
   5919 
   5920 static void
   5921 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5922 {
   5923 	int i;
   5924 
   5925 	KASSERT(mutex_owned(txq->txq_lock));
   5926 
   5927 	/* Initialize the transmit job descriptors. */
   5928 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5929 		txq->txq_soft[i].txs_mbuf = NULL;
   5930 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5931 	txq->txq_snext = 0;
   5932 	txq->txq_sdirty = 0;
   5933 }
   5934 
   5935 static void
   5936 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5937     struct wm_txqueue *txq)
   5938 {
   5939 
   5940 	KASSERT(mutex_owned(txq->txq_lock));
   5941 
   5942 	/*
   5943 	 * Set up some register offsets that are different between
   5944 	 * the i82542 and the i82543 and later chips.
   5945 	 */
   5946 	if (sc->sc_type < WM_T_82543)
   5947 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5948 	else
   5949 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5950 
   5951 	wm_init_tx_descs(sc, txq);
   5952 	wm_init_tx_regs(sc, wmq, txq);
   5953 	wm_init_tx_buffer(sc, txq);
   5954 }
   5955 
   5956 static void
   5957 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5958     struct wm_rxqueue *rxq)
   5959 {
   5960 
   5961 	KASSERT(mutex_owned(rxq->rxq_lock));
   5962 
   5963 	/*
   5964 	 * Initialize the receive descriptor and receive job
   5965 	 * descriptor rings.
   5966 	 */
   5967 	if (sc->sc_type < WM_T_82543) {
   5968 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5969 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5970 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5971 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5972 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5973 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5974 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5975 
   5976 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5977 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5978 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5979 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5980 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5981 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5982 	} else {
   5983 		int qid = wmq->wmq_id;
   5984 
   5985 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5986 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5987 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5988 
   5989 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
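         			/*
         			 * SRRCTL.BSIZEPKT is programmed in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
         			 * must be a multiple of that unit; e.g. with 1 KB
         			 * units, MCLBYTES = 2048 is written as 2.
         			 */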
   5990 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5991 				panic("%s: MCLBYTES %d unsupported for "
         				    "82575 or higher\n", __func__, MCLBYTES);
   5992 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5993 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5994 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5995 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5996 			    | RXDCTL_WTHRESH(1));
   5997 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5998 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5999 		} else {
   6000 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6001 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6002 			/* ITR / 4 */
   6003 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    6004 			/* MUST be the same value as RDTR */
   6005 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6006 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6007 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6008 		}
   6009 	}
   6010 }
   6011 
   6012 static int
   6013 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6014 {
   6015 	struct wm_rxsoft *rxs;
   6016 	int error, i;
   6017 
   6018 	KASSERT(mutex_owned(rxq->rxq_lock));
   6019 
   6020 	for (i = 0; i < WM_NRXDESC; i++) {
   6021 		rxs = &rxq->rxq_soft[i];
   6022 		if (rxs->rxs_mbuf == NULL) {
   6023 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6024 				log(LOG_ERR, "%s: unable to allocate or map "
   6025 				    "rx buffer %d, error = %d\n",
   6026 				    device_xname(sc->sc_dev), i, error);
   6027 				/*
   6028 				 * XXX Should attempt to run with fewer receive
   6029 				 * XXX buffers instead of just failing.
   6030 				 */
   6031 				wm_rxdrain(rxq);
   6032 				return ENOMEM;
   6033 			}
   6034 		} else {
   6035 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6036 				wm_init_rxdesc(rxq, i);
   6037 			/*
    6038 			 * For 82575 and newer devices, the RX descriptors
   6039 			 * must be initialized after the setting of RCTL.EN in
   6040 			 * wm_set_filter()
   6041 			 */
   6042 		}
   6043 	}
   6044 	rxq->rxq_ptr = 0;
   6045 	rxq->rxq_discard = 0;
   6046 	WM_RXCHAIN_RESET(rxq);
   6047 
   6048 	return 0;
   6049 }
   6050 
   6051 static int
   6052 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6053     struct wm_rxqueue *rxq)
   6054 {
   6055 
   6056 	KASSERT(mutex_owned(rxq->rxq_lock));
   6057 
   6058 	/*
   6059 	 * Set up some register offsets that are different between
   6060 	 * the i82542 and the i82543 and later chips.
   6061 	 */
   6062 	if (sc->sc_type < WM_T_82543)
   6063 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6064 	else
   6065 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6066 
   6067 	wm_init_rx_regs(sc, wmq, rxq);
   6068 	return wm_init_rx_buffer(sc, rxq);
   6069 }
   6070 
   6071 /*
    6072  * wm_init_txrx_queues:
   6073  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6074  */
   6075 static int
   6076 wm_init_txrx_queues(struct wm_softc *sc)
   6077 {
   6078 	int i, error = 0;
   6079 
   6080 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6081 		device_xname(sc->sc_dev), __func__));
   6082 
   6083 	for (i = 0; i < sc->sc_nqueues; i++) {
   6084 		struct wm_queue *wmq = &sc->sc_queue[i];
   6085 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6086 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6087 
   6088 		mutex_enter(txq->txq_lock);
   6089 		wm_init_tx_queue(sc, wmq, txq);
   6090 		mutex_exit(txq->txq_lock);
   6091 
   6092 		mutex_enter(rxq->rxq_lock);
   6093 		error = wm_init_rx_queue(sc, wmq, rxq);
   6094 		mutex_exit(rxq->rxq_lock);
   6095 		if (error)
   6096 			break;
   6097 	}
   6098 
   6099 	return error;
   6100 }
   6101 
   6102 /*
   6103  * wm_tx_offload:
   6104  *
   6105  *	Set up TCP/IP checksumming parameters for the
   6106  *	specified packet.
   6107  */
   6108 static int
   6109 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6110     uint8_t *fieldsp)
   6111 {
   6112 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6113 	struct mbuf *m0 = txs->txs_mbuf;
   6114 	struct livengood_tcpip_ctxdesc *t;
   6115 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6116 	uint32_t ipcse;
   6117 	struct ether_header *eh;
   6118 	int offset, iphl;
   6119 	uint8_t fields;
   6120 
   6121 	/*
   6122 	 * XXX It would be nice if the mbuf pkthdr had offset
   6123 	 * fields for the protocol headers.
   6124 	 */
   6125 
   6126 	eh = mtod(m0, struct ether_header *);
   6127 	switch (htons(eh->ether_type)) {
   6128 	case ETHERTYPE_IP:
   6129 	case ETHERTYPE_IPV6:
   6130 		offset = ETHER_HDR_LEN;
   6131 		break;
   6132 
   6133 	case ETHERTYPE_VLAN:
   6134 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6135 		break;
   6136 
   6137 	default:
   6138 		/*
   6139 		 * Don't support this protocol or encapsulation.
   6140 		 */
   6141 		*fieldsp = 0;
   6142 		*cmdp = 0;
   6143 		return 0;
   6144 	}
   6145 
   6146 	if ((m0->m_pkthdr.csum_flags &
   6147 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6148 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6149 	} else {
   6150 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6151 	}
   6152 	ipcse = offset + iphl - 1;
   6153 
   6154 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6155 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6156 	seg = 0;
   6157 	fields = 0;
   6158 
   6159 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6160 		int hlen = offset + iphl;
   6161 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6162 
   6163 		if (__predict_false(m0->m_len <
   6164 				    (hlen + sizeof(struct tcphdr)))) {
   6165 			/*
   6166 			 * TCP/IP headers are not in the first mbuf; we need
   6167 			 * to do this the slow and painful way.  Let's just
   6168 			 * hope this doesn't happen very often.
   6169 			 */
   6170 			struct tcphdr th;
   6171 
   6172 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6173 
   6174 			m_copydata(m0, hlen, sizeof(th), &th);
   6175 			if (v4) {
   6176 				struct ip ip;
   6177 
   6178 				m_copydata(m0, offset, sizeof(ip), &ip);
   6179 				ip.ip_len = 0;
   6180 				m_copyback(m0,
   6181 				    offset + offsetof(struct ip, ip_len),
   6182 				    sizeof(ip.ip_len), &ip.ip_len);
   6183 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6184 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6185 			} else {
   6186 				struct ip6_hdr ip6;
   6187 
   6188 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6189 				ip6.ip6_plen = 0;
   6190 				m_copyback(m0,
   6191 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6192 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6193 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6194 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6195 			}
   6196 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6197 			    sizeof(th.th_sum), &th.th_sum);
   6198 
   6199 			hlen += th.th_off << 2;
   6200 		} else {
   6201 			/*
   6202 			 * TCP/IP headers are in the first mbuf; we can do
   6203 			 * this the easy way.
   6204 			 */
   6205 			struct tcphdr *th;
   6206 
   6207 			if (v4) {
   6208 				struct ip *ip =
   6209 				    (void *)(mtod(m0, char *) + offset);
   6210 				th = (void *)(mtod(m0, char *) + hlen);
   6211 
   6212 				ip->ip_len = 0;
   6213 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6214 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6215 			} else {
   6216 				struct ip6_hdr *ip6 =
   6217 				    (void *)(mtod(m0, char *) + offset);
   6218 				th = (void *)(mtod(m0, char *) + hlen);
   6219 
   6220 				ip6->ip6_plen = 0;
   6221 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6222 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6223 			}
   6224 			hlen += th->th_off << 2;
   6225 		}
   6226 
   6227 		if (v4) {
   6228 			WM_Q_EVCNT_INCR(txq, txtso);
   6229 			cmdlen |= WTX_TCPIP_CMD_IP;
   6230 		} else {
   6231 			WM_Q_EVCNT_INCR(txq, txtso6);
   6232 			ipcse = 0;
   6233 		}
   6234 		cmd |= WTX_TCPIP_CMD_TSE;
   6235 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6236 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6237 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6238 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
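         		/*
         		 * e.g. a plain IPv4/TCP frame without options gives
         		 * hlen = 14 + 20 + 20 = 54, so the length field in
         		 * cmdlen is m_pkthdr.len - 54.
         		 */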
   6239 	}
   6240 
   6241 	/*
   6242 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6243 	 * offload feature, if we load the context descriptor, we
   6244 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6245 	 */
   6246 
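         	/*
         	 * e.g. for an untagged IPv4/TCP frame with a 20-byte IP header:
         	 * IPCSS = 14, IPCSO = 14 + 10 = 24, IPCSE = 33, and below
         	 * TUCSS = 34, TUCSO = 34 + 16 = 50.
         	 */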
   6247 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6248 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6249 	    WTX_TCPIP_IPCSE(ipcse);
   6250 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6251 		WM_Q_EVCNT_INCR(txq, txipsum);
   6252 		fields |= WTX_IXSM;
   6253 	}
   6254 
   6255 	offset += iphl;
   6256 
   6257 	if (m0->m_pkthdr.csum_flags &
   6258 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6259 		WM_Q_EVCNT_INCR(txq, txtusum);
   6260 		fields |= WTX_TXSM;
   6261 		tucs = WTX_TCPIP_TUCSS(offset) |
   6262 		    WTX_TCPIP_TUCSO(offset +
   6263 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6264 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6265 	} else if ((m0->m_pkthdr.csum_flags &
   6266 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6267 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6268 		fields |= WTX_TXSM;
   6269 		tucs = WTX_TCPIP_TUCSS(offset) |
   6270 		    WTX_TCPIP_TUCSO(offset +
   6271 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6272 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6273 	} else {
   6274 		/* Just initialize it to a valid TCP context. */
   6275 		tucs = WTX_TCPIP_TUCSS(offset) |
   6276 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6277 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6278 	}
   6279 
   6280 	/* Fill in the context descriptor. */
   6281 	t = (struct livengood_tcpip_ctxdesc *)
   6282 	    &txq->txq_descs[txq->txq_next];
   6283 	t->tcpip_ipcs = htole32(ipcs);
   6284 	t->tcpip_tucs = htole32(tucs);
   6285 	t->tcpip_cmdlen = htole32(cmdlen);
   6286 	t->tcpip_seg = htole32(seg);
   6287 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6288 
   6289 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6290 	txs->txs_ndesc++;
   6291 
   6292 	*cmdp = cmd;
   6293 	*fieldsp = fields;
   6294 
   6295 	return 0;
   6296 }
   6297 
   6298 /*
   6299  * wm_start:		[ifnet interface function]
   6300  *
   6301  *	Start packet transmission on the interface.
   6302  */
   6303 static void
   6304 wm_start(struct ifnet *ifp)
   6305 {
   6306 	struct wm_softc *sc = ifp->if_softc;
   6307 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6308 
   6309 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6310 
   6311 	mutex_enter(txq->txq_lock);
   6312 	if (!txq->txq_stopping)
   6313 		wm_start_locked(ifp);
   6314 	mutex_exit(txq->txq_lock);
   6315 }
   6316 
   6317 static void
   6318 wm_start_locked(struct ifnet *ifp)
   6319 {
   6320 	struct wm_softc *sc = ifp->if_softc;
   6321 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6322 	struct mbuf *m0;
   6323 	struct m_tag *mtag;
   6324 	struct wm_txsoft *txs;
   6325 	bus_dmamap_t dmamap;
   6326 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6327 	bus_addr_t curaddr;
   6328 	bus_size_t seglen, curlen;
   6329 	uint32_t cksumcmd;
   6330 	uint8_t cksumfields;
   6331 
   6332 	KASSERT(mutex_owned(txq->txq_lock));
   6333 
   6334 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6335 		return;
   6336 
   6337 	/* Remember the previous number of free descriptors. */
   6338 	ofree = txq->txq_free;
   6339 
   6340 	/*
   6341 	 * Loop through the send queue, setting up transmit descriptors
   6342 	 * until we drain the queue, or use up all available transmit
   6343 	 * descriptors.
   6344 	 */
   6345 	for (;;) {
   6346 		m0 = NULL;
   6347 
   6348 		/* Get a work queue entry. */
   6349 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6350 			wm_txeof(sc, txq);
   6351 			if (txq->txq_sfree == 0) {
   6352 				DPRINTF(WM_DEBUG_TX,
   6353 				    ("%s: TX: no free job descriptors\n",
   6354 					device_xname(sc->sc_dev)));
   6355 				WM_Q_EVCNT_INCR(txq, txsstall);
   6356 				break;
   6357 			}
   6358 		}
   6359 
   6360 		/* Grab a packet off the queue. */
   6361 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6362 		if (m0 == NULL)
   6363 			break;
   6364 
   6365 		DPRINTF(WM_DEBUG_TX,
   6366 		    ("%s: TX: have packet to transmit: %p\n",
   6367 		    device_xname(sc->sc_dev), m0));
   6368 
   6369 		txs = &txq->txq_soft[txq->txq_snext];
   6370 		dmamap = txs->txs_dmamap;
   6371 
   6372 		use_tso = (m0->m_pkthdr.csum_flags &
   6373 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6374 
   6375 		/*
   6376 		 * So says the Linux driver:
   6377 		 * The controller does a simple calculation to make sure
   6378 		 * there is enough room in the FIFO before initiating the
   6379 		 * DMA for each buffer.  The calc is:
   6380 		 *	4 = ceil(buffer len / MSS)
   6381 		 * To make sure we don't overrun the FIFO, adjust the max
   6382 		 * buffer len if the MSS drops.
   6383 		 */
   6384 		dmamap->dm_maxsegsz =
   6385 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6386 		    ? m0->m_pkthdr.segsz << 2
   6387 		    : WTX_MAX_LEN;
   6388 
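         		/*
         		 * For illustration (MSS value assumed, not taken from
         		 * this driver): with an MSS of 1448, the clamp above
         		 * limits each DMA segment to 1448 << 2 = 5792 bytes,
         		 * keeping ceil(buffer len / MSS) at 4 in the chip's
         		 * FIFO-space estimate; larger products fall back to
         		 * the WTX_MAX_LEN cap.
         		 */
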
   6389 		/*
   6390 		 * Load the DMA map.  If this fails, the packet either
   6391 		 * didn't fit in the allotted number of segments, or we
   6392 		 * were short on resources.  For the too-many-segments
   6393 		 * case, we simply report an error and drop the packet,
   6394 		 * since we can't sanely copy a jumbo packet to a single
   6395 		 * buffer.
   6396 		 */
   6397 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6398 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6399 		if (error) {
   6400 			if (error == EFBIG) {
   6401 				WM_Q_EVCNT_INCR(txq, txdrop);
   6402 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6403 				    "DMA segments, dropping...\n",
   6404 				    device_xname(sc->sc_dev));
   6405 				wm_dump_mbuf_chain(sc, m0);
   6406 				m_freem(m0);
   6407 				continue;
   6408 			}
    6409 			/* Short on resources, just stop for now. */
   6410 			DPRINTF(WM_DEBUG_TX,
   6411 			    ("%s: TX: dmamap load failed: %d\n",
   6412 			    device_xname(sc->sc_dev), error));
   6413 			break;
   6414 		}
   6415 
   6416 		segs_needed = dmamap->dm_nsegs;
   6417 		if (use_tso) {
   6418 			/* For sentinel descriptor; see below. */
   6419 			segs_needed++;
   6420 		}
   6421 
   6422 		/*
   6423 		 * Ensure we have enough descriptors free to describe
   6424 		 * the packet.  Note, we always reserve one descriptor
   6425 		 * at the end of the ring due to the semantics of the
   6426 		 * TDT register, plus one more in the event we need
   6427 		 * to load offload context.
   6428 		 */
   6429 		if (segs_needed > txq->txq_free - 2) {
   6430 			/*
   6431 			 * Not enough free descriptors to transmit this
   6432 			 * packet.  We haven't committed anything yet,
   6433 			 * so just unload the DMA map, put the packet
    6434 			 * back on the queue, and punt.  Notify the upper
   6435 			 * layer that there are no more slots left.
   6436 			 */
   6437 			DPRINTF(WM_DEBUG_TX,
   6438 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6439 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6440 			    segs_needed, txq->txq_free - 1));
   6441 			ifp->if_flags |= IFF_OACTIVE;
   6442 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6443 			WM_Q_EVCNT_INCR(txq, txdstall);
   6444 			break;
   6445 		}
   6446 
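         		/*
         		 * Worked example of the check above (numbers assumed):
         		 * a packet mapped to 8 DMA segments plus a TSO sentinel
         		 * gives segs_needed = 9, so at least 11 free descriptors
         		 * are required: 9 for the data, one held back for the
         		 * TDT semantics, and one for a possible offload context
         		 * descriptor.
         		 */
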
   6447 		/*
   6448 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6449 		 * once we know we can transmit the packet, since we
   6450 		 * do some internal FIFO space accounting here.
   6451 		 */
   6452 		if (sc->sc_type == WM_T_82547 &&
   6453 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6454 			DPRINTF(WM_DEBUG_TX,
   6455 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6456 			    device_xname(sc->sc_dev)));
   6457 			ifp->if_flags |= IFF_OACTIVE;
   6458 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6459 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6460 			break;
   6461 		}
   6462 
   6463 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6464 
   6465 		DPRINTF(WM_DEBUG_TX,
   6466 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6467 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6468 
   6469 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6470 
   6471 		/*
   6472 		 * Store a pointer to the packet so that we can free it
   6473 		 * later.
   6474 		 *
   6475 		 * Initially, we consider the number of descriptors the
    6476 		 * packet uses to be the number of DMA segments.  This may be
   6477 		 * incremented by 1 if we do checksum offload (a descriptor
   6478 		 * is used to set the checksum context).
   6479 		 */
   6480 		txs->txs_mbuf = m0;
   6481 		txs->txs_firstdesc = txq->txq_next;
   6482 		txs->txs_ndesc = segs_needed;
   6483 
   6484 		/* Set up offload parameters for this packet. */
   6485 		if (m0->m_pkthdr.csum_flags &
   6486 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6487 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6488 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6489 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6490 					  &cksumfields) != 0) {
   6491 				/* Error message already displayed. */
   6492 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6493 				continue;
   6494 			}
   6495 		} else {
   6496 			cksumcmd = 0;
   6497 			cksumfields = 0;
   6498 		}
   6499 
   6500 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6501 
   6502 		/* Sync the DMA map. */
   6503 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6504 		    BUS_DMASYNC_PREWRITE);
   6505 
   6506 		/* Initialize the transmit descriptor. */
   6507 		for (nexttx = txq->txq_next, seg = 0;
   6508 		     seg < dmamap->dm_nsegs; seg++) {
   6509 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6510 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6511 			     seglen != 0;
   6512 			     curaddr += curlen, seglen -= curlen,
   6513 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6514 				curlen = seglen;
   6515 
   6516 				/*
   6517 				 * So says the Linux driver:
   6518 				 * Work around for premature descriptor
   6519 				 * write-backs in TSO mode.  Append a
   6520 				 * 4-byte sentinel descriptor.
   6521 				 */
   6522 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6523 				    curlen > 8)
   6524 					curlen -= 4;
   6525 
   6526 				wm_set_dma_addr(
   6527 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6528 				txq->txq_descs[nexttx].wtx_cmdlen
   6529 				    = htole32(cksumcmd | curlen);
   6530 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6531 				    = 0;
   6532 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6533 				    = cksumfields;
    6534 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6535 				lasttx = nexttx;
   6536 
   6537 				DPRINTF(WM_DEBUG_TX,
   6538 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6539 				     "len %#04zx\n",
   6540 				    device_xname(sc->sc_dev), nexttx,
   6541 				    (uint64_t)curaddr, curlen));
   6542 			}
   6543 		}
   6544 
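         		/*
         		 * Note on the sentinel above: shaving 4 bytes off the
         		 * final segment leaves seglen nonzero, so the inner loop
         		 * runs once more and emits a trailing 4-byte descriptor,
         		 * the sentinel itself; that extra descriptor is why
         		 * segs_needed was incremented by one for the TSO case
         		 * earlier.
         		 */
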
   6545 		KASSERT(lasttx != -1);
   6546 
   6547 		/*
   6548 		 * Set up the command byte on the last descriptor of
   6549 		 * the packet.  If we're in the interrupt delay window,
   6550 		 * delay the interrupt.
   6551 		 */
   6552 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6553 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6554 
   6555 		/*
   6556 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6557 		 * up the descriptor to encapsulate the packet for us.
   6558 		 *
   6559 		 * This is only valid on the last descriptor of the packet.
   6560 		 */
   6561 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6562 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6563 			    htole32(WTX_CMD_VLE);
   6564 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6565 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6566 		}
   6567 
   6568 		txs->txs_lastdesc = lasttx;
   6569 
   6570 		DPRINTF(WM_DEBUG_TX,
   6571 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6572 		    device_xname(sc->sc_dev),
   6573 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6574 
   6575 		/* Sync the descriptors we're using. */
   6576 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6577 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6578 
   6579 		/* Give the packet to the chip. */
   6580 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6581 
   6582 		DPRINTF(WM_DEBUG_TX,
   6583 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6584 
   6585 		DPRINTF(WM_DEBUG_TX,
   6586 		    ("%s: TX: finished transmitting packet, job %d\n",
   6587 		    device_xname(sc->sc_dev), txq->txq_snext));
   6588 
   6589 		/* Advance the tx pointer. */
   6590 		txq->txq_free -= txs->txs_ndesc;
   6591 		txq->txq_next = nexttx;
   6592 
   6593 		txq->txq_sfree--;
   6594 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6595 
   6596 		/* Pass the packet to any BPF listeners. */
   6597 		bpf_mtap(ifp, m0);
   6598 	}
   6599 
   6600 	if (m0 != NULL) {
   6601 		ifp->if_flags |= IFF_OACTIVE;
   6602 		WM_Q_EVCNT_INCR(txq, txdrop);
   6603 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6604 			__func__));
   6605 		m_freem(m0);
   6606 	}
   6607 
   6608 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6609 		/* No more slots; notify upper layer. */
   6610 		ifp->if_flags |= IFF_OACTIVE;
   6611 	}
   6612 
   6613 	if (txq->txq_free != ofree) {
   6614 		/* Set a watchdog timer in case the chip flakes out. */
   6615 		ifp->if_timer = 5;
   6616 	}
   6617 }
   6618 
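         /*
          * A minimal sketch (assumed helper name, not driver code) of the
          * producer pattern wm_start_locked follows for each packet: fill
          * and sync the descriptors, then publish the new tail index so
          * the chip begins DMA:
          *
          *	fill_descriptors(txq, m0);
          *	wm_cdtxsync(txq, first, ndesc,
          *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
          *	CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
          */
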
   6619 /*
   6620  * wm_nq_tx_offload:
   6621  *
   6622  *	Set up TCP/IP checksumming parameters for the
   6623  *	specified packet, for NEWQUEUE devices
   6624  */
   6625 static int
   6626 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6627     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6628 {
   6629 	struct mbuf *m0 = txs->txs_mbuf;
   6630 	struct m_tag *mtag;
   6631 	uint32_t vl_len, mssidx, cmdc;
   6632 	struct ether_header *eh;
   6633 	int offset, iphl;
   6634 
   6635 	/*
   6636 	 * XXX It would be nice if the mbuf pkthdr had offset
   6637 	 * fields for the protocol headers.
   6638 	 */
   6639 	*cmdlenp = 0;
   6640 	*fieldsp = 0;
   6641 
   6642 	eh = mtod(m0, struct ether_header *);
   6643 	switch (htons(eh->ether_type)) {
   6644 	case ETHERTYPE_IP:
   6645 	case ETHERTYPE_IPV6:
   6646 		offset = ETHER_HDR_LEN;
   6647 		break;
   6648 
   6649 	case ETHERTYPE_VLAN:
   6650 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6651 		break;
   6652 
   6653 	default:
   6654 		/* Don't support this protocol or encapsulation. */
   6655 		*do_csum = false;
   6656 		return 0;
   6657 	}
   6658 	*do_csum = true;
   6659 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6660 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6661 
   6662 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6663 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6664 
   6665 	if ((m0->m_pkthdr.csum_flags &
   6666 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6667 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6668 	} else {
   6669 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6670 	}
   6671 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6672 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6673 
   6674 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6675 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6676 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6677 		*cmdlenp |= NQTX_CMD_VLE;
   6678 	}
   6679 
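         	/*
         	 * At this point vl_len packs up to three fields.  Example
         	 * values (typical, assumed for illustration): an untagged
         	 * IPv4/TCP frame has MACLEN = 14 (Ethernet header) and
         	 * IPLEN = 20 (minimal IP header), i.e.
         	 *
         	 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)
         	 *	       | (20 << NQTXC_VLLEN_IPLEN_SHIFT);
         	 *
         	 * with the VLAN field left zero.
         	 */
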
   6680 	mssidx = 0;
   6681 
   6682 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6683 		int hlen = offset + iphl;
   6684 		int tcp_hlen;
   6685 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6686 
   6687 		if (__predict_false(m0->m_len <
   6688 				    (hlen + sizeof(struct tcphdr)))) {
   6689 			/*
   6690 			 * TCP/IP headers are not in the first mbuf; we need
   6691 			 * to do this the slow and painful way.  Let's just
   6692 			 * hope this doesn't happen very often.
   6693 			 */
   6694 			struct tcphdr th;
   6695 
   6696 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6697 
   6698 			m_copydata(m0, hlen, sizeof(th), &th);
   6699 			if (v4) {
   6700 				struct ip ip;
   6701 
   6702 				m_copydata(m0, offset, sizeof(ip), &ip);
   6703 				ip.ip_len = 0;
   6704 				m_copyback(m0,
   6705 				    offset + offsetof(struct ip, ip_len),
   6706 				    sizeof(ip.ip_len), &ip.ip_len);
   6707 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6708 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6709 			} else {
   6710 				struct ip6_hdr ip6;
   6711 
   6712 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6713 				ip6.ip6_plen = 0;
   6714 				m_copyback(m0,
   6715 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6716 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6717 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6718 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6719 			}
   6720 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6721 			    sizeof(th.th_sum), &th.th_sum);
   6722 
   6723 			tcp_hlen = th.th_off << 2;
   6724 		} else {
   6725 			/*
   6726 			 * TCP/IP headers are in the first mbuf; we can do
   6727 			 * this the easy way.
   6728 			 */
   6729 			struct tcphdr *th;
   6730 
   6731 			if (v4) {
   6732 				struct ip *ip =
   6733 				    (void *)(mtod(m0, char *) + offset);
   6734 				th = (void *)(mtod(m0, char *) + hlen);
   6735 
   6736 				ip->ip_len = 0;
   6737 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6738 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6739 			} else {
   6740 				struct ip6_hdr *ip6 =
   6741 				    (void *)(mtod(m0, char *) + offset);
   6742 				th = (void *)(mtod(m0, char *) + hlen);
   6743 
   6744 				ip6->ip6_plen = 0;
   6745 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6746 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6747 			}
   6748 			tcp_hlen = th->th_off << 2;
   6749 		}
   6750 		hlen += tcp_hlen;
   6751 		*cmdlenp |= NQTX_CMD_TSE;
   6752 
   6753 		if (v4) {
   6754 			WM_Q_EVCNT_INCR(txq, txtso);
   6755 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6756 		} else {
   6757 			WM_Q_EVCNT_INCR(txq, txtso6);
   6758 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6759 		}
   6760 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6761 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6762 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6763 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6764 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6765 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6766 	} else {
   6767 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6768 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6769 	}
   6770 
   6771 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6772 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6773 		cmdc |= NQTXC_CMD_IP4;
   6774 	}
   6775 
   6776 	if (m0->m_pkthdr.csum_flags &
   6777 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6778 		WM_Q_EVCNT_INCR(txq, txtusum);
   6779 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6780 			cmdc |= NQTXC_CMD_TCP;
   6781 		} else {
   6782 			cmdc |= NQTXC_CMD_UDP;
   6783 		}
   6784 		cmdc |= NQTXC_CMD_IP4;
   6785 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6786 	}
   6787 	if (m0->m_pkthdr.csum_flags &
   6788 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6789 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6790 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6791 			cmdc |= NQTXC_CMD_TCP;
   6792 		} else {
   6793 			cmdc |= NQTXC_CMD_UDP;
   6794 		}
   6795 		cmdc |= NQTXC_CMD_IP6;
   6796 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6797 	}
   6798 
   6799 	/* Fill in the context descriptor. */
   6800 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6801 	    htole32(vl_len);
   6802 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6803 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6804 	    htole32(cmdc);
   6805 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6806 	    htole32(mssidx);
   6807 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6808 	DPRINTF(WM_DEBUG_TX,
   6809 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6810 	    txq->txq_next, 0, vl_len));
   6811 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6812 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6813 	txs->txs_ndesc++;
   6814 	return 0;
   6815 }
   6816 
   6817 /*
   6818  * wm_nq_start:		[ifnet interface function]
   6819  *
   6820  *	Start packet transmission on the interface for NEWQUEUE devices
   6821  */
   6822 static void
   6823 wm_nq_start(struct ifnet *ifp)
   6824 {
   6825 	struct wm_softc *sc = ifp->if_softc;
   6826 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6827 
   6828 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6829 
   6830 	mutex_enter(txq->txq_lock);
   6831 	if (!txq->txq_stopping)
   6832 		wm_nq_start_locked(ifp);
   6833 	mutex_exit(txq->txq_lock);
   6834 }
   6835 
   6836 static void
   6837 wm_nq_start_locked(struct ifnet *ifp)
   6838 {
   6839 	struct wm_softc *sc = ifp->if_softc;
   6840 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6841 
   6842 	wm_nq_send_common_locked(ifp, txq, false);
   6843 }
   6844 
   6845 static inline int
   6846 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6847 {
   6848 	struct wm_softc *sc = ifp->if_softc;
   6849 	u_int cpuid = cpu_index(curcpu());
   6850 
   6851 	/*
    6852 	 * Currently, a simple round-robin distribution strategy.
    6853 	 * TODO:
    6854 	 * distribute by flowid (RSS hash value).
   6855 	 */
   6856 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6857 }
   6858 
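         /*
          * For illustration (CPU and queue counts assumed): with 4 Tx
          * queues and an affinity offset of 0, CPUs 0..7 map to queues
          * 0,1,2,3,0,1,2,3; the spread is static by CPU index rather than
          * by flow, which is what the TODO above refers to.
          */
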
   6859 static int
   6860 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6861 {
   6862 	int qid;
   6863 	struct wm_softc *sc = ifp->if_softc;
   6864 	struct wm_txqueue *txq;
   6865 
   6866 	qid = wm_nq_select_txqueue(ifp, m);
   6867 	txq = &sc->sc_queue[qid].wmq_txq;
   6868 
   6869 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6870 		m_freem(m);
   6871 		WM_Q_EVCNT_INCR(txq, txdrop);
   6872 		return ENOBUFS;
   6873 	}
   6874 
   6875 	if (mutex_tryenter(txq->txq_lock)) {
    6876 		/* XXX these counters should be kept per TX queue */
   6877 		ifp->if_obytes += m->m_pkthdr.len;
   6878 		if (m->m_flags & M_MCAST)
   6879 			ifp->if_omcasts++;
   6880 
   6881 		if (!txq->txq_stopping)
   6882 			wm_nq_transmit_locked(ifp, txq);
   6883 		mutex_exit(txq->txq_lock);
   6884 	}
   6885 
   6886 	return 0;
   6887 }
   6888 
   6889 static void
   6890 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6891 {
   6892 
   6893 	wm_nq_send_common_locked(ifp, txq, true);
   6894 }
   6895 
   6896 static void
   6897 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6898     bool is_transmit)
   6899 {
   6900 	struct wm_softc *sc = ifp->if_softc;
   6901 	struct mbuf *m0;
   6902 	struct m_tag *mtag;
   6903 	struct wm_txsoft *txs;
   6904 	bus_dmamap_t dmamap;
   6905 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6906 	bool do_csum, sent;
   6907 
   6908 	KASSERT(mutex_owned(txq->txq_lock));
   6909 
   6910 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6911 		return;
   6912 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6913 		return;
   6914 
   6915 	sent = false;
   6916 
   6917 	/*
   6918 	 * Loop through the send queue, setting up transmit descriptors
   6919 	 * until we drain the queue, or use up all available transmit
   6920 	 * descriptors.
   6921 	 */
   6922 	for (;;) {
   6923 		m0 = NULL;
   6924 
   6925 		/* Get a work queue entry. */
   6926 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6927 			wm_txeof(sc, txq);
   6928 			if (txq->txq_sfree == 0) {
   6929 				DPRINTF(WM_DEBUG_TX,
   6930 				    ("%s: TX: no free job descriptors\n",
   6931 					device_xname(sc->sc_dev)));
   6932 				WM_Q_EVCNT_INCR(txq, txsstall);
   6933 				break;
   6934 			}
   6935 		}
   6936 
   6937 		/* Grab a packet off the queue. */
   6938 		if (is_transmit)
   6939 			m0 = pcq_get(txq->txq_interq);
   6940 		else
   6941 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6942 		if (m0 == NULL)
   6943 			break;
   6944 
   6945 		DPRINTF(WM_DEBUG_TX,
   6946 		    ("%s: TX: have packet to transmit: %p\n",
   6947 		    device_xname(sc->sc_dev), m0));
   6948 
   6949 		txs = &txq->txq_soft[txq->txq_snext];
   6950 		dmamap = txs->txs_dmamap;
   6951 
   6952 		/*
   6953 		 * Load the DMA map.  If this fails, the packet either
   6954 		 * didn't fit in the allotted number of segments, or we
   6955 		 * were short on resources.  For the too-many-segments
   6956 		 * case, we simply report an error and drop the packet,
   6957 		 * since we can't sanely copy a jumbo packet to a single
   6958 		 * buffer.
   6959 		 */
   6960 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6961 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6962 		if (error) {
   6963 			if (error == EFBIG) {
   6964 				WM_Q_EVCNT_INCR(txq, txdrop);
   6965 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6966 				    "DMA segments, dropping...\n",
   6967 				    device_xname(sc->sc_dev));
   6968 				wm_dump_mbuf_chain(sc, m0);
   6969 				m_freem(m0);
   6970 				continue;
   6971 			}
   6972 			/* Short on resources, just stop for now. */
   6973 			DPRINTF(WM_DEBUG_TX,
   6974 			    ("%s: TX: dmamap load failed: %d\n",
   6975 			    device_xname(sc->sc_dev), error));
   6976 			break;
   6977 		}
   6978 
   6979 		segs_needed = dmamap->dm_nsegs;
   6980 
   6981 		/*
   6982 		 * Ensure we have enough descriptors free to describe
   6983 		 * the packet.  Note, we always reserve one descriptor
   6984 		 * at the end of the ring due to the semantics of the
   6985 		 * TDT register, plus one more in the event we need
   6986 		 * to load offload context.
   6987 		 */
   6988 		if (segs_needed > txq->txq_free - 2) {
   6989 			/*
   6990 			 * Not enough free descriptors to transmit this
   6991 			 * packet.  We haven't committed anything yet,
   6992 			 * so just unload the DMA map, put the packet
    6993 			 * back on the queue, and punt.  Notify the upper
   6994 			 * layer that there are no more slots left.
   6995 			 */
   6996 			DPRINTF(WM_DEBUG_TX,
   6997 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6998 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6999 			    segs_needed, txq->txq_free - 1));
   7000 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7001 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7002 			WM_Q_EVCNT_INCR(txq, txdstall);
   7003 			break;
   7004 		}
   7005 
   7006 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7007 
   7008 		DPRINTF(WM_DEBUG_TX,
   7009 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7010 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7011 
   7012 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7013 
   7014 		/*
   7015 		 * Store a pointer to the packet so that we can free it
   7016 		 * later.
   7017 		 *
   7018 		 * Initially, we consider the number of descriptors the
    7019 		 * packet uses to be the number of DMA segments.  This may be
   7020 		 * incremented by 1 if we do checksum offload (a descriptor
   7021 		 * is used to set the checksum context).
   7022 		 */
   7023 		txs->txs_mbuf = m0;
   7024 		txs->txs_firstdesc = txq->txq_next;
   7025 		txs->txs_ndesc = segs_needed;
   7026 
   7027 		/* Set up offload parameters for this packet. */
   7028 		uint32_t cmdlen, fields, dcmdlen;
   7029 		if (m0->m_pkthdr.csum_flags &
   7030 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7031 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7032 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7033 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7034 			    &do_csum) != 0) {
   7035 				/* Error message already displayed. */
   7036 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7037 				continue;
   7038 			}
   7039 		} else {
   7040 			do_csum = false;
   7041 			cmdlen = 0;
   7042 			fields = 0;
   7043 		}
   7044 
   7045 		/* Sync the DMA map. */
   7046 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7047 		    BUS_DMASYNC_PREWRITE);
   7048 
   7049 		/* Initialize the first transmit descriptor. */
   7050 		nexttx = txq->txq_next;
   7051 		if (!do_csum) {
   7052 			/* setup a legacy descriptor */
   7053 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7054 			    dmamap->dm_segs[0].ds_addr);
   7055 			txq->txq_descs[nexttx].wtx_cmdlen =
   7056 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7057 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7058 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7059 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7060 			    NULL) {
   7061 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7062 				    htole32(WTX_CMD_VLE);
   7063 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7064 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7065 			} else {
    7066 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7067 			}
   7068 			dcmdlen = 0;
   7069 		} else {
   7070 			/* setup an advanced data descriptor */
   7071 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7072 			    htole64(dmamap->dm_segs[0].ds_addr);
   7073 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7074 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7075 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7076 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7077 			    htole32(fields);
   7078 			DPRINTF(WM_DEBUG_TX,
   7079 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7080 			    device_xname(sc->sc_dev), nexttx,
   7081 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7082 			DPRINTF(WM_DEBUG_TX,
   7083 			    ("\t 0x%08x%08x\n", fields,
   7084 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7085 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7086 		}
   7087 
   7088 		lasttx = nexttx;
   7089 		nexttx = WM_NEXTTX(txq, nexttx);
   7090 		/*
    7091 		 * Fill in the next descriptors.  The legacy and advanced
    7092 		 * formats are the same from here on.
   7093 		 */
   7094 		for (seg = 1; seg < dmamap->dm_nsegs;
   7095 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7096 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7097 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7098 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7099 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7100 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7101 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7102 			lasttx = nexttx;
   7103 
   7104 			DPRINTF(WM_DEBUG_TX,
   7105 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7106 			     "len %#04zx\n",
   7107 			    device_xname(sc->sc_dev), nexttx,
   7108 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7109 			    dmamap->dm_segs[seg].ds_len));
   7110 		}
   7111 
   7112 		KASSERT(lasttx != -1);
   7113 
   7114 		/*
   7115 		 * Set up the command byte on the last descriptor of
   7116 		 * the packet.  If we're in the interrupt delay window,
   7117 		 * delay the interrupt.
   7118 		 */
   7119 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7120 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7121 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7122 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7123 
   7124 		txs->txs_lastdesc = lasttx;
   7125 
   7126 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7127 		    device_xname(sc->sc_dev),
   7128 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7129 
   7130 		/* Sync the descriptors we're using. */
   7131 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7132 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7133 
   7134 		/* Give the packet to the chip. */
   7135 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7136 		sent = true;
   7137 
   7138 		DPRINTF(WM_DEBUG_TX,
   7139 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7140 
   7141 		DPRINTF(WM_DEBUG_TX,
   7142 		    ("%s: TX: finished transmitting packet, job %d\n",
   7143 		    device_xname(sc->sc_dev), txq->txq_snext));
   7144 
   7145 		/* Advance the tx pointer. */
   7146 		txq->txq_free -= txs->txs_ndesc;
   7147 		txq->txq_next = nexttx;
   7148 
   7149 		txq->txq_sfree--;
   7150 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7151 
   7152 		/* Pass the packet to any BPF listeners. */
   7153 		bpf_mtap(ifp, m0);
   7154 	}
   7155 
   7156 	if (m0 != NULL) {
   7157 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7158 		WM_Q_EVCNT_INCR(txq, txdrop);
   7159 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7160 			__func__));
   7161 		m_freem(m0);
   7162 	}
   7163 
   7164 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7165 		/* No more slots; notify upper layer. */
   7166 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7167 	}
   7168 
   7169 	if (sent) {
   7170 		/* Set a watchdog timer in case the chip flakes out. */
   7171 		ifp->if_timer = 5;
   7172 	}
   7173 }
   7174 
   7175 /* Interrupt */
   7176 
   7177 /*
   7178  * wm_txeof:
   7179  *
   7180  *	Helper; handle transmit interrupts.
   7181  */
   7182 static int
   7183 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7184 {
   7185 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7186 	struct wm_txsoft *txs;
   7187 	bool processed = false;
   7188 	int count = 0;
   7189 	int i;
   7190 	uint8_t status;
   7191 
   7192 	KASSERT(mutex_owned(txq->txq_lock));
   7193 
   7194 	if (txq->txq_stopping)
   7195 		return 0;
   7196 
   7197 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7198 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7199 	else
   7200 		ifp->if_flags &= ~IFF_OACTIVE;
   7201 
   7202 	/*
   7203 	 * Go through the Tx list and free mbufs for those
   7204 	 * frames which have been transmitted.
   7205 	 */
   7206 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7207 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7208 		txs = &txq->txq_soft[i];
   7209 
   7210 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7211 			device_xname(sc->sc_dev), i));
   7212 
   7213 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7214 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7215 
   7216 		status =
   7217 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7218 		if ((status & WTX_ST_DD) == 0) {
   7219 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7220 			    BUS_DMASYNC_PREREAD);
   7221 			break;
   7222 		}
   7223 
   7224 		processed = true;
   7225 		count++;
   7226 		DPRINTF(WM_DEBUG_TX,
   7227 		    ("%s: TX: job %d done: descs %d..%d\n",
   7228 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7229 		    txs->txs_lastdesc));
   7230 
   7231 		/*
   7232 		 * XXX We should probably be using the statistics
   7233 		 * XXX registers, but I don't know if they exist
   7234 		 * XXX on chips before the i82544.
   7235 		 */
   7236 
   7237 #ifdef WM_EVENT_COUNTERS
   7238 		if (status & WTX_ST_TU)
   7239 			WM_Q_EVCNT_INCR(txq, tu);
   7240 #endif /* WM_EVENT_COUNTERS */
   7241 
   7242 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7243 			ifp->if_oerrors++;
   7244 			if (status & WTX_ST_LC)
   7245 				log(LOG_WARNING, "%s: late collision\n",
   7246 				    device_xname(sc->sc_dev));
   7247 			else if (status & WTX_ST_EC) {
   7248 				ifp->if_collisions += 16;
   7249 				log(LOG_WARNING, "%s: excessive collisions\n",
   7250 				    device_xname(sc->sc_dev));
   7251 			}
   7252 		} else
   7253 			ifp->if_opackets++;
   7254 
   7255 		txq->txq_free += txs->txs_ndesc;
   7256 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7257 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7258 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7259 		m_freem(txs->txs_mbuf);
   7260 		txs->txs_mbuf = NULL;
   7261 	}
   7262 
   7263 	/* Update the dirty transmit buffer pointer. */
   7264 	txq->txq_sdirty = i;
   7265 	DPRINTF(WM_DEBUG_TX,
   7266 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7267 
   7268 	if (count != 0)
   7269 		rnd_add_uint32(&sc->rnd_source, count);
   7270 
   7271 	/*
   7272 	 * If there are no more pending transmissions, cancel the watchdog
   7273 	 * timer.
   7274 	 */
   7275 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7276 		ifp->if_timer = 0;
   7277 
   7278 	return processed;
   7279 }
   7280 
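         /*
          * A minimal sketch (assumed helper names, not driver code) of the
          * reclaim scan wm_txeof performs: walk the completed jobs in
          * order and stop at the first whose last descriptor lacks the
          * DD (descriptor done) status bit:
          *
          *	for (i = dirty; sfree != qlen; i = next(i), sfree++) {
          *		if ((status(i) & WTX_ST_DD) == 0)
          *			break;
          *		recycle(job(i));
          *	}
          */
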
   7281 /*
   7282  * wm_rxeof:
   7283  *
   7284  *	Helper; handle receive interrupts.
   7285  */
   7286 static void
   7287 wm_rxeof(struct wm_rxqueue *rxq)
   7288 {
   7289 	struct wm_softc *sc = rxq->rxq_sc;
   7290 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7291 	struct wm_rxsoft *rxs;
   7292 	struct mbuf *m;
   7293 	int i, len;
   7294 	int count = 0;
   7295 	uint8_t status, errors;
   7296 	uint16_t vlantag;
   7297 
   7298 	KASSERT(mutex_owned(rxq->rxq_lock));
   7299 
   7300 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7301 		rxs = &rxq->rxq_soft[i];
   7302 
   7303 		DPRINTF(WM_DEBUG_RX,
   7304 		    ("%s: RX: checking descriptor %d\n",
   7305 		    device_xname(sc->sc_dev), i));
   7306 
    7307 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7308 
   7309 		status = rxq->rxq_descs[i].wrx_status;
   7310 		errors = rxq->rxq_descs[i].wrx_errors;
   7311 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7312 		vlantag = rxq->rxq_descs[i].wrx_special;
   7313 
   7314 		if ((status & WRX_ST_DD) == 0) {
   7315 			/* We have processed all of the receive descriptors. */
   7316 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7317 			break;
   7318 		}
   7319 
   7320 		count++;
   7321 		if (__predict_false(rxq->rxq_discard)) {
   7322 			DPRINTF(WM_DEBUG_RX,
   7323 			    ("%s: RX: discarding contents of descriptor %d\n",
   7324 			    device_xname(sc->sc_dev), i));
   7325 			wm_init_rxdesc(rxq, i);
   7326 			if (status & WRX_ST_EOP) {
   7327 				/* Reset our state. */
   7328 				DPRINTF(WM_DEBUG_RX,
   7329 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7330 				    device_xname(sc->sc_dev)));
   7331 				rxq->rxq_discard = 0;
   7332 			}
   7333 			continue;
   7334 		}
   7335 
   7336 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7337 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7338 
   7339 		m = rxs->rxs_mbuf;
   7340 
   7341 		/*
   7342 		 * Add a new receive buffer to the ring, unless of
   7343 		 * course the length is zero. Treat the latter as a
   7344 		 * failed mapping.
   7345 		 */
   7346 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7347 			/*
   7348 			 * Failed, throw away what we've done so
   7349 			 * far, and discard the rest of the packet.
   7350 			 */
   7351 			ifp->if_ierrors++;
   7352 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7353 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7354 			wm_init_rxdesc(rxq, i);
   7355 			if ((status & WRX_ST_EOP) == 0)
   7356 				rxq->rxq_discard = 1;
   7357 			if (rxq->rxq_head != NULL)
   7358 				m_freem(rxq->rxq_head);
   7359 			WM_RXCHAIN_RESET(rxq);
   7360 			DPRINTF(WM_DEBUG_RX,
   7361 			    ("%s: RX: Rx buffer allocation failed, "
   7362 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7363 			    rxq->rxq_discard ? " (discard)" : ""));
   7364 			continue;
   7365 		}
   7366 
   7367 		m->m_len = len;
   7368 		rxq->rxq_len += len;
   7369 		DPRINTF(WM_DEBUG_RX,
   7370 		    ("%s: RX: buffer at %p len %d\n",
   7371 		    device_xname(sc->sc_dev), m->m_data, len));
   7372 
   7373 		/* If this is not the end of the packet, keep looking. */
   7374 		if ((status & WRX_ST_EOP) == 0) {
   7375 			WM_RXCHAIN_LINK(rxq, m);
   7376 			DPRINTF(WM_DEBUG_RX,
   7377 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7378 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7379 			continue;
   7380 		}
   7381 
   7382 		/*
   7383 		 * Okay, we have the entire packet now.  The chip is
    7384 		 * configured to include the FCS except on I350 and I21[01]
    7385 		 * (not all chips can be configured to strip it), so we
    7386 		 * need to trim it.  We may also need to adjust the length
    7387 		 * of the previous mbuf in the chain if the current mbuf
    7388 		 * is too short.
    7389 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7390 		 * register is always set on I350, so we don't trim there.
   7391 		 */
   7392 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7393 		    && (sc->sc_type != WM_T_I210)
   7394 		    && (sc->sc_type != WM_T_I211)) {
   7395 			if (m->m_len < ETHER_CRC_LEN) {
   7396 				rxq->rxq_tail->m_len
   7397 				    -= (ETHER_CRC_LEN - m->m_len);
   7398 				m->m_len = 0;
   7399 			} else
   7400 				m->m_len -= ETHER_CRC_LEN;
   7401 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7402 		} else
   7403 			len = rxq->rxq_len;
   7404 
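         		/*
         		 * Worked example of the trim above (lengths assumed): a
         		 * minimum-size 64-byte frame arrives with rxq_len = 64
         		 * including the 4-byte FCS, so the delivered length is
         		 * 60.  If the final mbuf holds fewer than 4 bytes, the
         		 * shortfall is taken from the previous mbuf in the
         		 * chain.
         		 */
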
   7405 		WM_RXCHAIN_LINK(rxq, m);
   7406 
   7407 		*rxq->rxq_tailp = NULL;
   7408 		m = rxq->rxq_head;
   7409 
   7410 		WM_RXCHAIN_RESET(rxq);
   7411 
   7412 		DPRINTF(WM_DEBUG_RX,
   7413 		    ("%s: RX: have entire packet, len -> %d\n",
   7414 		    device_xname(sc->sc_dev), len));
   7415 
   7416 		/* If an error occurred, update stats and drop the packet. */
   7417 		if (errors &
   7418 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7419 			if (errors & WRX_ER_SE)
   7420 				log(LOG_WARNING, "%s: symbol error\n",
   7421 				    device_xname(sc->sc_dev));
   7422 			else if (errors & WRX_ER_SEQ)
   7423 				log(LOG_WARNING, "%s: receive sequence error\n",
   7424 				    device_xname(sc->sc_dev));
   7425 			else if (errors & WRX_ER_CE)
   7426 				log(LOG_WARNING, "%s: CRC error\n",
   7427 				    device_xname(sc->sc_dev));
   7428 			m_freem(m);
   7429 			continue;
   7430 		}
   7431 
   7432 		/* No errors.  Receive the packet. */
   7433 		m_set_rcvif(m, ifp);
   7434 		m->m_pkthdr.len = len;
   7435 
   7436 		/*
   7437 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7438 		 * for us.  Associate the tag with the packet.
   7439 		 */
    7440 		/* XXX should check for I350 and I354 */
   7441 		if ((status & WRX_ST_VP) != 0) {
   7442 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7443 		}
   7444 
   7445 		/* Set up checksum info for this packet. */
   7446 		if ((status & WRX_ST_IXSM) == 0) {
   7447 			if (status & WRX_ST_IPCS) {
   7448 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7449 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7450 				if (errors & WRX_ER_IPE)
   7451 					m->m_pkthdr.csum_flags |=
   7452 					    M_CSUM_IPv4_BAD;
   7453 			}
   7454 			if (status & WRX_ST_TCPCS) {
   7455 				/*
   7456 				 * Note: we don't know if this was TCP or UDP,
   7457 				 * so we just set both bits, and expect the
   7458 				 * upper layers to deal.
   7459 				 */
   7460 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7461 				m->m_pkthdr.csum_flags |=
   7462 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7463 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7464 				if (errors & WRX_ER_TCPE)
   7465 					m->m_pkthdr.csum_flags |=
   7466 					    M_CSUM_TCP_UDP_BAD;
   7467 			}
   7468 		}
   7469 
   7470 		ifp->if_ipackets++;
   7471 
   7472 		mutex_exit(rxq->rxq_lock);
   7473 
   7474 		/* Pass this up to any BPF listeners. */
   7475 		bpf_mtap(ifp, m);
   7476 
   7477 		/* Pass it on. */
   7478 		if_percpuq_enqueue(sc->sc_ipq, m);
   7479 
   7480 		mutex_enter(rxq->rxq_lock);
   7481 
   7482 		if (rxq->rxq_stopping)
   7483 			break;
   7484 	}
   7485 
   7486 	/* Update the receive pointer. */
   7487 	rxq->rxq_ptr = i;
   7488 	if (count != 0)
   7489 		rnd_add_uint32(&sc->rnd_source, count);
   7490 
   7491 	DPRINTF(WM_DEBUG_RX,
   7492 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7493 }
   7494 
   7495 /*
   7496  * wm_linkintr_gmii:
   7497  *
   7498  *	Helper; handle link interrupts for GMII.
   7499  */
   7500 static void
   7501 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7502 {
   7503 
   7504 	KASSERT(WM_CORE_LOCKED(sc));
   7505 
   7506 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7507 		__func__));
   7508 
   7509 	if (icr & ICR_LSC) {
   7510 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7511 
   7512 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7513 			wm_gig_downshift_workaround_ich8lan(sc);
   7514 
   7515 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7516 			device_xname(sc->sc_dev)));
   7517 		mii_pollstat(&sc->sc_mii);
   7518 		if (sc->sc_type == WM_T_82543) {
   7519 			int miistatus, active;
   7520 
   7521 			/*
   7522 			 * With 82543, we need to force speed and
   7523 			 * duplex on the MAC equal to what the PHY
   7524 			 * speed and duplex configuration is.
   7525 			 */
   7526 			miistatus = sc->sc_mii.mii_media_status;
   7527 
   7528 			if (miistatus & IFM_ACTIVE) {
   7529 				active = sc->sc_mii.mii_media_active;
   7530 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7531 				switch (IFM_SUBTYPE(active)) {
   7532 				case IFM_10_T:
   7533 					sc->sc_ctrl |= CTRL_SPEED_10;
   7534 					break;
   7535 				case IFM_100_TX:
   7536 					sc->sc_ctrl |= CTRL_SPEED_100;
   7537 					break;
   7538 				case IFM_1000_T:
   7539 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7540 					break;
   7541 				default:
   7542 					/*
   7543 					 * fiber?
    7544 					 * Should not enter here.
   7545 					 */
   7546 					printf("unknown media (%x)\n", active);
   7547 					break;
   7548 				}
   7549 				if (active & IFM_FDX)
   7550 					sc->sc_ctrl |= CTRL_FD;
   7551 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7552 			}
   7553 		} else if ((sc->sc_type == WM_T_ICH8)
   7554 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7555 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7556 		} else if (sc->sc_type == WM_T_PCH) {
   7557 			wm_k1_gig_workaround_hv(sc,
   7558 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7559 		}
   7560 
   7561 		if ((sc->sc_phytype == WMPHY_82578)
   7562 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7563 			== IFM_1000_T)) {
   7564 
   7565 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7566 				delay(200*1000); /* XXX too big */
   7567 
   7568 				/* Link stall fix for link up */
   7569 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7570 				    HV_MUX_DATA_CTRL,
   7571 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7572 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7573 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7574 				    HV_MUX_DATA_CTRL,
   7575 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7576 			}
   7577 		}
   7578 	} else if (icr & ICR_RXSEQ) {
   7579 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7580 			device_xname(sc->sc_dev)));
   7581 	}
   7582 }
   7583 
   7584 /*
   7585  * wm_linkintr_tbi:
   7586  *
   7587  *	Helper; handle link interrupts for TBI mode.
   7588  */
   7589 static void
   7590 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7591 {
   7592 	uint32_t status;
   7593 
   7594 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7595 		__func__));
   7596 
   7597 	status = CSR_READ(sc, WMREG_STATUS);
   7598 	if (icr & ICR_LSC) {
   7599 		if (status & STATUS_LU) {
   7600 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7601 			    device_xname(sc->sc_dev),
   7602 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7603 			/*
   7604 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7605 			 * so we should update sc->sc_ctrl
   7606 			 */
   7607 
   7608 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7609 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7610 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7611 			if (status & STATUS_FD)
   7612 				sc->sc_tctl |=
   7613 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7614 			else
   7615 				sc->sc_tctl |=
   7616 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7617 			if (sc->sc_ctrl & CTRL_TFCE)
   7618 				sc->sc_fcrtl |= FCRTL_XONE;
   7619 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7620 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7621 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7622 				      sc->sc_fcrtl);
   7623 			sc->sc_tbi_linkup = 1;
   7624 		} else {
   7625 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7626 			    device_xname(sc->sc_dev)));
   7627 			sc->sc_tbi_linkup = 0;
   7628 		}
   7629 		/* Update LED */
   7630 		wm_tbi_serdes_set_linkled(sc);
   7631 	} else if (icr & ICR_RXSEQ) {
   7632 		DPRINTF(WM_DEBUG_LINK,
   7633 		    ("%s: LINK: Receive sequence error\n",
   7634 		    device_xname(sc->sc_dev)));
   7635 	}
   7636 }
   7637 
   7638 /*
   7639  * wm_linkintr_serdes:
   7640  *
    7641  *	Helper; handle link interrupts for SERDES mode.
   7642  */
   7643 static void
   7644 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7645 {
   7646 	struct mii_data *mii = &sc->sc_mii;
   7647 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7648 	uint32_t pcs_adv, pcs_lpab, reg;
   7649 
   7650 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7651 		__func__));
   7652 
   7653 	if (icr & ICR_LSC) {
   7654 		/* Check PCS */
   7655 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7656 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7657 			mii->mii_media_status |= IFM_ACTIVE;
   7658 			sc->sc_tbi_linkup = 1;
   7659 		} else {
   7660 			mii->mii_media_status |= IFM_NONE;
   7661 			sc->sc_tbi_linkup = 0;
   7662 			wm_tbi_serdes_set_linkled(sc);
   7663 			return;
   7664 		}
   7665 		mii->mii_media_active |= IFM_1000_SX;
   7666 		if ((reg & PCS_LSTS_FDX) != 0)
   7667 			mii->mii_media_active |= IFM_FDX;
   7668 		else
   7669 			mii->mii_media_active |= IFM_HDX;
   7670 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7671 			/* Check flow */
   7672 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7673 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7674 				DPRINTF(WM_DEBUG_LINK,
   7675 				    ("XXX LINKOK but not ACOMP\n"));
   7676 				return;
   7677 			}
   7678 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7679 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7680 			DPRINTF(WM_DEBUG_LINK,
   7681 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
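         			/*
         			 * The chain below implements the usual 802.3
         			 * pause autonegotiation resolution (stated here
         			 * as background, not taken from this file):
         			 *
         			 *	both sym		-> TX and RX pause
         			 *	local asym only,
         			 *	partner sym+asym	-> TX pause only
         			 *	local sym+asym,
         			 *	partner asym only	-> RX pause only
         			 */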
   7682 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7683 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7684 				mii->mii_media_active |= IFM_FLOW
   7685 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7686 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7687 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7688 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7689 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7690 				mii->mii_media_active |= IFM_FLOW
   7691 				    | IFM_ETH_TXPAUSE;
   7692 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7693 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7694 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7695 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7696 				mii->mii_media_active |= IFM_FLOW
   7697 				    | IFM_ETH_RXPAUSE;
   7698 		}
   7699 		/* Update LED */
   7700 		wm_tbi_serdes_set_linkled(sc);
   7701 	} else {
   7702 		DPRINTF(WM_DEBUG_LINK,
   7703 		    ("%s: LINK: Receive sequence error\n",
   7704 		    device_xname(sc->sc_dev)));
   7705 	}
   7706 }
   7707 
   7708 /*
   7709  * wm_linkintr:
   7710  *
   7711  *	Helper; handle link interrupts.
   7712  */
   7713 static void
   7714 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7715 {
   7716 
   7717 	KASSERT(WM_CORE_LOCKED(sc));
   7718 
   7719 	if (sc->sc_flags & WM_F_HAS_MII)
   7720 		wm_linkintr_gmii(sc, icr);
   7721 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7722 	    && (sc->sc_type >= WM_T_82575))
   7723 		wm_linkintr_serdes(sc, icr);
   7724 	else
   7725 		wm_linkintr_tbi(sc, icr);
   7726 }
   7727 
   7728 /*
   7729  * wm_intr_legacy:
   7730  *
   7731  *	Interrupt service routine for INTx and MSI.
   7732  */
   7733 static int
   7734 wm_intr_legacy(void *arg)
   7735 {
   7736 	struct wm_softc *sc = arg;
   7737 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7738 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7739 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7740 	uint32_t icr, rndval = 0;
   7741 	int handled = 0;
   7742 
   7743 	DPRINTF(WM_DEBUG_TX,
   7744 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7745 	while (1 /* CONSTCOND */) {
   7746 		icr = CSR_READ(sc, WMREG_ICR);
   7747 		if ((icr & sc->sc_icr) == 0)
   7748 			break;
   7749 		if (rndval == 0)
   7750 			rndval = icr;
   7751 
   7752 		mutex_enter(rxq->rxq_lock);
   7753 
   7754 		if (rxq->rxq_stopping) {
   7755 			mutex_exit(rxq->rxq_lock);
   7756 			break;
   7757 		}
   7758 
   7759 		handled = 1;
   7760 
   7761 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7762 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7763 			DPRINTF(WM_DEBUG_RX,
   7764 			    ("%s: RX: got Rx intr 0x%08x\n",
   7765 			    device_xname(sc->sc_dev),
   7766 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7767 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7768 		}
   7769 #endif
   7770 		wm_rxeof(rxq);
   7771 
   7772 		mutex_exit(rxq->rxq_lock);
   7773 		mutex_enter(txq->txq_lock);
   7774 
   7775 		if (txq->txq_stopping) {
   7776 			mutex_exit(txq->txq_lock);
   7777 			break;
   7778 		}
   7779 
   7780 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7781 		if (icr & ICR_TXDW) {
   7782 			DPRINTF(WM_DEBUG_TX,
   7783 			    ("%s: TX: got TXDW interrupt\n",
   7784 			    device_xname(sc->sc_dev)));
   7785 			WM_Q_EVCNT_INCR(txq, txdw);
   7786 		}
   7787 #endif
   7788 		wm_txeof(sc, txq);
   7789 
   7790 		mutex_exit(txq->txq_lock);
   7791 		WM_CORE_LOCK(sc);
   7792 
   7793 		if (sc->sc_core_stopping) {
   7794 			WM_CORE_UNLOCK(sc);
   7795 			break;
   7796 		}
   7797 
   7798 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7799 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7800 			wm_linkintr(sc, icr);
   7801 		}
   7802 
   7803 		WM_CORE_UNLOCK(sc);
   7804 
   7805 		if (icr & ICR_RXO) {
   7806 #if defined(WM_DEBUG)
   7807 			log(LOG_WARNING, "%s: Receive overrun\n",
   7808 			    device_xname(sc->sc_dev));
   7809 #endif /* defined(WM_DEBUG) */
   7810 		}
   7811 	}
   7812 
   7813 	rnd_add_uint32(&sc->rnd_source, rndval);
   7814 
   7815 	if (handled) {
   7816 		/* Try to get more packets going. */
   7817 		ifp->if_start(ifp);
   7818 	}
   7819 
   7820 	return handled;
   7821 }
   7822 
   7823 static int
   7824 wm_txrxintr_msix(void *arg)
   7825 {
   7826 	struct wm_queue *wmq = arg;
   7827 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7828 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7829 	struct wm_softc *sc = txq->txq_sc;
   7830 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7831 
   7832 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7833 
   7834 	DPRINTF(WM_DEBUG_TX,
   7835 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7836 
   7837 	if (sc->sc_type == WM_T_82574)
   7838 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7839 	else if (sc->sc_type == WM_T_82575)
   7840 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7841 	else
   7842 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7843 
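         	/*
         	 * The writes above follow the usual MSI-X mask-while-servicing
         	 * pattern: this queue's vector is disabled here and re-enabled
         	 * by the matching IMS/EIMS writes at the end of the function,
         	 * so the hardware will not re-raise the interrupt mid-handler.
         	 */
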
   7844 	mutex_enter(txq->txq_lock);
   7845 
   7846 	if (txq->txq_stopping) {
   7847 		mutex_exit(txq->txq_lock);
   7848 		return 0;
   7849 	}
   7850 
   7851 	WM_Q_EVCNT_INCR(txq, txdw);
   7852 	wm_txeof(sc, txq);
   7853 
   7854 	/* Try to get more packets going. */
   7855 	if (pcq_peek(txq->txq_interq) != NULL)
   7856 		wm_nq_transmit_locked(ifp, txq);
   7857 	/*
    7858 	 * There is still some upper-layer processing which calls
    7859 	 * ifp->if_start(), e.g. ALTQ.
   7860 	 */
   7861 	if (wmq->wmq_id == 0) {
   7862 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7863 			wm_nq_start_locked(ifp);
   7864 	}
   7865 
   7866 	mutex_exit(txq->txq_lock);
   7867 
   7868 	DPRINTF(WM_DEBUG_RX,
   7869 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7870 	mutex_enter(rxq->rxq_lock);
   7871 
   7872 	if (rxq->rxq_stopping) {
   7873 		mutex_exit(rxq->rxq_lock);
   7874 		return 0;
   7875 	}
   7876 
   7877 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7878 	wm_rxeof(rxq);
   7879 	mutex_exit(rxq->rxq_lock);
   7880 
   7881 	if (sc->sc_type == WM_T_82574)
   7882 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7883 	else if (sc->sc_type == WM_T_82575)
   7884 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7885 	else
   7886 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7887 
   7888 	return 1;
   7889 }
   7890 
   7891 /*
   7892  * wm_linkintr_msix:
   7893  *
   7894  *	Interrupt service routine for link status change for MSI-X.
   7895  */
   7896 static int
   7897 wm_linkintr_msix(void *arg)
   7898 {
   7899 	struct wm_softc *sc = arg;
   7900 	uint32_t reg;
   7901 
   7902 	DPRINTF(WM_DEBUG_LINK,
   7903 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7904 
   7905 	reg = CSR_READ(sc, WMREG_ICR);
   7906 	WM_CORE_LOCK(sc);
   7907 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   7908 		goto out;
   7909 
   7910 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7911 	wm_linkintr(sc, ICR_LSC);
   7912 
   7913 out:
   7914 	WM_CORE_UNLOCK(sc);
   7915 
   7916 	if (sc->sc_type == WM_T_82574)
   7917 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7918 	else if (sc->sc_type == WM_T_82575)
   7919 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7920 	else
   7921 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7922 
   7923 	return 1;
   7924 }
   7925 
   7926 /*
   7927  * Media related.
   7928  * GMII, SGMII, TBI (and SERDES)
   7929  */
   7930 
   7931 /* Common */
   7932 
   7933 /*
   7934  * wm_tbi_serdes_set_linkled:
   7935  *
   7936  *	Update the link LED on TBI and SERDES devices.
   7937  */
   7938 static void
   7939 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7940 {
   7941 
   7942 	if (sc->sc_tbi_linkup)
   7943 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7944 	else
   7945 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7946 
   7947 	/* 82540 or newer devices are active low */
   7948 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7949 
   7950 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7951 }
   7952 
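         /*
          * Worked example of the polarity handling above (link state
          * assumed): on an 82540 or newer part with link up, SWDPIN(0) is
          * first set and then cleared again by the XOR, driving the pin
          * low, which lights the active-low LED.  On older parts the XOR
          * with 0 leaves the pin high.
          */
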
   7953 /* GMII related */
   7954 
   7955 /*
   7956  * wm_gmii_reset:
   7957  *
   7958  *	Reset the PHY.
   7959  */
   7960 static void
   7961 wm_gmii_reset(struct wm_softc *sc)
   7962 {
   7963 	uint32_t reg;
   7964 	int rv;
   7965 
   7966 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7967 		device_xname(sc->sc_dev), __func__));
   7968 
   7969 	rv = sc->phy.acquire(sc);
   7970 	if (rv != 0) {
   7971 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7972 		    __func__);
   7973 		return;
   7974 	}
   7975 
   7976 	switch (sc->sc_type) {
   7977 	case WM_T_82542_2_0:
   7978 	case WM_T_82542_2_1:
   7979 		/* null */
   7980 		break;
   7981 	case WM_T_82543:
    7982 		/*
    7983 		 * With the 82543, we need to force the MAC's speed and
    7984 		 * duplex settings to match the PHY's configuration.
    7985 		 * In addition, we need to toggle the PHY's hardware reset
    7986 		 * pin to take it out of reset.
    7987 		 */
   7988 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7989 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7990 
   7991 		/* The PHY reset pin is active-low. */
   7992 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7993 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7994 		    CTRL_EXT_SWDPIN(4));
   7995 		reg |= CTRL_EXT_SWDPIO(4);
   7996 
   7997 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7998 		CSR_WRITE_FLUSH(sc);
   7999 		delay(10*1000);
   8000 
   8001 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8002 		CSR_WRITE_FLUSH(sc);
   8003 		delay(150);
   8004 #if 0
   8005 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8006 #endif
   8007 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8008 		break;
   8009 	case WM_T_82544:	/* reset 10000us */
   8010 	case WM_T_82540:
   8011 	case WM_T_82545:
   8012 	case WM_T_82545_3:
   8013 	case WM_T_82546:
   8014 	case WM_T_82546_3:
   8015 	case WM_T_82541:
   8016 	case WM_T_82541_2:
   8017 	case WM_T_82547:
   8018 	case WM_T_82547_2:
   8019 	case WM_T_82571:	/* reset 100us */
   8020 	case WM_T_82572:
   8021 	case WM_T_82573:
   8022 	case WM_T_82574:
   8023 	case WM_T_82575:
   8024 	case WM_T_82576:
   8025 	case WM_T_82580:
   8026 	case WM_T_I350:
   8027 	case WM_T_I354:
   8028 	case WM_T_I210:
   8029 	case WM_T_I211:
   8030 	case WM_T_82583:
   8031 	case WM_T_80003:
   8032 		/* generic reset */
   8033 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8034 		CSR_WRITE_FLUSH(sc);
   8035 		delay(20000);
   8036 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8037 		CSR_WRITE_FLUSH(sc);
   8038 		delay(20000);
   8039 
   8040 		if ((sc->sc_type == WM_T_82541)
   8041 		    || (sc->sc_type == WM_T_82541_2)
   8042 		    || (sc->sc_type == WM_T_82547)
   8043 		    || (sc->sc_type == WM_T_82547_2)) {
    8044 			/* workarounds for igp are done in igp_reset() */
   8045 			/* XXX add code to set LED after phy reset */
   8046 		}
   8047 		break;
   8048 	case WM_T_ICH8:
   8049 	case WM_T_ICH9:
   8050 	case WM_T_ICH10:
   8051 	case WM_T_PCH:
   8052 	case WM_T_PCH2:
   8053 	case WM_T_PCH_LPT:
   8054 	case WM_T_PCH_SPT:
   8055 		/* generic reset */
   8056 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8057 		CSR_WRITE_FLUSH(sc);
   8058 		delay(100);
   8059 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8060 		CSR_WRITE_FLUSH(sc);
   8061 		delay(150);
   8062 		break;
   8063 	default:
   8064 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8065 		    __func__);
   8066 		break;
   8067 	}
   8068 
   8069 	sc->phy.release(sc);
   8070 
   8071 	/* get_cfg_done */
   8072 	wm_get_cfg_done(sc);
   8073 
   8074 	/* extra setup */
   8075 	switch (sc->sc_type) {
   8076 	case WM_T_82542_2_0:
   8077 	case WM_T_82542_2_1:
   8078 	case WM_T_82543:
   8079 	case WM_T_82544:
   8080 	case WM_T_82540:
   8081 	case WM_T_82545:
   8082 	case WM_T_82545_3:
   8083 	case WM_T_82546:
   8084 	case WM_T_82546_3:
   8085 	case WM_T_82541_2:
   8086 	case WM_T_82547_2:
   8087 	case WM_T_82571:
   8088 	case WM_T_82572:
   8089 	case WM_T_82573:
   8090 	case WM_T_82575:
   8091 	case WM_T_82576:
   8092 	case WM_T_82580:
   8093 	case WM_T_I350:
   8094 	case WM_T_I354:
   8095 	case WM_T_I210:
   8096 	case WM_T_I211:
   8097 	case WM_T_80003:
   8098 		/* null */
   8099 		break;
   8100 	case WM_T_82574:
   8101 	case WM_T_82583:
   8102 		wm_lplu_d0_disable(sc);
   8103 		break;
   8104 	case WM_T_82541:
   8105 	case WM_T_82547:
    8106 		/* XXX Actively configure the LED after PHY reset */
   8107 		break;
   8108 	case WM_T_ICH8:
   8109 	case WM_T_ICH9:
   8110 	case WM_T_ICH10:
   8111 	case WM_T_PCH:
   8112 	case WM_T_PCH2:
   8113 	case WM_T_PCH_LPT:
   8114 	case WM_T_PCH_SPT:
    8115 		/* Allow time for h/w to get to a quiescent state after reset */
   8116 		delay(10*1000);
   8117 
   8118 		if (sc->sc_type == WM_T_PCH)
   8119 			wm_hv_phy_workaround_ich8lan(sc);
   8120 
   8121 		if (sc->sc_type == WM_T_PCH2)
   8122 			wm_lv_phy_workaround_ich8lan(sc);
   8123 
   8124 		/* Clear the host wakeup bit after lcd reset */
   8125 		if (sc->sc_type >= WM_T_PCH) {
   8126 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8127 			    BM_PORT_GEN_CFG);
   8128 			reg &= ~BM_WUC_HOST_WU_BIT;
   8129 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8130 			    BM_PORT_GEN_CFG, reg);
   8131 		}
   8132 
    8133 		/*
    8134 		 * XXX Configure the LCD with the extended configuration
    8135 		 * region in the NVM
    8136 		 */
   8137 
   8138 		/* Disable D0 LPLU. */
   8139 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8140 			wm_lplu_d0_disable_pch(sc);
   8141 		else
   8142 			wm_lplu_d0_disable(sc);	/* ICH* */
   8143 		break;
   8144 	default:
   8145 		panic("%s: unknown type\n", __func__);
   8146 		break;
   8147 	}
   8148 }
   8149 
   8150 /*
   8151  * wm_get_phy_id_82575:
   8152  *
   8153  * Return PHY ID. Return -1 if it failed.
   8154  */
   8155 static int
   8156 wm_get_phy_id_82575(struct wm_softc *sc)
   8157 {
   8158 	uint32_t reg;
   8159 	int phyid = -1;
   8160 
   8161 	/* XXX */
   8162 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8163 		return -1;
   8164 
   8165 	if (wm_sgmii_uses_mdio(sc)) {
   8166 		switch (sc->sc_type) {
   8167 		case WM_T_82575:
   8168 		case WM_T_82576:
   8169 			reg = CSR_READ(sc, WMREG_MDIC);
   8170 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8171 			break;
   8172 		case WM_T_82580:
   8173 		case WM_T_I350:
   8174 		case WM_T_I354:
   8175 		case WM_T_I210:
   8176 		case WM_T_I211:
   8177 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8178 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8179 			break;
   8180 		default:
   8181 			return -1;
   8182 		}
   8183 	}
   8184 
   8185 	return phyid;
   8186 }
   8187 
   8188 
   8189 /*
   8190  * wm_gmii_mediainit:
   8191  *
   8192  *	Initialize media for use on 1000BASE-T devices.
   8193  */
   8194 static void
   8195 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8196 {
   8197 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8198 	struct mii_data *mii = &sc->sc_mii;
   8199 	uint32_t reg;
   8200 
   8201 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8202 		device_xname(sc->sc_dev), __func__));
   8203 
   8204 	/* We have GMII. */
   8205 	sc->sc_flags |= WM_F_HAS_MII;
   8206 
   8207 	if (sc->sc_type == WM_T_80003)
   8208 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8209 	else
   8210 		sc->sc_tipg = TIPG_1000T_DFLT;
   8211 
   8212 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8213 	if ((sc->sc_type == WM_T_82580)
   8214 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8215 	    || (sc->sc_type == WM_T_I211)) {
   8216 		reg = CSR_READ(sc, WMREG_PHPM);
   8217 		reg &= ~PHPM_GO_LINK_D;
   8218 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8219 	}
   8220 
   8221 	/*
   8222 	 * Let the chip set speed/duplex on its own based on
   8223 	 * signals from the PHY.
   8224 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8225 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8226 	 */
   8227 	sc->sc_ctrl |= CTRL_SLU;
   8228 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8229 
   8230 	/* Initialize our media structures and probe the GMII. */
   8231 	mii->mii_ifp = ifp;
   8232 
   8233 	/*
   8234 	 * Determine the PHY access method.
   8235 	 *
    8236 	 *  For SGMII, use the SGMII-specific method.
    8237 	 *
    8238 	 *  For some devices, we can determine the PHY access method
    8239 	 * from sc_type.
    8240 	 *
    8241 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8242 	 * access method from sc_type alone, so use the PCI product ID for
    8243 	 * some devices.
    8244 	 *  For other ICH8 variants, try igp's method first; if no PHY is
    8245 	 * detected that way, fall back to bm's method.
   8246 	 */
   8247 	switch (prodid) {
   8248 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8249 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8250 		/* 82577 */
   8251 		sc->sc_phytype = WMPHY_82577;
   8252 		break;
   8253 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8254 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8255 		/* 82578 */
   8256 		sc->sc_phytype = WMPHY_82578;
   8257 		break;
   8258 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8259 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8260 		/* 82579 */
   8261 		sc->sc_phytype = WMPHY_82579;
   8262 		break;
   8263 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8264 	case PCI_PRODUCT_INTEL_82801I_BM:
   8265 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8266 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8267 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8268 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8269 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8270 		/* ICH8, 9, 10 with 82567 */
   8271 		sc->sc_phytype = WMPHY_BM;
   8272 		mii->mii_readreg = wm_gmii_bm_readreg;
   8273 		mii->mii_writereg = wm_gmii_bm_writereg;
   8274 		break;
   8275 	default:
   8276 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8277 		    && !wm_sgmii_uses_mdio(sc)){
   8278 			/* SGMII */
   8279 			mii->mii_readreg = wm_sgmii_readreg;
   8280 			mii->mii_writereg = wm_sgmii_writereg;
   8281 		} else if ((sc->sc_type == WM_T_82574)
   8282 		    || (sc->sc_type == WM_T_82583)) {
   8283 			/* BM2 (phyaddr == 1) */
   8284 			sc->sc_phytype = WMPHY_BM;
   8285 			mii->mii_readreg = wm_gmii_bm_readreg;
   8286 			mii->mii_writereg = wm_gmii_bm_writereg;
   8287 		} else if (sc->sc_type >= WM_T_ICH8) {
   8288 			/* non-82567 ICH8, 9 and 10 */
   8289 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8290 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8291 		} else if (sc->sc_type >= WM_T_80003) {
   8292 			/* 80003 */
   8293 			sc->sc_phytype = WMPHY_GG82563;
   8294 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8295 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8296 		} else if (sc->sc_type >= WM_T_I210) {
   8297 			/* I210 and I211 */
   8298 			sc->sc_phytype = WMPHY_210;
   8299 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8300 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8301 		} else if (sc->sc_type >= WM_T_82580) {
   8302 			/* 82580, I350 and I354 */
   8303 			sc->sc_phytype = WMPHY_82580;
   8304 			mii->mii_readreg = wm_gmii_82580_readreg;
   8305 			mii->mii_writereg = wm_gmii_82580_writereg;
   8306 		} else if (sc->sc_type >= WM_T_82544) {
    8307 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8308 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8309 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8310 		} else {
   8311 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8312 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8313 		}
   8314 		break;
   8315 	}
   8316 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8317 		/* All PCH* use _hv_ */
   8318 		mii->mii_readreg = wm_gmii_hv_readreg;
   8319 		mii->mii_writereg = wm_gmii_hv_writereg;
   8320 	}
   8321 	mii->mii_statchg = wm_gmii_statchg;
   8322 
   8323 	wm_gmii_reset(sc);
   8324 
   8325 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8326 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8327 	    wm_gmii_mediastatus);
   8328 
   8329 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8330 	    || (sc->sc_type == WM_T_82580)
   8331 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8332 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8333 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8334 			/* Attach only one port */
   8335 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8336 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8337 		} else {
   8338 			int i, id;
   8339 			uint32_t ctrl_ext;
   8340 
   8341 			id = wm_get_phy_id_82575(sc);
   8342 			if (id != -1) {
   8343 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8344 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8345 			}
   8346 			if ((id == -1)
   8347 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    8348 				/* Power on the SGMII PHY if it is disabled */
   8349 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8350 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8351 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8352 				CSR_WRITE_FLUSH(sc);
   8353 				delay(300*1000); /* XXX too long */
   8354 
    8355 				/* try PHY addresses 1 through 7 */
   8356 				for (i = 1; i < 8; i++)
   8357 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8358 					    0xffffffff, i, MII_OFFSET_ANY,
   8359 					    MIIF_DOPAUSE);
   8360 
    8361 				/* restore the previous SFP cage power state */
   8362 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8363 			}
   8364 		}
   8365 	} else {
   8366 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8367 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8368 	}
   8369 
   8370 	/*
   8371 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8372 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8373 	 */
   8374 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8375 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8376 		wm_set_mdio_slow_mode_hv(sc);
   8377 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8378 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8379 	}
   8380 
   8381 	/*
   8382 	 * (For ICH8 variants)
   8383 	 * If PHY detection failed, use BM's r/w function and retry.
   8384 	 */
   8385 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8386 		/* if failed, retry with *_bm_* */
   8387 		mii->mii_readreg = wm_gmii_bm_readreg;
   8388 		mii->mii_writereg = wm_gmii_bm_writereg;
   8389 
   8390 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8391 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8392 	}
   8393 
   8394 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8395 		/* No PHY was found */
   8396 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8397 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8398 		sc->sc_phytype = WMPHY_NONE;
   8399 	} else {
   8400 		/*
   8401 		 * PHY Found!
   8402 		 * Check PHY type.
   8403 		 */
   8404 		uint32_t model;
   8405 		struct mii_softc *child;
   8406 
   8407 		child = LIST_FIRST(&mii->mii_phys);
   8408 		model = child->mii_mpd_model;
   8409 		if (model == MII_MODEL_yyINTEL_I82566)
   8410 			sc->sc_phytype = WMPHY_IGP_3;
   8411 
   8412 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8413 	}
   8414 }
   8415 
   8416 /*
   8417  * wm_gmii_mediachange:	[ifmedia interface function]
   8418  *
   8419  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8420  */
   8421 static int
   8422 wm_gmii_mediachange(struct ifnet *ifp)
   8423 {
   8424 	struct wm_softc *sc = ifp->if_softc;
   8425 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8426 	int rc;
   8427 
   8428 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8429 		device_xname(sc->sc_dev), __func__));
   8430 	if ((ifp->if_flags & IFF_UP) == 0)
   8431 		return 0;
   8432 
   8433 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8434 	sc->sc_ctrl |= CTRL_SLU;
   8435 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8436 	    || (sc->sc_type > WM_T_82543)) {
   8437 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8438 	} else {
   8439 		sc->sc_ctrl &= ~CTRL_ASDE;
   8440 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8441 		if (ife->ifm_media & IFM_FDX)
   8442 			sc->sc_ctrl |= CTRL_FD;
   8443 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8444 		case IFM_10_T:
   8445 			sc->sc_ctrl |= CTRL_SPEED_10;
   8446 			break;
   8447 		case IFM_100_TX:
   8448 			sc->sc_ctrl |= CTRL_SPEED_100;
   8449 			break;
   8450 		case IFM_1000_T:
   8451 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8452 			break;
   8453 		default:
   8454 			panic("wm_gmii_mediachange: bad media 0x%x",
   8455 			    ife->ifm_media);
   8456 		}
   8457 	}
   8458 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8459 	if (sc->sc_type <= WM_T_82543)
   8460 		wm_gmii_reset(sc);
   8461 
   8462 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8463 		return 0;
   8464 	return rc;
   8465 }
   8466 
   8467 /*
   8468  * wm_gmii_mediastatus:	[ifmedia interface function]
   8469  *
   8470  *	Get the current interface media status on a 1000BASE-T device.
   8471  */
   8472 static void
   8473 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8474 {
   8475 	struct wm_softc *sc = ifp->if_softc;
   8476 
   8477 	ether_mediastatus(ifp, ifmr);
   8478 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8479 	    | sc->sc_flowflags;
   8480 }
   8481 
   8482 #define	MDI_IO		CTRL_SWDPIN(2)
   8483 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8484 #define	MDI_CLK		CTRL_SWDPIN(3)
   8485 
   8486 static void
   8487 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8488 {
   8489 	uint32_t i, v;
   8490 
   8491 	v = CSR_READ(sc, WMREG_CTRL);
   8492 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8493 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8494 
   8495 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8496 		if (data & i)
   8497 			v |= MDI_IO;
   8498 		else
   8499 			v &= ~MDI_IO;
   8500 		CSR_WRITE(sc, WMREG_CTRL, v);
   8501 		CSR_WRITE_FLUSH(sc);
   8502 		delay(10);
   8503 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8504 		CSR_WRITE_FLUSH(sc);
   8505 		delay(10);
   8506 		CSR_WRITE(sc, WMREG_CTRL, v);
   8507 		CSR_WRITE_FLUSH(sc);
   8508 		delay(10);
   8509 	}
   8510 }
   8511 
   8512 static uint32_t
   8513 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8514 {
   8515 	uint32_t v, i, data = 0;
   8516 
   8517 	v = CSR_READ(sc, WMREG_CTRL);
   8518 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8519 	v |= CTRL_SWDPIO(3);
   8520 
   8521 	CSR_WRITE(sc, WMREG_CTRL, v);
   8522 	CSR_WRITE_FLUSH(sc);
   8523 	delay(10);
   8524 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8525 	CSR_WRITE_FLUSH(sc);
   8526 	delay(10);
   8527 	CSR_WRITE(sc, WMREG_CTRL, v);
   8528 	CSR_WRITE_FLUSH(sc);
   8529 	delay(10);
   8530 
   8531 	for (i = 0; i < 16; i++) {
   8532 		data <<= 1;
   8533 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8534 		CSR_WRITE_FLUSH(sc);
   8535 		delay(10);
   8536 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8537 			data |= 1;
   8538 		CSR_WRITE(sc, WMREG_CTRL, v);
   8539 		CSR_WRITE_FLUSH(sc);
   8540 		delay(10);
   8541 	}
   8542 
   8543 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8544 	CSR_WRITE_FLUSH(sc);
   8545 	delay(10);
   8546 	CSR_WRITE(sc, WMREG_CTRL, v);
   8547 	CSR_WRITE_FLUSH(sc);
   8548 	delay(10);
   8549 
   8550 	return data;
   8551 }
   8552 
   8553 #undef MDI_IO
   8554 #undef MDI_DIR
   8555 #undef MDI_CLK
   8556 
   8557 /*
   8558  * wm_gmii_i82543_readreg:	[mii interface function]
   8559  *
   8560  *	Read a PHY register on the GMII (i82543 version).
   8561  */
   8562 static int
   8563 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8564 {
   8565 	struct wm_softc *sc = device_private(self);
   8566 	int rv;
   8567 
   8568 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8569 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8570 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8571 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8572 
   8573 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8574 	    device_xname(sc->sc_dev), phy, reg, rv));
   8575 
   8576 	return rv;
   8577 }
   8578 
   8579 /*
   8580  * wm_gmii_i82543_writereg:	[mii interface function]
   8581  *
   8582  *	Write a PHY register on the GMII (i82543 version).
   8583  */
   8584 static void
   8585 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8586 {
   8587 	struct wm_softc *sc = device_private(self);
   8588 
   8589 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8590 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8591 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8592 	    (MII_COMMAND_START << 30), 32);
   8593 }
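
         /*
          * Editor's note: the 32-bit write frame assembled above lays out,
          * MSB first, as
          *
          *	[31:30] MII_COMMAND_START	[29:28] MII_COMMAND_WRITE
          *	[27:23] PHY address		[22:18] register address
          *	[17:16] MII_COMMAND_ACK (turnaround)
          *	[15:0]  data
          *
          * The 14-bit read header is the same start/opcode/phy/reg sequence;
          * the turnaround and the 16 data bits are then clocked in by
          * wm_i82543_mii_recvbits().
          */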
   8594 
   8595 /*
   8596  * wm_gmii_mdic_readreg:	[mii interface function]
   8597  *
   8598  *	Read a PHY register on the GMII.
   8599  */
   8600 static int
   8601 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8602 {
   8603 	struct wm_softc *sc = device_private(self);
   8604 	uint32_t mdic = 0;
   8605 	int i, rv;
   8606 
   8607 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8608 	    MDIC_REGADD(reg));
   8609 
   8610 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8611 		mdic = CSR_READ(sc, WMREG_MDIC);
   8612 		if (mdic & MDIC_READY)
   8613 			break;
   8614 		delay(50);
   8615 	}
   8616 
   8617 	if ((mdic & MDIC_READY) == 0) {
   8618 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8619 		    device_xname(sc->sc_dev), phy, reg);
   8620 		rv = 0;
   8621 	} else if (mdic & MDIC_E) {
   8622 #if 0 /* This is normal if no PHY is present. */
   8623 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8624 		    device_xname(sc->sc_dev), phy, reg);
   8625 #endif
   8626 		rv = 0;
   8627 	} else {
   8628 		rv = MDIC_DATA(mdic);
   8629 		if (rv == 0xffff)
   8630 			rv = 0;
   8631 	}
   8632 
   8633 	return rv;
   8634 }
   8635 
   8636 /*
   8637  * wm_gmii_mdic_writereg:	[mii interface function]
   8638  *
   8639  *	Write a PHY register on the GMII.
   8640  */
   8641 static void
   8642 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8643 {
   8644 	struct wm_softc *sc = device_private(self);
   8645 	uint32_t mdic = 0;
   8646 	int i;
   8647 
   8648 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8649 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8650 
   8651 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8652 		mdic = CSR_READ(sc, WMREG_MDIC);
   8653 		if (mdic & MDIC_READY)
   8654 			break;
   8655 		delay(50);
   8656 	}
   8657 
   8658 	if ((mdic & MDIC_READY) == 0)
   8659 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8660 		    device_xname(sc->sc_dev), phy, reg);
   8661 	else if (mdic & MDIC_E)
   8662 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8663 		    device_xname(sc->sc_dev), phy, reg);
   8664 }
   8665 
   8666 /*
   8667  * wm_gmii_i82544_readreg:	[mii interface function]
   8668  *
   8669  *	Read a PHY register on the GMII.
   8670  */
   8671 static int
   8672 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8673 {
   8674 	struct wm_softc *sc = device_private(self);
   8675 	int rv;
   8676 
   8677 	if (sc->phy.acquire(sc)) {
   8678 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8679 		    __func__);
   8680 		return 0;
   8681 	}
   8682 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8683 	sc->phy.release(sc);
   8684 
   8685 	return rv;
   8686 }
   8687 
   8688 /*
   8689  * wm_gmii_i82544_writereg:	[mii interface function]
   8690  *
   8691  *	Write a PHY register on the GMII.
   8692  */
   8693 static void
   8694 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8695 {
   8696 	struct wm_softc *sc = device_private(self);
   8697 
   8698 	if (sc->phy.acquire(sc)) {
   8699 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8700 		    __func__);
         		return;
    8701 	}
   8702 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8703 	sc->phy.release(sc);
   8704 }
   8705 
   8706 /*
   8707  * wm_gmii_i80003_readreg:	[mii interface function]
   8708  *
   8709  *	Read a PHY register on the kumeran
    8710  *	Read a PHY register on the Kumeran interface.
    8711  * This could be handled by the PHY layer if we didn't have to lock the
    8712  * resource ...
   8713 static int
   8714 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8715 {
   8716 	struct wm_softc *sc = device_private(self);
   8717 	int rv;
   8718 
   8719 	if (phy != 1) /* only one PHY on kumeran bus */
   8720 		return 0;
   8721 
   8722 	if (sc->phy.acquire(sc)) {
   8723 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8724 		    __func__);
   8725 		return 0;
   8726 	}
   8727 
   8728 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8729 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8730 		    reg >> GG82563_PAGE_SHIFT);
   8731 	} else {
   8732 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8733 		    reg >> GG82563_PAGE_SHIFT);
   8734 	}
    8735 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8736 	delay(200);
   8737 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8738 	delay(200);
   8739 	sc->phy.release(sc);
   8740 
   8741 	return rv;
   8742 }
   8743 
   8744 /*
   8745  * wm_gmii_i80003_writereg:	[mii interface function]
   8746  *
    8747  *	Write a PHY register on the Kumeran interface.
    8748  * This could be handled by the PHY layer if we didn't have to lock the
    8749  * resource ...
   8750  */
   8751 static void
   8752 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8753 {
   8754 	struct wm_softc *sc = device_private(self);
   8755 
   8756 	if (phy != 1) /* only one PHY on kumeran bus */
   8757 		return;
   8758 
   8759 	if (sc->phy.acquire(sc)) {
   8760 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8761 		    __func__);
   8762 		return;
   8763 	}
   8764 
   8765 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8766 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8767 		    reg >> GG82563_PAGE_SHIFT);
   8768 	} else {
   8769 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8770 		    reg >> GG82563_PAGE_SHIFT);
   8771 	}
    8772 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8773 	delay(200);
   8774 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8775 	delay(200);
   8776 
   8777 	sc->phy.release(sc);
   8778 }
   8779 
   8780 /*
   8781  * wm_gmii_bm_readreg:	[mii interface function]
   8782  *
   8783  *	Read a PHY register on the kumeran
    8784  *	Read a PHY register on BM PHYs (82567 variants, 82574, 82583).
    8785  * This could be handled by the PHY layer if we didn't have to lock the
    8786  * resource ...
   8787 static int
   8788 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8789 {
   8790 	struct wm_softc *sc = device_private(self);
   8791 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8792 	uint16_t val;
   8793 	int rv;
   8794 
   8795 	if (sc->phy.acquire(sc)) {
   8796 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8797 		    __func__);
   8798 		return 0;
   8799 	}
   8800 
   8801 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8802 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8803 		    || (reg == 31)) ? 1 : phy;
   8804 	/* Page 800 works differently than the rest so it has its own func */
   8805 	if (page == BM_WUC_PAGE) {
   8806 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8807 		rv = val;
   8808 		goto release;
   8809 	}
   8810 
   8811 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8812 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8813 		    && (sc->sc_type != WM_T_82583))
   8814 			wm_gmii_mdic_writereg(self, phy,
   8815 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8816 		else
   8817 			wm_gmii_mdic_writereg(self, phy,
   8818 			    BME1000_PHY_PAGE_SELECT, page);
   8819 	}
   8820 
   8821 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8822 
   8823 release:
   8824 	sc->phy.release(sc);
   8825 	return rv;
   8826 }
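
         /*
          * Editor's sketch: callers pack <page, register> into the single reg
          * argument, so e.g. register 17 on page 769 (a hypothetical example)
          * would be passed as
          *
          *	(769 << BME1000_PAGE_SHIFT) | 17
          *
          * The code above recovers the page with a right shift and the
          * in-page offset with MII_ADDRMASK before issuing the MDIC access.
          */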
   8827 
   8828 /*
   8829  * wm_gmii_bm_writereg:	[mii interface function]
   8830  *
    8831  *	Write a PHY register on BM PHYs (82567 variants, 82574, 82583).
    8832  * This could be handled by the PHY layer if we didn't have to lock the
    8833  * resource ...
   8834  */
   8835 static void
   8836 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8837 {
   8838 	struct wm_softc *sc = device_private(self);
   8839 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8840 
   8841 	if (sc->phy.acquire(sc)) {
   8842 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8843 		    __func__);
   8844 		return;
   8845 	}
   8846 
   8847 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8848 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8849 		    || (reg == 31)) ? 1 : phy;
   8850 	/* Page 800 works differently than the rest so it has its own func */
   8851 	if (page == BM_WUC_PAGE) {
   8852 		uint16_t tmp;
   8853 
   8854 		tmp = val;
   8855 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8856 		goto release;
   8857 	}
   8858 
   8859 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8860 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8861 		    && (sc->sc_type != WM_T_82583))
   8862 			wm_gmii_mdic_writereg(self, phy,
   8863 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8864 		else
   8865 			wm_gmii_mdic_writereg(self, phy,
   8866 			    BME1000_PHY_PAGE_SELECT, page);
   8867 	}
   8868 
   8869 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8870 
   8871 release:
   8872 	sc->phy.release(sc);
   8873 }
   8874 
   8875 static void
   8876 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8877 {
   8878 	struct wm_softc *sc = device_private(self);
   8879 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8880 	uint16_t wuce, reg;
   8881 
   8882 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8883 		device_xname(sc->sc_dev), __func__));
   8884 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8885 	if (sc->sc_type == WM_T_PCH) {
    8886 		/* XXX the e1000 driver does nothing here... why? */
   8887 	}
   8888 
   8889 	/*
   8890 	 * 1) Enable PHY wakeup register first.
   8891 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   8892 	 */
   8893 
   8894 	/* Set page 769 */
   8895 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8896 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8897 
   8898 	/* Read WUCE and save it */
   8899 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   8900 
   8901 	reg = wuce | BM_WUC_ENABLE_BIT;
   8902 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   8903 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   8904 
   8905 	/* Select page 800 */
   8906 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8907 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8908 
   8909 	/*
   8910 	 * 2) Access PHY wakeup register.
   8911 	 * See e1000_access_phy_wakeup_reg_bm.
   8912 	 */
   8913 
   8914 	/* Write page 800 */
   8915 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8916 
   8917 	if (rd)
   8918 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8919 	else
   8920 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8921 
   8922 	/*
   8923 	 * 3) Disable PHY wakeup register.
   8924 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   8925 	 */
   8926 	/* Set page 769 */
   8927 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8928 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8929 
   8930 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8931 }
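
         /*
          * Usage sketch (editor's illustration): the BM and HV read/write
          * paths funnel page-800 accesses through the helper above, passing
          * the value by reference and a direction flag, e.g.:
          *
          *	uint16_t val;
          *	wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);	(read)
          *	wm_access_phy_wakeup_reg_bm(self, reg, &val, 0);	(write)
          */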
   8932 
   8933 /*
   8934  * wm_gmii_hv_readreg:	[mii interface function]
   8935  *
    8936  *	Read a PHY register on HV PHYs (PCH and newer).
    8937  * This could be handled by the PHY layer if we didn't have to lock the
    8938  * resource ...
   8939  */
   8940 static int
   8941 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8942 {
   8943 	struct wm_softc *sc = device_private(self);
   8944 	int rv;
   8945 
   8946 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8947 		device_xname(sc->sc_dev), __func__));
   8948 	if (sc->phy.acquire(sc)) {
   8949 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8950 		    __func__);
   8951 		return 0;
   8952 	}
   8953 
   8954 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   8955 	sc->phy.release(sc);
   8956 	return rv;
   8957 }
   8958 
   8959 static int
   8960 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   8961 {
   8962 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8963 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8964 	uint16_t val;
   8965 	int rv;
   8966 
   8967 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   8968 
   8969 	/* Page 800 works differently than the rest so it has its own func */
   8970 	if (page == BM_WUC_PAGE) {
   8971 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8972 		return val;
   8973 	}
   8974 
   8975 	/*
    8976 	 * Pages lower than 768 work differently than the rest, so they
    8977 	 * need their own function (not implemented here)
   8978 	 */
   8979 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8980 		printf("gmii_hv_readreg!!!\n");
   8981 		return 0;
   8982 	}
   8983 
   8984 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8985 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8986 		    page << BME1000_PAGE_SHIFT);
   8987 	}
   8988 
   8989 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   8990 	return rv;
   8991 }
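
         /*
          * Editor's note: HV register arguments use the BM encoding, so
          * BM_PHY_REG_PAGE(reg) and BM_PHY_REG_NUM(reg) recover the page and
          * the in-page register number; any page at or above
          * HV_INTC_FC_PAGE_START is accessed through PHY address 1 regardless
          * of the address the MII layer passed in.
          */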
   8992 
   8993 /*
   8994  * wm_gmii_hv_writereg:	[mii interface function]
   8995  *
    8996  *	Write a PHY register on HV PHYs (PCH and newer).
    8997  * This could be handled by the PHY layer if we didn't have to lock the
    8998  * resource ...
   8999  */
   9000 static void
   9001 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9002 {
   9003 	struct wm_softc *sc = device_private(self);
   9004 
   9005 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9006 		device_xname(sc->sc_dev), __func__));
   9007 
   9008 	if (sc->phy.acquire(sc)) {
   9009 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9010 		    __func__);
   9011 		return;
   9012 	}
   9013 
   9014 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9015 	sc->phy.release(sc);
   9016 }
   9017 
   9018 static void
   9019 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9020 {
   9021 	struct wm_softc *sc = device_private(self);
   9022 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9023 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9024 
   9025 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9026 
   9027 	/* Page 800 works differently than the rest so it has its own func */
   9028 	if (page == BM_WUC_PAGE) {
   9029 		uint16_t tmp;
   9030 
   9031 		tmp = val;
   9032 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9033 		return;
   9034 	}
   9035 
   9036 	/*
    9037 	 * Pages lower than 768 work differently than the rest, so they
    9038 	 * need their own function (not implemented here)
   9039 	 */
   9040 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9041 		printf("gmii_hv_writereg!!!\n");
   9042 		return;
   9043 	}
   9044 
   9045 	{
   9046 		/*
   9047 		 * XXX Workaround MDIO accesses being disabled after entering
   9048 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9049 		 * register is set)
   9050 		 */
   9051 		if (sc->sc_phytype == WMPHY_82578) {
   9052 			struct mii_softc *child;
   9053 
   9054 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9055 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9056 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9057 			    && ((val & (1 << 11)) != 0)) {
   9058 				printf("XXX need workaround\n");
   9059 			}
   9060 		}
   9061 
   9062 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9063 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9064 			    page << BME1000_PAGE_SHIFT);
   9065 		}
   9066 	}
   9067 
   9068 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9069 }
   9070 
   9071 /*
   9072  * wm_gmii_82580_readreg:	[mii interface function]
   9073  *
   9074  *	Read a PHY register on the 82580 and I350.
   9075  * This could be handled by the PHY layer if we didn't have to lock the
    9076  * resource ...
   9077  */
   9078 static int
   9079 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9080 {
   9081 	struct wm_softc *sc = device_private(self);
   9082 	int rv;
   9083 
   9084 	if (sc->phy.acquire(sc) != 0) {
   9085 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9086 		    __func__);
   9087 		return 0;
   9088 	}
   9089 
   9090 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9091 
   9092 	sc->phy.release(sc);
   9093 	return rv;
   9094 }
   9095 
   9096 /*
   9097  * wm_gmii_82580_writereg:	[mii interface function]
   9098  *
   9099  *	Write a PHY register on the 82580 and I350.
   9100  * This could be handled by the PHY layer if we didn't have to lock the
    9101  * resource ...
   9102  */
   9103 static void
   9104 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9105 {
   9106 	struct wm_softc *sc = device_private(self);
   9107 
   9108 	if (sc->phy.acquire(sc) != 0) {
   9109 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9110 		    __func__);
   9111 		return;
   9112 	}
   9113 
   9114 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9115 
   9116 	sc->phy.release(sc);
   9117 }
   9118 
   9119 /*
   9120  * wm_gmii_gs40g_readreg:	[mii interface function]
   9121  *
    9122  *	Read a PHY register on the I210 and I211.
    9123  * This could be handled by the PHY layer if we didn't have to lock the
    9124  * resource ...
   9125  */
   9126 static int
   9127 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9128 {
   9129 	struct wm_softc *sc = device_private(self);
   9130 	int page, offset;
   9131 	int rv;
   9132 
   9133 	/* Acquire semaphore */
   9134 	if (sc->phy.acquire(sc)) {
   9135 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9136 		    __func__);
   9137 		return 0;
   9138 	}
   9139 
   9140 	/* Page select */
   9141 	page = reg >> GS40G_PAGE_SHIFT;
   9142 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9143 
   9144 	/* Read reg */
   9145 	offset = reg & GS40G_OFFSET_MASK;
   9146 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9147 
   9148 	sc->phy.release(sc);
   9149 	return rv;
   9150 }
   9151 
   9152 /*
   9153  * wm_gmii_gs40g_writereg:	[mii interface function]
   9154  *
   9155  *	Write a PHY register on the I210 and I211.
   9156  * This could be handled by the PHY layer if we didn't have to lock the
    9157  * resource ...
   9158  */
   9159 static void
   9160 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9161 {
   9162 	struct wm_softc *sc = device_private(self);
   9163 	int page, offset;
   9164 
   9165 	/* Acquire semaphore */
   9166 	if (sc->phy.acquire(sc)) {
   9167 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9168 		    __func__);
   9169 		return;
   9170 	}
   9171 
   9172 	/* Page select */
   9173 	page = reg >> GS40G_PAGE_SHIFT;
   9174 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9175 
   9176 	/* Write reg */
   9177 	offset = reg & GS40G_OFFSET_MASK;
   9178 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9179 
   9180 	/* Release semaphore */
   9181 	sc->phy.release(sc);
   9182 }
   9183 
   9184 /*
   9185  * wm_gmii_statchg:	[mii interface function]
   9186  *
   9187  *	Callback from MII layer when media changes.
   9188  */
   9189 static void
   9190 wm_gmii_statchg(struct ifnet *ifp)
   9191 {
   9192 	struct wm_softc *sc = ifp->if_softc;
   9193 	struct mii_data *mii = &sc->sc_mii;
   9194 
   9195 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9196 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9197 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9198 
   9199 	/*
   9200 	 * Get flow control negotiation result.
   9201 	 */
   9202 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9203 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9204 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9205 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9206 	}
   9207 
   9208 	if (sc->sc_flowflags & IFM_FLOW) {
   9209 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9210 			sc->sc_ctrl |= CTRL_TFCE;
   9211 			sc->sc_fcrtl |= FCRTL_XONE;
   9212 		}
   9213 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9214 			sc->sc_ctrl |= CTRL_RFCE;
   9215 	}
   9216 
   9217 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9218 		DPRINTF(WM_DEBUG_LINK,
   9219 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9220 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9221 	} else {
   9222 		DPRINTF(WM_DEBUG_LINK,
   9223 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9224 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9225 	}
   9226 
   9227 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9228 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9229 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9230 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9231 	if (sc->sc_type == WM_T_80003) {
   9232 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9233 		case IFM_1000_T:
   9234 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9235 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9236 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9237 			break;
   9238 		default:
   9239 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9240 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9241 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9242 			break;
   9243 		}
   9244 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9245 	}
   9246 }
   9247 
   9248 /*
   9249  * wm_kmrn_readreg:
   9250  *
   9251  *	Read a kumeran register
   9252  */
   9253 static int
   9254 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9255 {
   9256 	int rv;
   9257 
   9258 	if (sc->sc_type == WM_T_80003)
   9259 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9260 	else
   9261 		rv = sc->phy.acquire(sc);
   9262 	if (rv != 0) {
   9263 		aprint_error_dev(sc->sc_dev,
   9264 		    "%s: failed to get semaphore\n", __func__);
   9265 		return 0;
   9266 	}
   9267 
   9268 	rv = wm_kmrn_readreg_locked(sc, reg);
   9269 
   9270 	if (sc->sc_type == WM_T_80003)
   9271 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9272 	else
   9273 		sc->phy.release(sc);
   9274 
   9275 	return rv;
   9276 }
   9277 
   9278 static int
   9279 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9280 {
   9281 	int rv;
   9282 
   9283 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9284 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9285 	    KUMCTRLSTA_REN);
   9286 	CSR_WRITE_FLUSH(sc);
   9287 	delay(2);
   9288 
   9289 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9290 
   9291 	return rv;
   9292 }
   9293 
   9294 /*
   9295  * wm_kmrn_writereg:
   9296  *
   9297  *	Write a kumeran register
   9298  */
   9299 static void
   9300 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9301 {
   9302 	int rv;
   9303 
   9304 	if (sc->sc_type == WM_T_80003)
   9305 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9306 	else
   9307 		rv = sc->phy.acquire(sc);
   9308 	if (rv != 0) {
   9309 		aprint_error_dev(sc->sc_dev,
   9310 		    "%s: failed to get semaphore\n", __func__);
   9311 		return;
   9312 	}
   9313 
   9314 	wm_kmrn_writereg_locked(sc, reg, val);
   9315 
   9316 	if (sc->sc_type == WM_T_80003)
   9317 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9318 	else
   9319 		sc->phy.release(sc);
   9320 }
   9321 
   9322 static void
   9323 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9324 {
   9325 
   9326 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9327 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9328 	    (val & KUMCTRLSTA_MASK));
   9329 }
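
         /*
          * Usage sketch (editor's illustration, mirroring wm_gmii_statchg()
          * above): the 80003 half-duplex control is updated this way on a
          * link change:
          *
          *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
          *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
          */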
   9330 
   9331 /* SGMII related */
   9332 
   9333 /*
   9334  * wm_sgmii_uses_mdio
   9335  *
   9336  * Check whether the transaction is to the internal PHY or the external
   9337  * MDIO interface. Return true if it's MDIO.
   9338  */
   9339 static bool
   9340 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9341 {
   9342 	uint32_t reg;
   9343 	bool ismdio = false;
   9344 
   9345 	switch (sc->sc_type) {
   9346 	case WM_T_82575:
   9347 	case WM_T_82576:
   9348 		reg = CSR_READ(sc, WMREG_MDIC);
   9349 		ismdio = ((reg & MDIC_DEST) != 0);
   9350 		break;
   9351 	case WM_T_82580:
   9352 	case WM_T_I350:
   9353 	case WM_T_I354:
   9354 	case WM_T_I210:
   9355 	case WM_T_I211:
   9356 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9357 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9358 		break;
   9359 	default:
   9360 		break;
   9361 	}
   9362 
   9363 	return ismdio;
   9364 }
   9365 
   9366 /*
   9367  * wm_sgmii_readreg:	[mii interface function]
   9368  *
   9369  *	Read a PHY register on the SGMII
   9370  * This could be handled by the PHY layer if we didn't have to lock the
    9371  * resource ...
   9372  */
   9373 static int
   9374 wm_sgmii_readreg(device_t self, int phy, int reg)
   9375 {
   9376 	struct wm_softc *sc = device_private(self);
   9377 	uint32_t i2ccmd;
   9378 	int i, rv;
   9379 
   9380 	if (sc->phy.acquire(sc)) {
   9381 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9382 		    __func__);
   9383 		return 0;
   9384 	}
   9385 
   9386 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9387 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9388 	    | I2CCMD_OPCODE_READ;
   9389 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9390 
   9391 	/* Poll the ready bit */
   9392 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9393 		delay(50);
   9394 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9395 		if (i2ccmd & I2CCMD_READY)
   9396 			break;
   9397 	}
   9398 	if ((i2ccmd & I2CCMD_READY) == 0)
   9399 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9400 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9401 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9402 
   9403 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9404 
   9405 	sc->phy.release(sc);
   9406 	return rv;
   9407 }
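
         /*
          * Editor's note: the two data bytes in I2CCMD are swapped relative
          * to the PHY register value, so e.g. a register reading 0x1234 comes
          * back from I2CCMD as 0x3412 and is unswapped above;
          * wm_sgmii_writereg() below performs the mirror swap before issuing
          * the write.
          */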
   9408 
   9409 /*
   9410  * wm_sgmii_writereg:	[mii interface function]
   9411  *
   9412  *	Write a PHY register on the SGMII.
   9413  * This could be handled by the PHY layer if we didn't have to lock the
    9414  * resource ...
   9415  */
   9416 static void
   9417 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9418 {
   9419 	struct wm_softc *sc = device_private(self);
   9420 	uint32_t i2ccmd;
   9421 	int i;
   9422 	int val_swapped;
   9423 
   9424 	if (sc->phy.acquire(sc) != 0) {
   9425 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9426 		    __func__);
   9427 		return;
   9428 	}
   9429 	/* Swap the data bytes for the I2C interface */
   9430 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9431 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9432 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9433 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9434 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9435 
   9436 	/* Poll the ready bit */
   9437 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9438 		delay(50);
   9439 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9440 		if (i2ccmd & I2CCMD_READY)
   9441 			break;
   9442 	}
   9443 	if ((i2ccmd & I2CCMD_READY) == 0)
   9444 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9445 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9446 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9447 
   9448 	sc->phy.release(sc);
   9449 }
   9450 
   9451 /* TBI related */
   9452 
   9453 /*
   9454  * wm_tbi_mediainit:
   9455  *
   9456  *	Initialize media for use on 1000BASE-X devices.
   9457  */
   9458 static void
   9459 wm_tbi_mediainit(struct wm_softc *sc)
   9460 {
   9461 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9462 	const char *sep = "";
   9463 
   9464 	if (sc->sc_type < WM_T_82543)
   9465 		sc->sc_tipg = TIPG_WM_DFLT;
   9466 	else
   9467 		sc->sc_tipg = TIPG_LG_DFLT;
   9468 
   9469 	sc->sc_tbi_serdes_anegticks = 5;
   9470 
   9471 	/* Initialize our media structures */
   9472 	sc->sc_mii.mii_ifp = ifp;
   9473 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9474 
   9475 	if ((sc->sc_type >= WM_T_82575)
   9476 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9477 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9478 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9479 	else
   9480 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9481 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9482 
   9483 	/*
   9484 	 * SWD Pins:
   9485 	 *
   9486 	 *	0 = Link LED (output)
   9487 	 *	1 = Loss Of Signal (input)
   9488 	 */
   9489 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9490 
   9491 	/* XXX Perhaps this is only for TBI */
   9492 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9493 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9494 
   9495 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9496 		sc->sc_ctrl &= ~CTRL_LRST;
   9497 
   9498 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9499 
   9500 #define	ADD(ss, mm, dd)							\
   9501 do {									\
   9502 	aprint_normal("%s%s", sep, ss);					\
   9503 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9504 	sep = ", ";							\
   9505 } while (/*CONSTCOND*/0)
   9506 
   9507 	aprint_normal_dev(sc->sc_dev, "");
   9508 
   9509 	/* Only 82545 is LX */
   9510 	if (sc->sc_type == WM_T_82545) {
   9511 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9512 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9513 	} else {
   9514 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9515 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9516 	}
   9517 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9518 	aprint_normal("\n");
   9519 
   9520 #undef ADD
   9521 
   9522 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9523 }
   9524 
   9525 /*
   9526  * wm_tbi_mediachange:	[ifmedia interface function]
   9527  *
   9528  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9529  */
   9530 static int
   9531 wm_tbi_mediachange(struct ifnet *ifp)
   9532 {
   9533 	struct wm_softc *sc = ifp->if_softc;
   9534 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9535 	uint32_t status;
   9536 	int i;
   9537 
   9538 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9539 		/* XXX need some work for >= 82571 and < 82575 */
   9540 		if (sc->sc_type < WM_T_82575)
   9541 			return 0;
   9542 	}
   9543 
   9544 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9545 	    || (sc->sc_type >= WM_T_82575))
   9546 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9547 
   9548 	sc->sc_ctrl &= ~CTRL_LRST;
   9549 	sc->sc_txcw = TXCW_ANE;
   9550 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9551 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9552 	else if (ife->ifm_media & IFM_FDX)
   9553 		sc->sc_txcw |= TXCW_FD;
   9554 	else
   9555 		sc->sc_txcw |= TXCW_HD;
   9556 
   9557 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9558 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9559 
   9560 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9561 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9562 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9563 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9564 	CSR_WRITE_FLUSH(sc);
   9565 	delay(1000);
   9566 
   9567 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9568 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9569 
   9570 	/*
    9571 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    9572 	 * if the optics detect a signal, 0 if they don't.
   9573 	 */
   9574 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9575 		/* Have signal; wait for the link to come up. */
   9576 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9577 			delay(10000);
   9578 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9579 				break;
   9580 		}
   9581 
   9582 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9583 			    device_xname(sc->sc_dev),i));
   9584 
   9585 		status = CSR_READ(sc, WMREG_STATUS);
   9586 		DPRINTF(WM_DEBUG_LINK,
   9587 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9588 			device_xname(sc->sc_dev),status, STATUS_LU));
   9589 		if (status & STATUS_LU) {
   9590 			/* Link is up. */
   9591 			DPRINTF(WM_DEBUG_LINK,
   9592 			    ("%s: LINK: set media -> link up %s\n",
   9593 			    device_xname(sc->sc_dev),
   9594 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9595 
   9596 			/*
    9597 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9598 			 * automatically, so re-read it into sc->sc_ctrl
   9599 			 */
   9600 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9601 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9602 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9603 			if (status & STATUS_FD)
   9604 				sc->sc_tctl |=
   9605 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9606 			else
   9607 				sc->sc_tctl |=
   9608 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9609 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9610 				sc->sc_fcrtl |= FCRTL_XONE;
   9611 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9612 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9613 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9614 				      sc->sc_fcrtl);
   9615 			sc->sc_tbi_linkup = 1;
   9616 		} else {
   9617 			if (i == WM_LINKUP_TIMEOUT)
   9618 				wm_check_for_link(sc);
   9619 			/* Link is down. */
   9620 			DPRINTF(WM_DEBUG_LINK,
   9621 			    ("%s: LINK: set media -> link down\n",
   9622 			    device_xname(sc->sc_dev)));
   9623 			sc->sc_tbi_linkup = 0;
   9624 		}
   9625 	} else {
   9626 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9627 		    device_xname(sc->sc_dev)));
   9628 		sc->sc_tbi_linkup = 0;
   9629 	}
   9630 
   9631 	wm_tbi_serdes_set_linkled(sc);
   9632 
   9633 	return 0;
   9634 }
   9635 
   9636 /*
   9637  * wm_tbi_mediastatus:	[ifmedia interface function]
   9638  *
   9639  *	Get the current interface media status on a 1000BASE-X device.
   9640  */
   9641 static void
   9642 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9643 {
   9644 	struct wm_softc *sc = ifp->if_softc;
   9645 	uint32_t ctrl, status;
   9646 
   9647 	ifmr->ifm_status = IFM_AVALID;
   9648 	ifmr->ifm_active = IFM_ETHER;
   9649 
   9650 	status = CSR_READ(sc, WMREG_STATUS);
   9651 	if ((status & STATUS_LU) == 0) {
   9652 		ifmr->ifm_active |= IFM_NONE;
   9653 		return;
   9654 	}
   9655 
   9656 	ifmr->ifm_status |= IFM_ACTIVE;
   9657 	/* Only 82545 is LX */
   9658 	if (sc->sc_type == WM_T_82545)
   9659 		ifmr->ifm_active |= IFM_1000_LX;
   9660 	else
   9661 		ifmr->ifm_active |= IFM_1000_SX;
   9662 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9663 		ifmr->ifm_active |= IFM_FDX;
   9664 	else
   9665 		ifmr->ifm_active |= IFM_HDX;
   9666 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9667 	if (ctrl & CTRL_RFCE)
   9668 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9669 	if (ctrl & CTRL_TFCE)
   9670 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9671 }
   9672 
   9673 /* XXX TBI only */
   9674 static int
   9675 wm_check_for_link(struct wm_softc *sc)
   9676 {
   9677 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9678 	uint32_t rxcw;
   9679 	uint32_t ctrl;
   9680 	uint32_t status;
   9681 	uint32_t sig;
   9682 
   9683 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9684 		/* XXX need some work for >= 82571 */
   9685 		if (sc->sc_type >= WM_T_82571) {
   9686 			sc->sc_tbi_linkup = 1;
   9687 			return 0;
   9688 		}
   9689 	}
   9690 
   9691 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9692 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9693 	status = CSR_READ(sc, WMREG_STATUS);
   9694 
   9695 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9696 
   9697 	DPRINTF(WM_DEBUG_LINK,
   9698 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9699 		device_xname(sc->sc_dev), __func__,
   9700 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9701 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9702 
    9703 	/*
    9704 	 * SWDPIN   LU RXCW
    9705 	 *      0    0    0
    9706 	 *      0    0    1	(should not happen)
    9707 	 *      0    1    0	(should not happen)
    9708 	 *      0    1    1	(should not happen)
    9709 	 *      1    0    0	Disable autonegotiation and force link up
    9710 	 *      1    0    1	got /C/ but no link yet
    9711 	 *      1    1    0	(link up)
    9712 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
    9713 	 *	(SWDPIN: signal detect pin reads the expected value)
    9714 	 */
   9715 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9716 	    && ((status & STATUS_LU) == 0)
   9717 	    && ((rxcw & RXCW_C) == 0)) {
   9718 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9719 			__func__));
   9720 		sc->sc_tbi_linkup = 0;
   9721 		/* Disable auto-negotiation in the TXCW register */
   9722 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9723 
   9724 		/*
   9725 		 * Force link-up and also force full-duplex.
   9726 		 *
    9727 		 * NOTE: the hardware updates the TFCE and RFCE bits in
    9728 		 * CTRL automatically, so we should update sc->sc_ctrl too.
   9729 		 */
   9730 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9731 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9732 	} else if (((status & STATUS_LU) != 0)
   9733 	    && ((rxcw & RXCW_C) != 0)
   9734 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9735 		sc->sc_tbi_linkup = 1;
   9736 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9737 			__func__));
   9738 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9739 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9740 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9741 	    && ((rxcw & RXCW_C) != 0)) {
   9742 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9743 	} else {
   9744 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9745 			status));
   9746 	}
   9747 
   9748 	return 0;
   9749 }
   9750 
   9751 /*
   9752  * wm_tbi_tick:
   9753  *
   9754  *	Check the link on TBI devices.
    9755  *	This function acts like mii_tick().
   9756  */
   9757 static void
   9758 wm_tbi_tick(struct wm_softc *sc)
   9759 {
   9760 	struct mii_data *mii = &sc->sc_mii;
   9761 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9762 	uint32_t status;
   9763 
   9764 	KASSERT(WM_CORE_LOCKED(sc));
   9765 
   9766 	status = CSR_READ(sc, WMREG_STATUS);
   9767 
   9768 	/* XXX is this needed? */
   9769 	(void)CSR_READ(sc, WMREG_RXCW);
   9770 	(void)CSR_READ(sc, WMREG_CTRL);
   9771 
   9772 	/* set link status */
   9773 	if ((status & STATUS_LU) == 0) {
   9774 		DPRINTF(WM_DEBUG_LINK,
   9775 		    ("%s: LINK: checklink -> down\n",
   9776 			device_xname(sc->sc_dev)));
   9777 		sc->sc_tbi_linkup = 0;
   9778 	} else if (sc->sc_tbi_linkup == 0) {
   9779 		DPRINTF(WM_DEBUG_LINK,
   9780 		    ("%s: LINK: checklink -> up %s\n",
   9781 			device_xname(sc->sc_dev),
   9782 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9783 		sc->sc_tbi_linkup = 1;
   9784 		sc->sc_tbi_serdes_ticks = 0;
   9785 	}
   9786 
   9787 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9788 		goto setled;
   9789 
   9790 	if ((status & STATUS_LU) == 0) {
   9791 		sc->sc_tbi_linkup = 0;
   9792 		/* If the timer expired, retry autonegotiation */
   9793 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9794 		    && (++sc->sc_tbi_serdes_ticks
   9795 			>= sc->sc_tbi_serdes_anegticks)) {
   9796 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9797 			sc->sc_tbi_serdes_ticks = 0;
   9798 			/*
   9799 			 * Reset the link, and let autonegotiation do
   9800 			 * its thing
   9801 			 */
   9802 			sc->sc_ctrl |= CTRL_LRST;
   9803 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9804 			CSR_WRITE_FLUSH(sc);
   9805 			delay(1000);
   9806 			sc->sc_ctrl &= ~CTRL_LRST;
   9807 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9808 			CSR_WRITE_FLUSH(sc);
   9809 			delay(1000);
   9810 			CSR_WRITE(sc, WMREG_TXCW,
   9811 			    sc->sc_txcw & ~TXCW_ANE);
   9812 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9813 		}
   9814 	}
   9815 
   9816 setled:
   9817 	wm_tbi_serdes_set_linkled(sc);
   9818 }
   9819 
   9820 /* SERDES related */
   9821 static void
   9822 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9823 {
   9824 	uint32_t reg;
   9825 
   9826 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9827 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9828 		return;
   9829 
   9830 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9831 	reg |= PCS_CFG_PCS_EN;
   9832 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9833 
   9834 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9835 	reg &= ~CTRL_EXT_SWDPIN(3);
   9836 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9837 	CSR_WRITE_FLUSH(sc);
   9838 }
   9839 
   9840 static int
   9841 wm_serdes_mediachange(struct ifnet *ifp)
   9842 {
   9843 	struct wm_softc *sc = ifp->if_softc;
   9844 	bool pcs_autoneg = true; /* XXX */
   9845 	uint32_t ctrl_ext, pcs_lctl, reg;
   9846 
   9847 	/* XXX Currently, this function is not called on 8257[12] */
   9848 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9849 	    || (sc->sc_type >= WM_T_82575))
   9850 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9851 
   9852 	wm_serdes_power_up_link_82575(sc);
   9853 
   9854 	sc->sc_ctrl |= CTRL_SLU;
   9855 
   9856 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9857 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9858 
   9859 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9860 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9861 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9862 	case CTRL_EXT_LINK_MODE_SGMII:
   9863 		pcs_autoneg = true;
   9864 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9865 		break;
   9866 	case CTRL_EXT_LINK_MODE_1000KX:
   9867 		pcs_autoneg = false;
   9868 		/* FALLTHROUGH */
   9869 	default:
   9870 		if ((sc->sc_type == WM_T_82575)
   9871 		    || (sc->sc_type == WM_T_82576)) {
   9872 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9873 				pcs_autoneg = false;
   9874 		}
   9875 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9876 		    | CTRL_FRCFDX;
   9877 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9878 	}
   9879 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9880 
   9881 	if (pcs_autoneg) {
   9882 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9883 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9884 
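		/* Advertise symmetric and asymmetric pause to the link partner */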
   9885 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9886 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9887 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9888 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9889 	} else
   9890 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9891 
   9892 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9893 
   9895 	return 0;
   9896 }
   9897 
   9898 static void
   9899 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9900 {
   9901 	struct wm_softc *sc = ifp->if_softc;
   9902 	struct mii_data *mii = &sc->sc_mii;
   9903 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9904 	uint32_t pcs_adv, pcs_lpab, reg;
   9905 
   9906 	ifmr->ifm_status = IFM_AVALID;
   9907 	ifmr->ifm_active = IFM_ETHER;
   9908 
   9909 	/* Check PCS */
   9910 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9911 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9912 		ifmr->ifm_active |= IFM_NONE;
   9913 		sc->sc_tbi_linkup = 0;
   9914 		goto setled;
   9915 	}
   9916 
   9917 	sc->sc_tbi_linkup = 1;
   9918 	ifmr->ifm_status |= IFM_ACTIVE;
   9919 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9920 	if ((reg & PCS_LSTS_FDX) != 0)
   9921 		ifmr->ifm_active |= IFM_FDX;
   9922 	else
   9923 		ifmr->ifm_active |= IFM_HDX;
   9924 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9925 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9926 		/* Check flow */
   9927 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9928 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9929 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9930 			goto setled;
   9931 		}
   9932 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9933 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9934 		DPRINTF(WM_DEBUG_LINK,
   9935 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9936 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9937 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9938 			mii->mii_media_active |= IFM_FLOW
   9939 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9940 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9941 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9942 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9943 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9944 			mii->mii_media_active |= IFM_FLOW
   9945 			    | IFM_ETH_TXPAUSE;
   9946 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9947 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9948 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9949 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9950 			mii->mii_media_active |= IFM_FLOW
   9951 			    | IFM_ETH_RXPAUSE;
   9952 		} else {
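			/* No usable pause combination; leave flow control off */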
   9953 		}
   9954 	}
   9955 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9956 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9957 setled:
   9958 	wm_tbi_serdes_set_linkled(sc);
   9959 }
   9960 
   9961 /*
   9962  * wm_serdes_tick:
   9963  *
   9964  *	Check the link on serdes devices.
   9965  */
   9966 static void
   9967 wm_serdes_tick(struct wm_softc *sc)
   9968 {
   9969 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9970 	struct mii_data *mii = &sc->sc_mii;
   9971 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9972 	uint32_t reg;
   9973 
   9974 	KASSERT(WM_CORE_LOCKED(sc));
   9975 
   9976 	mii->mii_media_status = IFM_AVALID;
   9977 	mii->mii_media_active = IFM_ETHER;
   9978 
   9979 	/* Check PCS */
   9980 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9981 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9982 		mii->mii_media_status |= IFM_ACTIVE;
   9983 		sc->sc_tbi_linkup = 1;
   9984 		sc->sc_tbi_serdes_ticks = 0;
   9985 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9986 		if ((reg & PCS_LSTS_FDX) != 0)
   9987 			mii->mii_media_active |= IFM_FDX;
   9988 		else
   9989 			mii->mii_media_active |= IFM_HDX;
   9990 	} else {
   9991 		mii->mii_media_status |= IFM_NONE;
   9992 		sc->sc_tbi_linkup = 0;
    9993 		/* If the timer expired, retry autonegotiation */
   9994 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9995 		    && (++sc->sc_tbi_serdes_ticks
   9996 			>= sc->sc_tbi_serdes_anegticks)) {
   9997 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9998 			sc->sc_tbi_serdes_ticks = 0;
   9999 			/* XXX */
   10000 			wm_serdes_mediachange(ifp);
   10001 		}
   10002 	}
   10003 
   10004 	wm_tbi_serdes_set_linkled(sc);
   10005 }
   10006 
   10007 /* SFP related */
   10008 
   10009 static int
   10010 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10011 {
   10012 	uint32_t i2ccmd;
   10013 	int i;
   10014 
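	/* Compose a single-byte I2C read: register offset plus READ opcode */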
   10015 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10016 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10017 
   10018 	/* Poll the ready bit */
   10019 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10020 		delay(50);
   10021 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10022 		if (i2ccmd & I2CCMD_READY)
   10023 			break;
   10024 	}
   10025 	if ((i2ccmd & I2CCMD_READY) == 0)
   10026 		return -1;
   10027 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10028 		return -1;
   10029 
   10030 	*data = i2ccmd & 0x00ff;
   10031 
   10032 	return 0;
   10033 }
   10034 
   10035 static uint32_t
   10036 wm_sfp_get_media_type(struct wm_softc *sc)
   10037 {
   10038 	uint32_t ctrl_ext;
   10039 	uint8_t val = 0;
   10040 	int timeout = 3;
   10041 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10042 	int rv = -1;
   10043 
   10044 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10045 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10046 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10047 	CSR_WRITE_FLUSH(sc);
   10048 
   10049 	/* Read SFP module data */
   10050 	while (timeout) {
   10051 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10052 		if (rv == 0)
   10053 			break;
   10054 		delay(100*1000); /* XXX too big */
   10055 		timeout--;
   10056 	}
   10057 	if (rv != 0)
   10058 		goto out;
   10059 	switch (val) {
   10060 	case SFF_SFP_ID_SFF:
   10061 		aprint_normal_dev(sc->sc_dev,
   10062 		    "Module/Connector soldered to board\n");
   10063 		break;
   10064 	case SFF_SFP_ID_SFP:
   10065 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10066 		break;
   10067 	case SFF_SFP_ID_UNKNOWN:
   10068 		goto out;
   10069 	default:
   10070 		break;
   10071 	}
   10072 
   10073 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10074 	if (rv != 0) {
   10075 		goto out;
   10076 	}
   10077 
   10078 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10079 		mediatype = WM_MEDIATYPE_SERDES;
   10080 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10081 		sc->sc_flags |= WM_F_SGMII;
   10082 		mediatype = WM_MEDIATYPE_COPPER;
   10083 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10084 		sc->sc_flags |= WM_F_SGMII;
   10085 		mediatype = WM_MEDIATYPE_SERDES;
   10086 	}
   10087 
   10088 out:
   10089 	/* Restore I2C interface setting */
   10090 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10091 
   10092 	return mediatype;
   10093 }
   10094 /*
   10095  * NVM related.
    10096  * Microwire, SPI (with or without EERD) and Flash.
   10097  */
   10098 
   10099 /* Both spi and uwire */
   10100 
   10101 /*
   10102  * wm_eeprom_sendbits:
   10103  *
   10104  *	Send a series of bits to the EEPROM.
   10105  */
   10106 static void
   10107 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10108 {
   10109 	uint32_t reg;
   10110 	int x;
   10111 
   10112 	reg = CSR_READ(sc, WMREG_EECD);
   10113 
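	/*
	 * Clock each bit out MSB first: drive DI to the bit value, then
	 * pulse SK high and low, pausing ~2us around each edge.
	 */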
   10114 	for (x = nbits; x > 0; x--) {
   10115 		if (bits & (1U << (x - 1)))
   10116 			reg |= EECD_DI;
   10117 		else
   10118 			reg &= ~EECD_DI;
   10119 		CSR_WRITE(sc, WMREG_EECD, reg);
   10120 		CSR_WRITE_FLUSH(sc);
   10121 		delay(2);
   10122 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10123 		CSR_WRITE_FLUSH(sc);
   10124 		delay(2);
   10125 		CSR_WRITE(sc, WMREG_EECD, reg);
   10126 		CSR_WRITE_FLUSH(sc);
   10127 		delay(2);
   10128 	}
   10129 }
   10130 
   10131 /*
   10132  * wm_eeprom_recvbits:
   10133  *
   10134  *	Receive a series of bits from the EEPROM.
   10135  */
   10136 static void
   10137 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10138 {
   10139 	uint32_t reg, val;
   10140 	int x;
   10141 
   10142 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10143 
   10144 	val = 0;
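	/*
	 * Clock each bit in MSB first: raise SK, wait ~2us, sample DO,
	 * then lower SK and wait again.
	 */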
   10145 	for (x = nbits; x > 0; x--) {
   10146 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10147 		CSR_WRITE_FLUSH(sc);
   10148 		delay(2);
   10149 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10150 			val |= (1U << (x - 1));
   10151 		CSR_WRITE(sc, WMREG_EECD, reg);
   10152 		CSR_WRITE_FLUSH(sc);
   10153 		delay(2);
   10154 	}
   10155 	*valp = val;
   10156 }
   10157 
   10158 /* Microwire */
   10159 
   10160 /*
   10161  * wm_nvm_read_uwire:
   10162  *
   10163  *	Read a word from the EEPROM using the MicroWire protocol.
   10164  */
   10165 static int
   10166 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10167 {
   10168 	uint32_t reg, val;
   10169 	int i;
   10170 
   10171 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10172 		device_xname(sc->sc_dev), __func__));
   10173 
   10174 	for (i = 0; i < wordcnt; i++) {
   10175 		/* Clear SK and DI. */
   10176 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10177 		CSR_WRITE(sc, WMREG_EECD, reg);
   10178 
   10179 		/*
   10180 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10181 		 * and Xen.
   10182 		 *
   10183 		 * We use this workaround only for 82540 because qemu's
    10184 		 * e1000 acts as an 82540.
   10185 		 */
   10186 		if (sc->sc_type == WM_T_82540) {
   10187 			reg |= EECD_SK;
   10188 			CSR_WRITE(sc, WMREG_EECD, reg);
   10189 			reg &= ~EECD_SK;
   10190 			CSR_WRITE(sc, WMREG_EECD, reg);
   10191 			CSR_WRITE_FLUSH(sc);
   10192 			delay(2);
   10193 		}
   10194 		/* XXX: end of workaround */
   10195 
   10196 		/* Set CHIP SELECT. */
   10197 		reg |= EECD_CS;
   10198 		CSR_WRITE(sc, WMREG_EECD, reg);
   10199 		CSR_WRITE_FLUSH(sc);
   10200 		delay(2);
   10201 
   10202 		/* Shift in the READ command. */
   10203 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10204 
   10205 		/* Shift in address. */
   10206 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10207 
   10208 		/* Shift out the data. */
   10209 		wm_eeprom_recvbits(sc, &val, 16);
   10210 		data[i] = val & 0xffff;
   10211 
   10212 		/* Clear CHIP SELECT. */
   10213 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10214 		CSR_WRITE(sc, WMREG_EECD, reg);
   10215 		CSR_WRITE_FLUSH(sc);
   10216 		delay(2);
   10217 	}
   10218 
   10219 	return 0;
   10220 }
   10221 
   10222 /* SPI */
   10223 
   10224 /*
   10225  * Set SPI and FLASH related information from the EECD register.
   10226  * For 82541 and 82547, the word size is taken from EEPROM.
   10227  */
   10228 static int
   10229 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10230 {
   10231 	int size;
   10232 	uint32_t reg;
   10233 	uint16_t data;
   10234 
   10235 	reg = CSR_READ(sc, WMREG_EECD);
   10236 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10237 
   10238 	/* Read the size of NVM from EECD by default */
   10239 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10240 	switch (sc->sc_type) {
   10241 	case WM_T_82541:
   10242 	case WM_T_82541_2:
   10243 	case WM_T_82547:
   10244 	case WM_T_82547_2:
   10245 		/* Set dummy value to access EEPROM */
   10246 		sc->sc_nvm_wordsize = 64;
   10247 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10248 		reg = data;
   10249 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10250 		if (size == 0)
   10251 			size = 6; /* 64 word size */
   10252 		else
   10253 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10254 		break;
   10255 	case WM_T_80003:
   10256 	case WM_T_82571:
   10257 	case WM_T_82572:
   10258 	case WM_T_82573: /* SPI case */
   10259 	case WM_T_82574: /* SPI case */
   10260 	case WM_T_82583: /* SPI case */
   10261 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10262 		if (size > 14)
   10263 			size = 14;
   10264 		break;
   10265 	case WM_T_82575:
   10266 	case WM_T_82576:
   10267 	case WM_T_82580:
   10268 	case WM_T_I350:
   10269 	case WM_T_I354:
   10270 	case WM_T_I210:
   10271 	case WM_T_I211:
   10272 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10273 		if (size > 15)
   10274 			size = 15;
   10275 		break;
   10276 	default:
   10277 		aprint_error_dev(sc->sc_dev,
   10278 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    10279 		return -1;
   10281 	}
   10282 
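	/*
	 * Worked example (illustrative, assuming NVM_WORD_SIZE_BASE_SHIFT
	 * is 6 as in the Intel shared code): an EECD size field of 2
	 * yields size = 2 + 6 = 8, so 1 << 8 = 256 words of NVM.
	 */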
   10283 	sc->sc_nvm_wordsize = 1 << size;
   10284 
   10285 	return 0;
   10286 }
   10287 
   10288 /*
   10289  * wm_nvm_ready_spi:
   10290  *
   10291  *	Wait for a SPI EEPROM to be ready for commands.
   10292  */
   10293 static int
   10294 wm_nvm_ready_spi(struct wm_softc *sc)
   10295 {
   10296 	uint32_t val;
   10297 	int usec;
   10298 
   10299 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10300 		device_xname(sc->sc_dev), __func__));
   10301 
   10302 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10303 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10304 		wm_eeprom_recvbits(sc, &val, 8);
   10305 		if ((val & SPI_SR_RDY) == 0)
   10306 			break;
   10307 	}
   10308 	if (usec >= SPI_MAX_RETRIES) {
    10309 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10310 		return 1;
   10311 	}
   10312 	return 0;
   10313 }
   10314 
   10315 /*
   10316  * wm_nvm_read_spi:
   10317  *
    10318  *	Read a word from the EEPROM using the SPI protocol.
   10319  */
   10320 static int
   10321 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10322 {
   10323 	uint32_t reg, val;
   10324 	int i;
   10325 	uint8_t opc;
   10326 
   10327 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10328 		device_xname(sc->sc_dev), __func__));
   10329 
   10330 	/* Clear SK and CS. */
   10331 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10332 	CSR_WRITE(sc, WMREG_EECD, reg);
   10333 	CSR_WRITE_FLUSH(sc);
   10334 	delay(2);
   10335 
   10336 	if (wm_nvm_ready_spi(sc))
   10337 		return 1;
   10338 
   10339 	/* Toggle CS to flush commands. */
   10340 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10341 	CSR_WRITE_FLUSH(sc);
   10342 	delay(2);
   10343 	CSR_WRITE(sc, WMREG_EECD, reg);
   10344 	CSR_WRITE_FLUSH(sc);
   10345 	delay(2);
   10346 
   10347 	opc = SPI_OPC_READ;
   10348 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10349 		opc |= SPI_OPC_A8;
   10350 
   10351 	wm_eeprom_sendbits(sc, opc, 8);
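	/* SPI EEPROMs are byte addressed, so send the word offset doubled */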
   10352 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10353 
   10354 	for (i = 0; i < wordcnt; i++) {
   10355 		wm_eeprom_recvbits(sc, &val, 16);
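		/* The EEPROM shifts out the high byte first; swap to host order */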
   10356 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10357 	}
   10358 
   10359 	/* Raise CS and clear SK. */
   10360 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10361 	CSR_WRITE(sc, WMREG_EECD, reg);
   10362 	CSR_WRITE_FLUSH(sc);
   10363 	delay(2);
   10364 
   10365 	return 0;
   10366 }
   10367 
    10368 /* Reading with the EERD register */
   10369 
   10370 static int
   10371 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10372 {
   10373 	uint32_t attempts = 100000;
   10374 	uint32_t i, reg = 0;
   10375 	int32_t done = -1;
   10376 
   10377 	for (i = 0; i < attempts; i++) {
   10378 		reg = CSR_READ(sc, rw);
   10379 
   10380 		if (reg & EERD_DONE) {
   10381 			done = 0;
   10382 			break;
   10383 		}
   10384 		delay(5);
   10385 	}
   10386 
   10387 	return done;
   10388 }
   10389 
   10390 static int
   10391 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10392     uint16_t *data)
   10393 {
   10394 	int i, eerd = 0;
   10395 	int error = 0;
   10396 
   10397 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10398 		device_xname(sc->sc_dev), __func__));
   10399 
   10400 	for (i = 0; i < wordcnt; i++) {
   10401 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10402 
   10403 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10404 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10405 		if (error != 0)
   10406 			break;
   10407 
   10408 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10409 	}
   10410 
   10411 	return error;
   10412 }
   10413 
   10414 /* Flash */
   10415 
   10416 static int
   10417 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10418 {
   10419 	uint32_t eecd;
   10420 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10421 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10422 	uint8_t sig_byte = 0;
   10423 
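	/*
	 * act_offset is the byte address of the bank 0 signature: the
	 * signature word offset doubled (word -> byte), plus one for the
	 * word's upper byte, which carries the signature bits.
	 */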
   10424 	switch (sc->sc_type) {
   10425 	case WM_T_PCH_SPT:
   10426 		/*
   10427 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10428 		 * sector valid bits from the NVM.
   10429 		 */
   10430 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10431 		if ((*bank == 0) || (*bank == 1)) {
   10432 			aprint_error_dev(sc->sc_dev,
   10433 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10434 				*bank);
   10435 			return -1;
   10436 		} else {
   10437 			*bank = *bank - 2;
   10438 			return 0;
   10439 		}
   10440 	case WM_T_ICH8:
   10441 	case WM_T_ICH9:
   10442 		eecd = CSR_READ(sc, WMREG_EECD);
   10443 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10444 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10445 			return 0;
   10446 		}
   10447 		/* FALLTHROUGH */
   10448 	default:
   10449 		/* Default to 0 */
   10450 		*bank = 0;
   10451 
   10452 		/* Check bank 0 */
   10453 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10454 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10455 			*bank = 0;
   10456 			return 0;
   10457 		}
   10458 
   10459 		/* Check bank 1 */
   10460 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10461 		    &sig_byte);
   10462 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10463 			*bank = 1;
   10464 			return 0;
   10465 		}
   10466 	}
   10467 
   10468 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10469 		device_xname(sc->sc_dev)));
   10470 	return -1;
   10471 }
   10472 
   10473 /******************************************************************************
   10474  * This function does initial flash setup so that a new read/write/erase cycle
   10475  * can be started.
   10476  *
   10477  * sc - The pointer to the hw structure
   10478  ****************************************************************************/
   10479 static int32_t
   10480 wm_ich8_cycle_init(struct wm_softc *sc)
   10481 {
   10482 	uint16_t hsfsts;
   10483 	int32_t error = 1;
   10484 	int32_t i     = 0;
   10485 
   10486 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10487 
    10488 	/* Check the Flash Descriptor Valid bit in HW status */
   10489 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10490 		return error;
   10491 	}
   10492 
   10493 	/* Clear FCERR in Hw status by writing 1 */
   10494 	/* Clear DAEL in Hw status by writing a 1 */
   10495 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10496 
   10497 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10498 
    10499 	/*
    10500 	 * To start a new cycle safely we would either need a hardware
    10501 	 * "SPI cycle in progress" bit to check against, or the FDONE bit
    10502 	 * would have to read as 1 after hardware reset, so that it could
    10503 	 * be used to tell whether a cycle is in progress or has been
    10504 	 * completed. We should also have a software semaphore mechanism
    10505 	 * guarding FDONE or the cycle-in-progress bit, so that accesses
    10506 	 * by two threads are serialized and two threads cannot start a
    10507 	 * cycle at the same time.
    10508 	 */
   10509 
   10510 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10511 		/*
   10512 		 * There is no cycle running at present, so we can start a
   10513 		 * cycle
   10514 		 */
   10515 
   10516 		/* Begin by setting Flash Cycle Done. */
   10517 		hsfsts |= HSFSTS_DONE;
   10518 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10519 		error = 0;
   10520 	} else {
   10521 		/*
    10522 		 * Otherwise, poll for some time so the current cycle has a
   10523 		 * chance to end before giving up.
   10524 		 */
   10525 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10526 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10527 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10528 				error = 0;
   10529 				break;
   10530 			}
   10531 			delay(1);
   10532 		}
   10533 		if (error == 0) {
   10534 			/*
    10535 			 * The previous cycle completed while we waited;
    10536 			 * now set the Flash Cycle Done bit.
   10537 			 */
   10538 			hsfsts |= HSFSTS_DONE;
   10539 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10540 		}
   10541 	}
   10542 	return error;
   10543 }
   10544 
   10545 /******************************************************************************
   10546  * This function starts a flash cycle and waits for its completion
   10547  *
   10548  * sc - The pointer to the hw structure
   10549  ****************************************************************************/
   10550 static int32_t
   10551 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10552 {
   10553 	uint16_t hsflctl;
   10554 	uint16_t hsfsts;
   10555 	int32_t error = 1;
   10556 	uint32_t i = 0;
   10557 
   10558 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10559 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10560 	hsflctl |= HSFCTL_GO;
   10561 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10562 
   10563 	/* Wait till FDONE bit is set to 1 */
   10564 	do {
   10565 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10566 		if (hsfsts & HSFSTS_DONE)
   10567 			break;
   10568 		delay(1);
   10569 		i++;
   10570 	} while (i < timeout);
    10571 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10572 		error = 0;
   10573 
   10574 	return error;
   10575 }
   10576 
   10577 /******************************************************************************
   10578  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10579  *
   10580  * sc - The pointer to the hw structure
   10581  * index - The index of the byte or word to read.
   10582  * size - Size of data to read, 1=byte 2=word, 4=dword
   10583  * data - Pointer to the word to store the value read.
   10584  *****************************************************************************/
   10585 static int32_t
   10586 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10587     uint32_t size, uint32_t *data)
   10588 {
   10589 	uint16_t hsfsts;
   10590 	uint16_t hsflctl;
   10591 	uint32_t flash_linear_address;
   10592 	uint32_t flash_data = 0;
   10593 	int32_t error = 1;
   10594 	int32_t count = 0;
   10595 
    10596 	if (size < 1 || size > 4 || data == NULL ||
   10597 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10598 		return error;
   10599 
   10600 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10601 	    sc->sc_ich8_flash_base;
   10602 
   10603 	do {
   10604 		delay(1);
   10605 		/* Steps */
   10606 		error = wm_ich8_cycle_init(sc);
   10607 		if (error)
   10608 			break;
   10609 
   10610 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10611 		/* BCOUNT holds size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
   10612 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10613 		    & HSFCTL_BCOUNT_MASK;
   10614 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10615 		if (sc->sc_type == WM_T_PCH_SPT) {
   10616 			/*
    10617 			 * In SPT, this register is in LAN memory space, not
    10618 			 * flash. Therefore, only 32-bit access is supported.
   10619 			 */
   10620 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10621 			    (uint32_t)hsflctl);
   10622 		} else
   10623 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10624 
   10625 		/*
   10626 		 * Write the last 24 bits of index into Flash Linear address
   10627 		 * field in Flash Address
   10628 		 */
   10629 		/* TODO: TBD maybe check the index against the size of flash */
   10630 
   10631 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10632 
   10633 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10634 
   10635 		/*
    10636 		 * Check if FCERR is set to 1; if so, clear it and retry
    10637 		 * the whole sequence a few more times before giving up,
    10638 		 * else read in (shift in) the Flash Data0 register, least
    10639 		 * significant byte first.
   10640 		 */
   10641 		if (error == 0) {
   10642 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10643 			if (size == 1)
   10644 				*data = (uint8_t)(flash_data & 0x000000FF);
   10645 			else if (size == 2)
   10646 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10647 			else if (size == 4)
   10648 				*data = (uint32_t)flash_data;
   10649 			break;
   10650 		} else {
   10651 			/*
   10652 			 * If we've gotten here, then things are probably
   10653 			 * completely hosed, but if the error condition is
   10654 			 * detected, it won't hurt to give it another try...
   10655 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10656 			 */
   10657 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10658 			if (hsfsts & HSFSTS_ERR) {
   10659 				/* Repeat for some time before giving up. */
   10660 				continue;
   10661 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10662 				break;
   10663 		}
   10664 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10665 
   10666 	return error;
   10667 }
   10668 
   10669 /******************************************************************************
   10670  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10671  *
   10672  * sc - pointer to wm_hw structure
   10673  * index - The index of the byte to read.
   10674  * data - Pointer to a byte to store the value read.
   10675  *****************************************************************************/
   10676 static int32_t
   10677 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10678 {
   10679 	int32_t status;
   10680 	uint32_t word = 0;
   10681 
   10682 	status = wm_read_ich8_data(sc, index, 1, &word);
   10683 	if (status == 0)
   10684 		*data = (uint8_t)word;
   10685 	else
   10686 		*data = 0;
   10687 
   10688 	return status;
   10689 }
   10690 
   10691 /******************************************************************************
   10692  * Reads a word from the NVM using the ICH8 flash access registers.
   10693  *
   10694  * sc - pointer to wm_hw structure
   10695  * index - The starting byte index of the word to read.
   10696  * data - Pointer to a word to store the value read.
   10697  *****************************************************************************/
   10698 static int32_t
   10699 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10700 {
   10701 	int32_t status;
   10702 	uint32_t word = 0;
   10703 
   10704 	status = wm_read_ich8_data(sc, index, 2, &word);
   10705 	if (status == 0)
   10706 		*data = (uint16_t)word;
   10707 	else
   10708 		*data = 0;
   10709 
   10710 	return status;
   10711 }
   10712 
   10713 /******************************************************************************
   10714  * Reads a dword from the NVM using the ICH8 flash access registers.
   10715  *
   10716  * sc - pointer to wm_hw structure
   10717  * index - The starting byte index of the word to read.
   10718  * data - Pointer to a word to store the value read.
   10719  *****************************************************************************/
   10720 static int32_t
   10721 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10722 {
   10723 	int32_t status;
   10724 
   10725 	status = wm_read_ich8_data(sc, index, 4, data);
   10726 	return status;
   10727 }
   10728 
   10729 /******************************************************************************
   10730  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10731  * register.
   10732  *
   10733  * sc - Struct containing variables accessed by shared code
   10734  * offset - offset of word in the EEPROM to read
   10735  * data - word read from the EEPROM
   10736  * words - number of words to read
   10737  *****************************************************************************/
   10738 static int
   10739 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10740 {
   10741 	int32_t  error = 0;
   10742 	uint32_t flash_bank = 0;
   10743 	uint32_t act_offset = 0;
   10744 	uint32_t bank_offset = 0;
   10745 	uint16_t word = 0;
   10746 	uint16_t i = 0;
   10747 
   10748 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10749 		device_xname(sc->sc_dev), __func__));
   10750 
   10751 	/*
   10752 	 * We need to know which is the valid flash bank.  In the event
   10753 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10754 	 * managing flash_bank.  So it cannot be trusted and needs
   10755 	 * to be updated with each read.
   10756 	 */
   10757 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10758 	if (error) {
   10759 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10760 			device_xname(sc->sc_dev)));
   10761 		flash_bank = 0;
   10762 	}
   10763 
   10764 	/*
   10765 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10766 	 * size
   10767 	 */
   10768 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10769 
   10770 	error = wm_get_swfwhw_semaphore(sc);
   10771 	if (error) {
   10772 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10773 		    __func__);
   10774 		return error;
   10775 	}
   10776 
   10777 	for (i = 0; i < words; i++) {
   10778 		/* The NVM part needs a byte offset, hence * 2 */
   10779 		act_offset = bank_offset + ((offset + i) * 2);
   10780 		error = wm_read_ich8_word(sc, act_offset, &word);
   10781 		if (error) {
   10782 			aprint_error_dev(sc->sc_dev,
   10783 			    "%s: failed to read NVM\n", __func__);
   10784 			break;
   10785 		}
   10786 		data[i] = word;
   10787 	}
   10788 
   10789 	wm_put_swfwhw_semaphore(sc);
   10790 	return error;
   10791 }
   10792 
   10793 /******************************************************************************
   10794  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10795  * register.
   10796  *
   10797  * sc - Struct containing variables accessed by shared code
   10798  * offset - offset of word in the EEPROM to read
   10799  * data - word read from the EEPROM
   10800  * words - number of words to read
   10801  *****************************************************************************/
   10802 static int
   10803 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10804 {
   10805 	int32_t  error = 0;
   10806 	uint32_t flash_bank = 0;
   10807 	uint32_t act_offset = 0;
   10808 	uint32_t bank_offset = 0;
   10809 	uint32_t dword = 0;
   10810 	uint16_t i = 0;
   10811 
   10812 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10813 		device_xname(sc->sc_dev), __func__));
   10814 
   10815 	/*
   10816 	 * We need to know which is the valid flash bank.  In the event
   10817 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10818 	 * managing flash_bank.  So it cannot be trusted and needs
   10819 	 * to be updated with each read.
   10820 	 */
   10821 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10822 	if (error) {
   10823 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10824 			device_xname(sc->sc_dev)));
   10825 		flash_bank = 0;
   10826 	}
   10827 
   10828 	/*
   10829 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10830 	 * size
   10831 	 */
   10832 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10833 
   10834 	error = wm_get_swfwhw_semaphore(sc);
   10835 	if (error) {
   10836 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10837 		    __func__);
   10838 		return error;
   10839 	}
   10840 
   10841 	for (i = 0; i < words; i++) {
   10842 		/* The NVM part needs a byte offset, hence * 2 */
   10843 		act_offset = bank_offset + ((offset + i) * 2);
   10844 		/* but we must read dword aligned, so mask ... */
   10845 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10846 		if (error) {
   10847 			aprint_error_dev(sc->sc_dev,
   10848 			    "%s: failed to read NVM\n", __func__);
   10849 			break;
   10850 		}
   10851 		/* ... and pick out low or high word */
   10852 		if ((act_offset & 0x2) == 0)
   10853 			data[i] = (uint16_t)(dword & 0xFFFF);
   10854 		else
   10855 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10856 	}
   10857 
   10858 	wm_put_swfwhw_semaphore(sc);
   10859 	return error;
   10860 }
   10861 
   10862 /* iNVM */
   10863 
   10864 static int
   10865 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10866 {
   10867 	int32_t  rv = 0;
   10868 	uint32_t invm_dword;
   10869 	uint16_t i;
   10870 	uint8_t record_type, word_address;
   10871 
   10872 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10873 		device_xname(sc->sc_dev), __func__));
   10874 
   10875 	for (i = 0; i < INVM_SIZE; i++) {
   10876 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10877 		/* Get record type */
   10878 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10879 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10880 			break;
   10881 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10882 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10883 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10884 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10885 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10886 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10887 			if (word_address == address) {
   10888 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10889 				rv = 0;
   10890 				break;
   10891 			}
   10892 		}
   10893 	}
   10894 
   10895 	return rv;
   10896 }
   10897 
   10898 static int
   10899 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10900 {
   10901 	int rv = 0;
   10902 	int i;
   10903 
   10904 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10905 		device_xname(sc->sc_dev), __func__));
   10906 
   10907 	for (i = 0; i < words; i++) {
   10908 		switch (offset + i) {
   10909 		case NVM_OFF_MACADDR:
   10910 		case NVM_OFF_MACADDR1:
   10911 		case NVM_OFF_MACADDR2:
   10912 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10913 			if (rv != 0) {
   10914 				data[i] = 0xffff;
   10915 				rv = -1;
   10916 			}
   10917 			break;
   10918 		case NVM_OFF_CFG2:
   10919 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10920 			if (rv != 0) {
   10921 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10922 				rv = 0;
   10923 			}
   10924 			break;
   10925 		case NVM_OFF_CFG4:
   10926 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10927 			if (rv != 0) {
   10928 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10929 				rv = 0;
   10930 			}
   10931 			break;
   10932 		case NVM_OFF_LED_1_CFG:
   10933 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10934 			if (rv != 0) {
   10935 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10936 				rv = 0;
   10937 			}
   10938 			break;
   10939 		case NVM_OFF_LED_0_2_CFG:
   10940 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10941 			if (rv != 0) {
   10942 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10943 				rv = 0;
   10944 			}
   10945 			break;
   10946 		case NVM_OFF_ID_LED_SETTINGS:
   10947 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10948 			if (rv != 0) {
   10949 				*data = ID_LED_RESERVED_FFFF;
   10950 				rv = 0;
   10951 			}
   10952 			break;
   10953 		default:
   10954 			DPRINTF(WM_DEBUG_NVM,
   10955 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10956 			*data = NVM_RESERVED_WORD;
   10957 			break;
   10958 		}
   10959 	}
   10960 
   10961 	return rv;
   10962 }
   10963 
    10964 /* Locking, NVM type detection, checksum validation, version and read */
   10965 
   10966 /*
   10967  * wm_nvm_acquire:
   10968  *
   10969  *	Perform the EEPROM handshake required on some chips.
   10970  */
   10971 static int
   10972 wm_nvm_acquire(struct wm_softc *sc)
   10973 {
   10974 	uint32_t reg;
   10975 	int x;
   10976 	int ret = 0;
   10977 
   10978 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10979 		device_xname(sc->sc_dev), __func__));
   10980 
   10981 	if (sc->sc_type >= WM_T_ICH8) {
   10982 		ret = wm_get_nvm_ich8lan(sc);
   10983 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10984 		ret = wm_get_swfwhw_semaphore(sc);
   10985 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10986 		/* This will also do wm_get_swsm_semaphore() if needed */
   10987 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10988 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10989 		ret = wm_get_swsm_semaphore(sc);
   10990 	}
   10991 
   10992 	if (ret) {
   10993 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10994 			__func__);
   10995 		return 1;
   10996 	}
   10997 
   10998 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10999 		reg = CSR_READ(sc, WMREG_EECD);
   11000 
   11001 		/* Request EEPROM access. */
   11002 		reg |= EECD_EE_REQ;
   11003 		CSR_WRITE(sc, WMREG_EECD, reg);
   11004 
    11005 		/* ... and wait for it to be granted. */
   11006 		for (x = 0; x < 1000; x++) {
   11007 			reg = CSR_READ(sc, WMREG_EECD);
   11008 			if (reg & EECD_EE_GNT)
   11009 				break;
   11010 			delay(5);
   11011 		}
   11012 		if ((reg & EECD_EE_GNT) == 0) {
   11013 			aprint_error_dev(sc->sc_dev,
   11014 			    "could not acquire EEPROM GNT\n");
   11015 			reg &= ~EECD_EE_REQ;
   11016 			CSR_WRITE(sc, WMREG_EECD, reg);
   11017 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11018 				wm_put_swfwhw_semaphore(sc);
   11019 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11020 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11021 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11022 				wm_put_swsm_semaphore(sc);
   11023 			return 1;
   11024 		}
   11025 	}
   11026 
   11027 	return 0;
   11028 }
   11029 
   11030 /*
   11031  * wm_nvm_release:
   11032  *
   11033  *	Release the EEPROM mutex.
   11034  */
   11035 static void
   11036 wm_nvm_release(struct wm_softc *sc)
   11037 {
   11038 	uint32_t reg;
   11039 
   11040 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11041 		device_xname(sc->sc_dev), __func__));
   11042 
   11043 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11044 		reg = CSR_READ(sc, WMREG_EECD);
   11045 		reg &= ~EECD_EE_REQ;
   11046 		CSR_WRITE(sc, WMREG_EECD, reg);
   11047 	}
   11048 
   11049 	if (sc->sc_type >= WM_T_ICH8) {
   11050 		wm_put_nvm_ich8lan(sc);
   11051 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11052 		wm_put_swfwhw_semaphore(sc);
   11053 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11054 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11055 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11056 		wm_put_swsm_semaphore(sc);
   11057 }
   11058 
   11059 static int
   11060 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11061 {
   11062 	uint32_t eecd = 0;
   11063 
   11064 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11065 	    || sc->sc_type == WM_T_82583) {
   11066 		eecd = CSR_READ(sc, WMREG_EECD);
   11067 
   11068 		/* Isolate bits 15 & 16 */
   11069 		eecd = ((eecd >> 15) & 0x03);
   11070 
   11071 		/* If both bits are set, device is Flash type */
   11072 		if (eecd == 0x03)
   11073 			return 0;
   11074 	}
   11075 	return 1;
   11076 }
   11077 
   11078 static int
   11079 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11080 {
   11081 	uint32_t eec;
   11082 
   11083 	eec = CSR_READ(sc, WMREG_EEC);
   11084 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11085 		return 1;
   11086 
   11087 	return 0;
   11088 }
   11089 
   11090 /*
   11091  * wm_nvm_validate_checksum
   11092  *
   11093  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11094  */
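/*
 * Worked example (illustrative only): if words 0x00-0x3e sum to 0x1234,
 * the checksum word at 0x3f must hold NVM_CHECKSUM - 0x1234 (mod 2^16)
 * so that the sum of all 64 words equals NVM_CHECKSUM (0xBABA in the
 * Intel shared code).
 */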
   11095 static int
   11096 wm_nvm_validate_checksum(struct wm_softc *sc)
   11097 {
   11098 	uint16_t checksum;
   11099 	uint16_t eeprom_data;
   11100 #ifdef WM_DEBUG
   11101 	uint16_t csum_wordaddr, valid_checksum;
   11102 #endif
   11103 	int i;
   11104 
   11105 	checksum = 0;
   11106 
   11107 	/* Don't check for I211 */
   11108 	if (sc->sc_type == WM_T_I211)
   11109 		return 0;
   11110 
   11111 #ifdef WM_DEBUG
   11112 	if (sc->sc_type == WM_T_PCH_LPT) {
   11113 		csum_wordaddr = NVM_OFF_COMPAT;
   11114 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11115 	} else {
   11116 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11117 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11118 	}
   11119 
   11120 	/* Dump EEPROM image for debug */
   11121 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11122 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11123 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11124 		/* XXX PCH_SPT? */
   11125 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11126 		if ((eeprom_data & valid_checksum) == 0) {
   11127 			DPRINTF(WM_DEBUG_NVM,
   11128 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11129 				device_xname(sc->sc_dev), eeprom_data,
   11130 				    valid_checksum));
   11131 		}
   11132 	}
   11133 
   11134 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11135 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11136 		for (i = 0; i < NVM_SIZE; i++) {
   11137 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11138 				printf("XXXX ");
   11139 			else
   11140 				printf("%04hx ", eeprom_data);
   11141 			if (i % 8 == 7)
   11142 				printf("\n");
   11143 		}
   11144 	}
   11145 
   11146 #endif /* WM_DEBUG */
   11147 
   11148 	for (i = 0; i < NVM_SIZE; i++) {
   11149 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11150 			return 1;
   11151 		checksum += eeprom_data;
   11152 	}
   11153 
   11154 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11155 #ifdef WM_DEBUG
   11156 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11157 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11158 #endif
   11159 	}
   11160 
   11161 	return 0;
   11162 }
   11163 
   11164 static void
   11165 wm_nvm_version_invm(struct wm_softc *sc)
   11166 {
   11167 	uint32_t dword;
   11168 
   11169 	/*
    11170 	 * Linux's code to decode the version is very strange, so we
    11171 	 * don't follow that algorithm and just use word 61 as the
    11172 	 * documentation describes. It may not be perfect, though...
   11173 	 *
   11174 	 * Example:
   11175 	 *
   11176 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11177 	 */
   11178 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11179 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11180 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11181 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11182 }
   11183 
   11184 static void
   11185 wm_nvm_version(struct wm_softc *sc)
   11186 {
   11187 	uint16_t major, minor, build, patch;
   11188 	uint16_t uid0, uid1;
   11189 	uint16_t nvm_data;
   11190 	uint16_t off;
   11191 	bool check_version = false;
   11192 	bool check_optionrom = false;
   11193 	bool have_build = false;
   11194 
   11195 	/*
   11196 	 * Version format:
   11197 	 *
   11198 	 * XYYZ
   11199 	 * X0YZ
   11200 	 * X0YY
   11201 	 *
   11202 	 * Example:
   11203 	 *
   11204 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11205 	 *	82571	0x50a6	5.10.6?
   11206 	 *	82572	0x506a	5.6.10?
   11207 	 *	82572EI	0x5069	5.6.9?
   11208 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11209 	 *		0x2013	2.1.3?
    11210 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11211 	 */
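	/*
	 * Worked decode (illustrative): nvm_data 0x50a2 splits into
	 * major = 0x5, minor = 0x0a and build = 0x2; the hex minor is
	 * later read as decimal (0x0a -> 10), giving "5.10.2".
	 */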
   11212 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11213 	switch (sc->sc_type) {
   11214 	case WM_T_82571:
   11215 	case WM_T_82572:
   11216 	case WM_T_82574:
   11217 	case WM_T_82583:
   11218 		check_version = true;
   11219 		check_optionrom = true;
   11220 		have_build = true;
   11221 		break;
   11222 	case WM_T_82575:
   11223 	case WM_T_82576:
   11224 	case WM_T_82580:
   11225 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11226 			check_version = true;
   11227 		break;
   11228 	case WM_T_I211:
   11229 		wm_nvm_version_invm(sc);
   11230 		goto printver;
   11231 	case WM_T_I210:
   11232 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11233 			wm_nvm_version_invm(sc);
   11234 			goto printver;
   11235 		}
   11236 		/* FALLTHROUGH */
   11237 	case WM_T_I350:
   11238 	case WM_T_I354:
   11239 		check_version = true;
   11240 		check_optionrom = true;
   11241 		break;
   11242 	default:
   11243 		return;
   11244 	}
   11245 	if (check_version) {
   11246 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11247 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11248 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11249 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11250 			build = nvm_data & NVM_BUILD_MASK;
   11251 			have_build = true;
   11252 		} else
   11253 			minor = nvm_data & 0x00ff;
   11254 
    11255 		/* Convert the hex-coded minor to decimal (e.g. 0x0a -> 10) */
   11256 		minor = (minor / 16) * 10 + (minor % 16);
   11257 		sc->sc_nvm_ver_major = major;
   11258 		sc->sc_nvm_ver_minor = minor;
   11259 
   11260 printver:
   11261 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11262 		    sc->sc_nvm_ver_minor);
   11263 		if (have_build) {
   11264 			sc->sc_nvm_ver_build = build;
   11265 			aprint_verbose(".%d", build);
   11266 		}
   11267 	}
   11268 	if (check_optionrom) {
   11269 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11270 		/* Option ROM Version */
   11271 		if ((off != 0x0000) && (off != 0xffff)) {
   11272 			off += NVM_COMBO_VER_OFF;
   11273 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11274 			wm_nvm_read(sc, off, 1, &uid0);
   11275 			if ((uid0 != 0) && (uid0 != 0xffff)
   11276 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11277 				/* 16bits */
   11278 				major = uid0 >> 8;
   11279 				build = (uid0 << 8) | (uid1 >> 8);
   11280 				patch = uid1 & 0x00ff;
   11281 				aprint_verbose(", option ROM Version %d.%d.%d",
   11282 				    major, build, patch);
   11283 			}
   11284 		}
   11285 	}
   11286 
   11287 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11288 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11289 }
   11290 
   11291 /*
   11292  * wm_nvm_read:
   11293  *
   11294  *	Read data from the serial EEPROM.
   11295  */
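/*
 * Usage sketch (illustrative only): fetching the three words of the
 * primary MAC address through the dispatcher below.
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea), myea))
 *		aprint_error_dev(sc->sc_dev, "failed to read MAC\n");
 */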
   11296 static int
   11297 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11298 {
   11299 	int rv;
   11300 
   11301 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11302 		device_xname(sc->sc_dev), __func__));
   11303 
   11304 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11305 		return 1;
   11306 
   11307 	if (wm_nvm_acquire(sc))
   11308 		return 1;
   11309 
   11310 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11311 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11312 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11313 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11314 	else if (sc->sc_type == WM_T_PCH_SPT)
   11315 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11316 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11317 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11318 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11319 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11320 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11321 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11322 	else
   11323 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11324 
   11325 	wm_nvm_release(sc);
   11326 	return rv;
   11327 }
   11328 
   11329 /*
   11330  * Hardware semaphores.
    11331  * Very complex...
   11332  */
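
/*
 * Usage sketch (illustrative only): the acquire/release pattern these
 * helpers implement, shown with the EEPROM SW/FW semaphore:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM))
 *		return 1;
 *	... access the shared EEPROM resource ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */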
   11333 
   11334 static int
   11335 wm_get_null(struct wm_softc *sc)
   11336 {
   11337 
   11338 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11339 		device_xname(sc->sc_dev), __func__));
   11340 	return 0;
   11341 }
   11342 
   11343 static void
   11344 wm_put_null(struct wm_softc *sc)
   11345 {
   11346 
   11347 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11348 		device_xname(sc->sc_dev), __func__));
   11349 	return;
   11350 }
   11351 
   11352 /*
   11353  * Get hardware semaphore.
   11354  * Same as e1000_get_hw_semaphore_generic()
   11355  */
   11356 static int
   11357 wm_get_swsm_semaphore(struct wm_softc *sc)
   11358 {
   11359 	int32_t timeout;
   11360 	uint32_t swsm;
   11361 
   11362 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11363 		device_xname(sc->sc_dev), __func__));
   11364 	KASSERT(sc->sc_nvm_wordsize > 0);
   11365 
   11366 	/* Get the SW semaphore. */
   11367 	timeout = sc->sc_nvm_wordsize + 1;
   11368 	while (timeout) {
   11369 		swsm = CSR_READ(sc, WMREG_SWSM);
   11370 
   11371 		if ((swsm & SWSM_SMBI) == 0)
   11372 			break;
   11373 
   11374 		delay(50);
   11375 		timeout--;
   11376 	}
   11377 
   11378 	if (timeout == 0) {
   11379 		aprint_error_dev(sc->sc_dev,
   11380 		    "could not acquire SWSM SMBI\n");
   11381 		return 1;
   11382 	}
   11383 
   11384 	/* Get the FW semaphore. */
   11385 	timeout = sc->sc_nvm_wordsize + 1;
   11386 	while (timeout) {
   11387 		swsm = CSR_READ(sc, WMREG_SWSM);
   11388 		swsm |= SWSM_SWESMBI;
   11389 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11390 		/* If we managed to set the bit we got the semaphore. */
   11391 		swsm = CSR_READ(sc, WMREG_SWSM);
   11392 		if (swsm & SWSM_SWESMBI)
   11393 			break;
   11394 
   11395 		delay(50);
   11396 		timeout--;
   11397 	}
   11398 
   11399 	if (timeout == 0) {
   11400 		aprint_error_dev(sc->sc_dev,
   11401 		    "could not acquire SWSM SWESMBI\n");
   11402 		/* Release semaphores */
   11403 		wm_put_swsm_semaphore(sc);
   11404 		return 1;
   11405 	}
   11406 	return 0;
   11407 }
   11408 
   11409 /*
   11410  * Put hardware semaphore.
   11411  * Same as e1000_put_hw_semaphore_generic()
   11412  */
   11413 static void
   11414 wm_put_swsm_semaphore(struct wm_softc *sc)
   11415 {
   11416 	uint32_t swsm;
   11417 
   11418 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11419 		device_xname(sc->sc_dev), __func__));
   11420 
   11421 	swsm = CSR_READ(sc, WMREG_SWSM);
   11422 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11423 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11424 }
   11425 
   11426 /*
   11427  * Get SW/FW semaphore.
   11428  * Same as e1000_acquire_swfw_sync_82575().
   11429  */
   11430 static int
   11431 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11432 {
   11433 	uint32_t swfw_sync;
   11434 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11435 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11436 	int timeout;
   11437 
   11438 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11439 		device_xname(sc->sc_dev), __func__));
   11440 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11441 
   11442 	for (timeout = 0; timeout < 200; timeout++) {
   11443 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11444 			if (wm_get_swsm_semaphore(sc)) {
   11445 				aprint_error_dev(sc->sc_dev,
   11446 				    "%s: failed to get semaphore\n",
   11447 				    __func__);
   11448 				return 1;
   11449 			}
   11450 		}
   11451 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11452 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11453 			swfw_sync |= swmask;
   11454 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11455 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11456 				wm_put_swsm_semaphore(sc);
   11457 			return 0;
   11458 		}
   11459 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11460 			wm_put_swsm_semaphore(sc);
   11461 		delay(5000);
   11462 	}
   11463 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11464 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11465 	return 1;
   11466 }
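          /*
           * Mask handling above: the same resource mask appears twice in
           * SW_FW_SYNC, shifted into a software half and a firmware half,
           * and the resource is only taken when neither side holds it.
           * Callers pass per-resource masks, e.g. swfwphysem[sc->sc_funcid]
           * just below.
           */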
   11467 
   11468 static void
   11469 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11470 {
   11471 	uint32_t swfw_sync;
   11472 
   11473 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11474 		device_xname(sc->sc_dev), __func__));
   11475 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11476 
   11477 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11478 		while (wm_get_swsm_semaphore(sc) != 0)
   11479 			continue;
   11480 	}
   11481 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11482 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11483 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11484 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11485 		wm_put_swsm_semaphore(sc);
   11486 }
   11487 
   11488 static int
   11489 wm_get_phy_82575(struct wm_softc *sc)
   11490 {
   11491 
   11492 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11493 		device_xname(sc->sc_dev), __func__));
   11494 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11495 }
   11496 
   11497 static void
   11498 wm_put_phy_82575(struct wm_softc *sc)
   11499 {
   11500 
   11501 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11502 		device_xname(sc->sc_dev), __func__));
   11503 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11504 }
   11505 
   11506 static int
   11507 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11508 {
   11509 	uint32_t ext_ctrl;
    11510 	int timeout;
   11511 
   11512 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11513 		device_xname(sc->sc_dev), __func__));
   11514 
   11515 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11516 	for (timeout = 0; timeout < 200; timeout++) {
   11517 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11518 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11519 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11520 
   11521 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11522 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11523 			return 0;
   11524 		delay(5000);
   11525 	}
   11526 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11527 	    device_xname(sc->sc_dev), ext_ctrl);
   11528 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11529 	return 1;
   11530 }
   11531 
   11532 static void
   11533 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11534 {
   11535 	uint32_t ext_ctrl;
   11536 
   11537 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11538 		device_xname(sc->sc_dev), __func__));
   11539 
   11540 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11541 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11542 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11543 
   11544 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11545 }
   11546 
   11547 static int
   11548 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11549 {
   11550 	uint32_t ext_ctrl;
   11551 	int timeout;
   11552 
   11553 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11554 		device_xname(sc->sc_dev), __func__));
   11555 	mutex_enter(sc->sc_ich_phymtx);
   11556 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11557 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11558 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11559 			break;
   11560 		delay(1000);
   11561 	}
   11562 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11563 		printf("%s: SW has already locked the resource\n",
   11564 		    device_xname(sc->sc_dev));
   11565 		goto out;
   11566 	}
   11567 
   11568 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11569 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11570 	for (timeout = 0; timeout < 1000; timeout++) {
   11571 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11572 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11573 			break;
   11574 		delay(1000);
   11575 	}
   11576 	if (timeout >= 1000) {
   11577 		printf("%s: failed to acquire semaphore\n",
   11578 		    device_xname(sc->sc_dev));
   11579 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11580 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11581 		goto out;
   11582 	}
   11583 	return 0;
   11584 
   11585 out:
   11586 	mutex_exit(sc->sc_ich_phymtx);
   11587 	return 1;
   11588 }
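          /*
           * The acquisition above is two-phase: first wait for the current
           * owner to drop EXTCNFCTR_MDIO_SW_OWNERSHIP, then set the bit and
           * read it back to confirm the ownership actually stuck.
           */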
   11589 
   11590 static void
   11591 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11592 {
   11593 	uint32_t ext_ctrl;
   11594 
   11595 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11596 		device_xname(sc->sc_dev), __func__));
   11597 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11598 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11599 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11600 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11601 	} else {
   11602 		printf("%s: Semaphore unexpectedly released\n",
   11603 		    device_xname(sc->sc_dev));
   11604 	}
   11605 
   11606 	mutex_exit(sc->sc_ich_phymtx);
   11607 }
   11608 
   11609 static int
   11610 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11611 {
   11612 
   11613 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11614 		device_xname(sc->sc_dev), __func__));
   11615 	mutex_enter(sc->sc_ich_nvmmtx);
   11616 
   11617 	return 0;
   11618 }
   11619 
   11620 static void
   11621 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11622 {
   11623 
   11624 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11625 		device_xname(sc->sc_dev), __func__));
   11626 	mutex_exit(sc->sc_ich_nvmmtx);
   11627 }
   11628 
   11629 static int
   11630 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11631 {
   11632 	int i = 0;
   11633 	uint32_t reg;
   11634 
   11635 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11636 		device_xname(sc->sc_dev), __func__));
   11637 
   11638 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11639 	do {
   11640 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11641 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11642 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11643 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11644 			break;
   11645 		delay(2*1000);
   11646 		i++;
   11647 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11648 
   11649 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11650 		wm_put_hw_semaphore_82573(sc);
   11651 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11652 		    device_xname(sc->sc_dev));
   11653 		return -1;
   11654 	}
   11655 
   11656 	return 0;
   11657 }
   11658 
   11659 static void
   11660 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11661 {
   11662 	uint32_t reg;
   11663 
   11664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11665 		device_xname(sc->sc_dev), __func__));
   11666 
   11667 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11668 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11669 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11670 }
   11671 
   11672 /*
   11673  * Management mode and power management related subroutines.
   11674  * BMC, AMT, suspend/resume and EEE.
   11675  */
   11676 
   11677 #ifdef WM_WOL
   11678 static int
   11679 wm_check_mng_mode(struct wm_softc *sc)
   11680 {
   11681 	int rv;
   11682 
   11683 	switch (sc->sc_type) {
   11684 	case WM_T_ICH8:
   11685 	case WM_T_ICH9:
   11686 	case WM_T_ICH10:
   11687 	case WM_T_PCH:
   11688 	case WM_T_PCH2:
   11689 	case WM_T_PCH_LPT:
   11690 	case WM_T_PCH_SPT:
   11691 		rv = wm_check_mng_mode_ich8lan(sc);
   11692 		break;
   11693 	case WM_T_82574:
   11694 	case WM_T_82583:
   11695 		rv = wm_check_mng_mode_82574(sc);
   11696 		break;
   11697 	case WM_T_82571:
   11698 	case WM_T_82572:
   11699 	case WM_T_82573:
   11700 	case WM_T_80003:
   11701 		rv = wm_check_mng_mode_generic(sc);
   11702 		break;
   11703 	default:
    11704 		/* nothing to do */
   11705 		rv = 0;
   11706 		break;
   11707 	}
   11708 
   11709 	return rv;
   11710 }
   11711 
   11712 static int
   11713 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11714 {
   11715 	uint32_t fwsm;
   11716 
   11717 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11718 
   11719 	if (((fwsm & FWSM_FW_VALID) != 0)
   11720 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11721 		return 1;
   11722 
   11723 	return 0;
   11724 }
   11725 
   11726 static int
   11727 wm_check_mng_mode_82574(struct wm_softc *sc)
   11728 {
   11729 	uint16_t data;
   11730 
   11731 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11732 
   11733 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11734 		return 1;
   11735 
   11736 	return 0;
   11737 }
   11738 
   11739 static int
   11740 wm_check_mng_mode_generic(struct wm_softc *sc)
   11741 {
   11742 	uint32_t fwsm;
   11743 
   11744 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11745 
   11746 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11747 		return 1;
   11748 
   11749 	return 0;
   11750 }
   11751 #endif /* WM_WOL */
   11752 
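          /*
           * Decide whether management pass-through must be enabled, i.e.
           * whether manageability (TCO) packets received by the MAC also
           * have to be passed up to the host.  Returns 1 when:
           * - the ARC subsystem is valid, FACTPS_MNGCG is clear and the
           *   FWSM mode is ICH iAMT, or
           * - on 82574/82583, FACTPS_MNGCG is clear and the NVM MNGM field
           *   selects pass-through, or
           * - otherwise, SMBus is enabled and ASF is not.
           * Modelled on e1000's enable_mng_pass_thru logic.
           */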
   11753 static int
   11754 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11755 {
   11756 	uint32_t manc, fwsm, factps;
   11757 
   11758 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11759 		return 0;
   11760 
   11761 	manc = CSR_READ(sc, WMREG_MANC);
   11762 
   11763 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11764 		device_xname(sc->sc_dev), manc));
   11765 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11766 		return 0;
   11767 
   11768 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11769 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11770 		factps = CSR_READ(sc, WMREG_FACTPS);
   11771 		if (((factps & FACTPS_MNGCG) == 0)
   11772 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11773 			return 1;
    11774 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11775 		uint16_t data;
   11776 
   11777 		factps = CSR_READ(sc, WMREG_FACTPS);
   11778 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11779 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11780 			device_xname(sc->sc_dev), factps, data));
   11781 		if (((factps & FACTPS_MNGCG) == 0)
   11782 		    && ((data & NVM_CFG2_MNGM_MASK)
   11783 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11784 			return 1;
   11785 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11786 	    && ((manc & MANC_ASF_EN) == 0))
   11787 		return 1;
   11788 
   11789 	return 0;
   11790 }
   11791 
   11792 static bool
   11793 wm_phy_resetisblocked(struct wm_softc *sc)
   11794 {
   11795 	bool blocked = false;
   11796 	uint32_t reg;
   11797 	int i = 0;
   11798 
   11799 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11800 		device_xname(sc->sc_dev), __func__));
   11801 
   11802 	switch (sc->sc_type) {
   11803 	case WM_T_ICH8:
   11804 	case WM_T_ICH9:
   11805 	case WM_T_ICH10:
   11806 	case WM_T_PCH:
   11807 	case WM_T_PCH2:
   11808 	case WM_T_PCH_LPT:
   11809 	case WM_T_PCH_SPT:
   11810 		do {
   11811 			reg = CSR_READ(sc, WMREG_FWSM);
   11812 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11813 				blocked = true;
   11814 				delay(10*1000);
   11815 				continue;
   11816 			}
   11817 			blocked = false;
   11818 		} while (blocked && (i++ < 30));
   11819 		return blocked;
   11821 	case WM_T_82571:
   11822 	case WM_T_82572:
   11823 	case WM_T_82573:
   11824 	case WM_T_82574:
   11825 	case WM_T_82583:
   11826 	case WM_T_80003:
   11827 		reg = CSR_READ(sc, WMREG_MANC);
    11828 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   11833 	default:
   11834 		/* no problem */
   11835 		break;
   11836 	}
   11837 
   11838 	return false;
   11839 }
   11840 
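          /*
           * wm_get_hw_control()/wm_release_hw_control(): tell the firmware
           * that the driver has taken over (resp. given up) the device by
           * setting or clearing the DRV_LOAD bit - in SWSM on the 82573,
           * in CTRL_EXT on the other chips handled here.
           */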
   11841 static void
   11842 wm_get_hw_control(struct wm_softc *sc)
   11843 {
   11844 	uint32_t reg;
   11845 
   11846 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11847 		device_xname(sc->sc_dev), __func__));
   11848 
   11849 	switch (sc->sc_type) {
   11850 	case WM_T_82573:
   11851 		reg = CSR_READ(sc, WMREG_SWSM);
   11852 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11853 		break;
   11854 	case WM_T_82571:
   11855 	case WM_T_82572:
   11856 	case WM_T_82574:
   11857 	case WM_T_82583:
   11858 	case WM_T_80003:
   11859 	case WM_T_ICH8:
   11860 	case WM_T_ICH9:
   11861 	case WM_T_ICH10:
   11862 	case WM_T_PCH:
   11863 	case WM_T_PCH2:
   11864 	case WM_T_PCH_LPT:
   11865 	case WM_T_PCH_SPT:
   11866 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11867 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11868 		break;
   11869 	default:
   11870 		break;
   11871 	}
   11872 }
   11873 
   11874 static void
   11875 wm_release_hw_control(struct wm_softc *sc)
   11876 {
   11877 	uint32_t reg;
   11878 
   11879 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11880 		device_xname(sc->sc_dev), __func__));
   11881 
   11882 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11883 		return;
   11884 
   11885 	if (sc->sc_type == WM_T_82573) {
   11886 		reg = CSR_READ(sc, WMREG_SWSM);
   11887 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11888 	} else {
   11889 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11890 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11891 	}
   11892 }
   11893 
   11894 static void
   11895 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11896 {
   11897 	uint32_t reg;
   11898 
   11899 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11900 		device_xname(sc->sc_dev), __func__));
   11901 
   11902 	if (sc->sc_type < WM_T_PCH2)
   11903 		return;
   11904 
   11905 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11906 
   11907 	if (gate)
   11908 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11909 	else
   11910 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11911 
   11912 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11913 }
   11914 
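          /*
           * Switch the PHY from SMBus back to PCIe-style MDIO access by
           * toggling the LANPHYPC value pin (on PCH_LPT and newer, SMBus
           * mode is forced around the toggle), so the driver can reach the
           * PHY when firmware left it on SMBus.  Only done when no valid
           * firmware is present and the PHY reset is not blocked.
           */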
   11915 static void
   11916 wm_smbustopci(struct wm_softc *sc)
   11917 {
   11918 	uint32_t fwsm, reg;
   11919 
   11920 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11921 		device_xname(sc->sc_dev), __func__));
   11922 
   11923 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11924 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11925 
   11926 	/* Acquire PHY semaphore */
   11927 	sc->phy.acquire(sc);
   11928 
   11929 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11930 	if (((fwsm & FWSM_FW_VALID) == 0)
    11931 	    && (wm_phy_resetisblocked(sc) == false)) {
   11932 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11933 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11934 			reg |= CTRL_EXT_FORCE_SMBUS;
   11935 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11936 			CSR_WRITE_FLUSH(sc);
   11937 			delay(50*1000);
   11938 		}
   11939 
   11940 		/* Toggle LANPHYPC */
   11941 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11942 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11943 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11944 		CSR_WRITE_FLUSH(sc);
   11945 		delay(1000);
   11946 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11947 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11948 		CSR_WRITE_FLUSH(sc);
   11949 		delay(50*1000);
   11950 
   11951 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11952 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11953 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11954 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11955 		}
   11956 	}
   11957 
   11958 	/* Release semaphore */
   11959 	sc->phy.release(sc);
   11960 
   11961 	/*
   11962 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11963 	 */
   11964 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11965 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11966 }
   11967 
   11968 static void
   11969 wm_init_manageability(struct wm_softc *sc)
   11970 {
   11971 
   11972 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11973 		device_xname(sc->sc_dev), __func__));
   11974 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11975 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11976 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11977 
   11978 		/* Disable hardware interception of ARP */
   11979 		manc &= ~MANC_ARP_EN;
   11980 
   11981 		/* Enable receiving management packets to the host */
   11982 		if (sc->sc_type >= WM_T_82571) {
   11983 			manc |= MANC_EN_MNG2HOST;
    11984 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11985 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11986 		}
   11987 
   11988 		CSR_WRITE(sc, WMREG_MANC, manc);
   11989 	}
   11990 }
   11991 
   11992 static void
   11993 wm_release_manageability(struct wm_softc *sc)
   11994 {
   11995 
   11996 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11997 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11998 
   11999 		manc |= MANC_ARP_EN;
   12000 		if (sc->sc_type >= WM_T_82571)
   12001 			manc &= ~MANC_EN_MNG2HOST;
   12002 
   12003 		CSR_WRITE(sc, WMREG_MANC, manc);
   12004 	}
   12005 }
   12006 
   12007 static void
   12008 wm_get_wakeup(struct wm_softc *sc)
   12009 {
   12010 
   12011 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12012 	switch (sc->sc_type) {
   12013 	case WM_T_82573:
   12014 	case WM_T_82583:
   12015 		sc->sc_flags |= WM_F_HAS_AMT;
   12016 		/* FALLTHROUGH */
   12017 	case WM_T_80003:
   12018 	case WM_T_82541:
   12019 	case WM_T_82547:
   12020 	case WM_T_82571:
   12021 	case WM_T_82572:
   12022 	case WM_T_82574:
   12023 	case WM_T_82575:
   12024 	case WM_T_82576:
   12025 	case WM_T_82580:
   12026 	case WM_T_I350:
   12027 	case WM_T_I354:
   12028 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12029 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12030 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12031 		break;
   12032 	case WM_T_ICH8:
   12033 	case WM_T_ICH9:
   12034 	case WM_T_ICH10:
   12035 	case WM_T_PCH:
   12036 	case WM_T_PCH2:
   12037 	case WM_T_PCH_LPT:
   12038 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   12039 		sc->sc_flags |= WM_F_HAS_AMT;
   12040 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12041 		break;
   12042 	default:
   12043 		break;
   12044 	}
   12045 
   12046 	/* 1: HAS_MANAGE */
   12047 	if (wm_enable_mng_pass_thru(sc) != 0)
   12048 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12049 
   12050 #ifdef WM_DEBUG
   12051 	printf("\n");
   12052 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12053 		printf("HAS_AMT,");
   12054 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12055 		printf("ARC_SUBSYS_VALID,");
   12056 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12057 		printf("ASF_FIRMWARE_PRES,");
   12058 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12059 		printf("HAS_MANAGE,");
   12060 	printf("\n");
   12061 #endif
   12062 	/*
    12063 	 * Note that the WOL flags are set after the EEPROM stuff has
    12064 	 * been reset.
   12065 	 */
   12066 }
   12067 
   12068 /* WOL in the newer chipset interfaces (pchlan) */
   12069 static void
   12070 wm_enable_phy_wakeup(struct wm_softc *sc)
   12071 {
   12072 #if 0
   12073 	uint16_t preg;
   12074 
   12075 	/* Copy MAC RARs to PHY RARs */
   12076 
   12077 	/* Copy MAC MTA to PHY MTA */
   12078 
   12079 	/* Configure PHY Rx Control register */
   12080 
   12081 	/* Enable PHY wakeup in MAC register */
   12082 
   12083 	/* Configure and enable PHY wakeup in PHY registers */
   12084 
   12085 	/* Activate PHY wakeup */
   12086 
   12087 	/* XXX */
   12088 #endif
   12089 }
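          /*
           * The skeleton above mirrors the steps of the corresponding e1000
           * code but none of it is implemented yet, so PHY-based wakeup on
           * PCH and newer parts is effectively a no-op for now.
           */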
   12090 
   12091 /* Power down workaround on D3 */
   12092 static void
   12093 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12094 {
   12095 	uint32_t reg;
   12096 	int i;
   12097 
   12098 	for (i = 0; i < 2; i++) {
   12099 		/* Disable link */
   12100 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12101 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12102 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12103 
   12104 		/*
   12105 		 * Call gig speed drop workaround on Gig disable before
   12106 		 * accessing any PHY registers
   12107 		 */
   12108 		if (sc->sc_type == WM_T_ICH8)
   12109 			wm_gig_downshift_workaround_ich8lan(sc);
   12110 
   12111 		/* Write VR power-down enable */
   12112 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12113 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12114 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12115 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12116 
   12117 		/* Read it back and test */
   12118 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12119 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12120 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12121 			break;
   12122 
   12123 		/* Issue PHY reset and repeat at most one more time */
   12124 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12125 	}
   12126 }
   12127 
   12128 static void
   12129 wm_enable_wakeup(struct wm_softc *sc)
   12130 {
   12131 	uint32_t reg, pmreg;
   12132 	pcireg_t pmode;
   12133 
   12134 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12135 		device_xname(sc->sc_dev), __func__));
   12136 
   12137 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12138 		&pmreg, NULL) == 0)
   12139 		return;
   12140 
   12141 	/* Advertise the wakeup capability */
   12142 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12143 	    | CTRL_SWDPIN(3));
   12144 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12145 
   12146 	/* ICH workaround */
   12147 	switch (sc->sc_type) {
   12148 	case WM_T_ICH8:
   12149 	case WM_T_ICH9:
   12150 	case WM_T_ICH10:
   12151 	case WM_T_PCH:
   12152 	case WM_T_PCH2:
   12153 	case WM_T_PCH_LPT:
   12154 	case WM_T_PCH_SPT:
   12155 		/* Disable gig during WOL */
   12156 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12157 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12158 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12159 		if (sc->sc_type == WM_T_PCH)
   12160 			wm_gmii_reset(sc);
   12161 
   12162 		/* Power down workaround */
   12163 		if (sc->sc_phytype == WMPHY_82577) {
   12164 			struct mii_softc *child;
   12165 
   12166 			/* Assume that the PHY is copper */
   12167 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    12168 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   12169 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12170 				    (768 << 5) | 25, 0x0444); /* magic num */
   12171 		}
   12172 		break;
   12173 	default:
   12174 		break;
   12175 	}
   12176 
   12177 	/* Keep the laser running on fiber adapters */
   12178 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12179 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12180 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12181 		reg |= CTRL_EXT_SWDPIN(3);
   12182 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12183 	}
   12184 
   12185 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12186 #if 0	/* for the multicast packet */
   12187 	reg |= WUFC_MC;
   12188 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12189 #endif
   12190 
   12191 	if (sc->sc_type >= WM_T_PCH)
   12192 		wm_enable_phy_wakeup(sc);
   12193 	else {
   12194 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   12195 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12196 	}
   12197 
    12198 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
    12199 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
    12200 	    || (sc->sc_type == WM_T_PCH2))
    12201 	    && (sc->sc_phytype == WMPHY_IGP_3))
    12202 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12203 
   12204 	/* Request PME */
   12205 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12206 #if 0
   12207 	/* Disable WOL */
   12208 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12209 #else
   12210 	/* For WOL */
   12211 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12212 #endif
   12213 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12214 }
   12215 
    12216 /* LPLU (Low Power Link Up) */
   12217 
   12218 static void
   12219 wm_lplu_d0_disable(struct wm_softc *sc)
   12220 {
   12221 	uint32_t reg;
   12222 
   12223 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12224 		device_xname(sc->sc_dev), __func__));
   12225 
   12226 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12227 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12228 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12229 }
   12230 
   12231 static void
   12232 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12233 {
   12234 	uint32_t reg;
   12235 
   12236 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12237 		device_xname(sc->sc_dev), __func__));
   12238 
   12239 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12240 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12241 	reg |= HV_OEM_BITS_ANEGNOW;
   12242 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12243 }
   12244 
    12245 /* EEE (Energy Efficient Ethernet) */
   12246 
   12247 static void
   12248 wm_set_eee_i350(struct wm_softc *sc)
   12249 {
   12250 	uint32_t ipcnfg, eeer;
   12251 
   12252 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12253 	eeer = CSR_READ(sc, WMREG_EEER);
   12254 
   12255 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12256 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12257 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12258 		    | EEER_LPI_FC);
   12259 	} else {
   12260 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12261 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12262 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12263 		    | EEER_LPI_FC);
   12264 	}
   12265 
   12266 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12267 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12268 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12269 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12270 }
   12271 
   12272 /*
   12273  * Workarounds (mainly PHY related).
   12274  * Basically, PHY's workarounds are in the PHY drivers.
   12275  */
   12276 
   12277 /* Work-around for 82566 Kumeran PCS lock loss */
   12278 static void
   12279 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12280 {
   12281 #if 0
   12282 	int miistatus, active, i;
   12283 	int reg;
   12284 
   12285 	miistatus = sc->sc_mii.mii_media_status;
   12286 
   12287 	/* If the link is not up, do nothing */
   12288 	if ((miistatus & IFM_ACTIVE) == 0)
   12289 		return;
   12290 
   12291 	active = sc->sc_mii.mii_media_active;
   12292 
   12293 	/* Nothing to do if the link is other than 1Gbps */
   12294 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12295 		return;
   12296 
   12297 	for (i = 0; i < 10; i++) {
   12298 		/* read twice */
   12299 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12300 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12301 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12302 			goto out;	/* GOOD! */
   12303 
   12304 		/* Reset the PHY */
   12305 		wm_gmii_reset(sc);
   12306 		delay(5*1000);
   12307 	}
   12308 
   12309 	/* Disable GigE link negotiation */
   12310 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12311 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12312 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12313 
   12314 	/*
   12315 	 * Call gig speed drop workaround on Gig disable before accessing
   12316 	 * any PHY registers.
   12317 	 */
   12318 	wm_gig_downshift_workaround_ich8lan(sc);
   12319 
   12320 out:
   12321 	return;
   12322 #endif
   12323 }
   12324 
    12325 /* Workaround for "WOL from S5 stops working" (gig downshift, igp3 only) */
   12326 static void
   12327 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12328 {
   12329 	uint16_t kmrn_reg;
   12330 
   12331 	/* Only for igp3 */
   12332 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12333 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12334 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12335 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12336 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12337 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12338 	}
   12339 }
   12340 
   12341 /*
   12342  * Workaround for pch's PHYs
   12343  * XXX should be moved to new PHY driver?
   12344  */
   12345 static void
   12346 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12347 {
   12348 
   12349 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12350 		device_xname(sc->sc_dev), __func__));
   12351 	KASSERT(sc->sc_type == WM_T_PCH);
   12352 
   12353 	if (sc->sc_phytype == WMPHY_82577)
   12354 		wm_set_mdio_slow_mode_hv(sc);
   12355 
    12356 	/* XXX missing: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
    12357 
    12358 	/* XXX missing: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   12359 
   12360 	/* 82578 */
   12361 	if (sc->sc_phytype == WMPHY_82578) {
   12362 		struct mii_softc *child;
   12363 
   12364 		/*
   12365 		 * Return registers to default by doing a soft reset then
   12366 		 * writing 0x3140 to the control register
   12367 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12368 		 */
   12369 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12370 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12371 			PHY_RESET(child);
   12372 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12373 			    0x3140);
   12374 		}
   12375 	}
   12376 
   12377 	/* Select page 0 */
   12378 	sc->phy.acquire(sc);
   12379 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12380 	sc->phy.release(sc);
   12381 
   12382 	/*
    12383 	 * Configure the K1 Si workaround during PHY reset, assuming there
    12384 	 * is link, so that K1 is disabled when the link runs at 1Gbps.
   12385 	 */
   12386 	wm_k1_gig_workaround_hv(sc, 1);
   12387 }
   12388 
   12389 static void
   12390 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12391 {
   12392 
   12393 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12394 		device_xname(sc->sc_dev), __func__));
   12395 	KASSERT(sc->sc_type == WM_T_PCH2);
   12396 
   12397 	wm_set_mdio_slow_mode_hv(sc);
   12398 }
   12399 
   12400 static int
   12401 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12402 {
   12403 	int k1_enable = sc->sc_nvm_k1_enabled;
   12404 
   12405 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12406 		device_xname(sc->sc_dev), __func__));
   12407 
   12408 	if (sc->phy.acquire(sc) != 0)
   12409 		return -1;
   12410 
   12411 	if (link) {
   12412 		k1_enable = 0;
   12413 
   12414 		/* Link stall fix for link up */
    12415 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
          		    0x0100);
   12416 	} else {
   12417 		/* Link stall fix for link down */
    12418 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
          		    0x4100);
   12419 	}
   12420 
   12421 	wm_configure_k1_ich8lan(sc, k1_enable);
   12422 	sc->phy.release(sc);
   12423 
   12424 	return 0;
   12425 }
   12426 
   12427 static void
   12428 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12429 {
   12430 	uint32_t reg;
   12431 
   12432 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12433 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12434 	    reg | HV_KMRN_MDIO_SLOW);
   12435 }
   12436 
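          /*
           * Enable or disable K1, a power state of the MAC<->PHY Kumeran
           * interface entered when the link is idle.  While the K1 bit is
           * rewritten, the MAC speed is forced and speed-bypass is set
           * around the update, then both are restored.
           */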
   12437 static void
   12438 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12439 {
   12440 	uint32_t ctrl, ctrl_ext, tmp;
   12441 	uint16_t kmrn_reg;
   12442 
   12443 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12444 
   12445 	if (k1_enable)
   12446 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12447 	else
   12448 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12449 
   12450 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12451 
   12452 	delay(20);
   12453 
   12454 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12455 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12456 
   12457 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12458 	tmp |= CTRL_FRCSPD;
   12459 
   12460 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12461 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12462 	CSR_WRITE_FLUSH(sc);
   12463 	delay(20);
   12464 
   12465 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12466 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12467 	CSR_WRITE_FLUSH(sc);
   12468 	delay(20);
   12469 }
   12470 
    12471 /* Special case - the 82575 needs manual init ... */
   12472 static void
   12473 wm_reset_init_script_82575(struct wm_softc *sc)
   12474 {
   12475 	/*
    12476 	 * Remark: this is untested code - we have no board without EEPROM.
    12477 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   12478 	 */
   12479 
   12480 	/* SerDes configuration via SERDESCTRL */
   12481 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12482 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12483 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12484 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12485 
   12486 	/* CCM configuration via CCMCTL register */
   12487 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12488 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12489 
   12490 	/* PCIe lanes configuration */
   12491 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12492 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12493 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12494 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12495 
   12496 	/* PCIe PLL Configuration */
   12497 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12498 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12499 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12500 }
   12501 
   12502 static void
   12503 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12504 {
   12505 	uint32_t reg;
   12506 	uint16_t nvmword;
   12507 	int rv;
   12508 
   12509 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12510 		return;
   12511 
   12512 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12513 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12514 	if (rv != 0) {
   12515 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12516 		    __func__);
   12517 		return;
   12518 	}
   12519 
   12520 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12521 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12522 		reg |= MDICNFG_DEST;
   12523 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12524 		reg |= MDICNFG_COM_MDIO;
   12525 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12526 }
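          /*
           * On SGMII-capable 82580s the MDICNFG destination and shared-MDIO
           * settings come from the per-port CFG3 NVM word; they are
           * restored here, presumably because a chip reset clears them.
           */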
   12527 
   12528 /*
   12529  * I210 Errata 25 and I211 Errata 10
   12530  * Slow System Clock.
   12531  */
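          /*
           * Mechanics of the workaround below: if the internal PHY's PLL
           * failed to lock (its PLL frequency register reads back as
           * unconfigured), reset the PHY, rewrite the iNVM autoload word
           * with INVM_PLL_WO_VAL patched in, bounce the function through
           * D3 and back to D0, and retry up to WM_MAX_PLL_TRIES times.
           */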
   12532 static void
   12533 wm_pll_workaround_i210(struct wm_softc *sc)
   12534 {
   12535 	uint32_t mdicnfg, wuc;
   12536 	uint32_t reg;
   12537 	pcireg_t pcireg;
   12538 	uint32_t pmreg;
   12539 	uint16_t nvmword, tmp_nvmword;
   12540 	int phyval;
   12541 	bool wa_done = false;
   12542 	int i;
   12543 
   12544 	/* Save WUC and MDICNFG registers */
   12545 	wuc = CSR_READ(sc, WMREG_WUC);
   12546 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12547 
   12548 	reg = mdicnfg & ~MDICNFG_DEST;
   12549 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12550 
   12551 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12552 		nvmword = INVM_DEFAULT_AL;
   12553 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12554 
   12555 	/* Get Power Management cap offset */
   12556 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12557 		&pmreg, NULL) == 0)
   12558 		return;
   12559 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12560 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12561 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12562 
   12563 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12564 			break; /* OK */
   12565 		}
   12566 
   12567 		wa_done = true;
   12568 		/* Directly reset the internal PHY */
   12569 		reg = CSR_READ(sc, WMREG_CTRL);
   12570 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12571 
   12572 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12573 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12574 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12575 
   12576 		CSR_WRITE(sc, WMREG_WUC, 0);
   12577 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12578 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12579 
   12580 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12581 		    pmreg + PCI_PMCSR);
   12582 		pcireg |= PCI_PMCSR_STATE_D3;
   12583 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12584 		    pmreg + PCI_PMCSR, pcireg);
   12585 		delay(1000);
   12586 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12587 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12588 		    pmreg + PCI_PMCSR, pcireg);
   12589 
   12590 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12591 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12592 
   12593 		/* Restore WUC register */
   12594 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12595 	}
   12596 
   12597 	/* Restore MDICNFG setting */
   12598 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12599 	if (wa_done)
   12600 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12601 }
   12602