/*	$NetBSD: if_wm.c,v 1.434 2016/10/28 06:59:08 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.434 2016/10/28 06:59:08 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Maximum number of interrupts this driver uses: one per Tx/Rx queue
 * pair, plus one for link status.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
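
/*
 * Illustrative example (assumption, not from the original comments):
 * with four queue pairs active, wm_setup_msix() binds a vector for
 * wm_txrxintr_msix() to each wm_queue's wmq_intr_idx plus one more
 * vector for wm_linkintr_msix() at sc_link_intr_idx, i.e. five
 * vectors total.
 */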

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
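
/*
 * Example (illustrative only): because txq_ndesc is a power of two,
 * WM_NEXTTX() advances the ring index with a mask instead of a modulo;
 * with WM_NTXDESC(txq) == 4096 the mask is 0xfff, so
 * WM_NEXTTX(txq, 4095) wraps to 0 without a branch or division.
 * WM_NEXTTXS() does the same for the software job ring (txq_num
 * entries).
 */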

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
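
/*
 * Example (illustrative only): the power-of-two mask works in both
 * directions here: WM_NEXTRX(255) == 0 and WM_PREVRX(0) == 255, since
 * (0 - 1) & 0xff is 0xff in two's-complement arithmetic.
 */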

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */
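
/*
 * Illustrative expansion (assumption, for reference only):
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is used only for its length), and
 * WM_Q_EVCNT_ATTACH() then formats a name such as "txq00txdw" into
 * that buffer and registers the counter with evcnt_attach_dynamic(9).
 */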

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags (not ifp->if_flags)
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
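
/*
 * Example (illustrative only): after WM_RXCHAIN_RESET(rxq), appending
 * mbufs m1 and then m2 with WM_RXCHAIN_LINK() leaves rxq_head == m1,
 * m1->m_next == m2 and rxq_tailp == &m2->m_next, so each append is an
 * O(1) tail insertion with no list walk.
 */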

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
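
/*
 * Example (illustrative only): PCI writes are posted, so a typical
 * pattern is
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *
 * where the dummy read of WMREG_STATUS forces the posted write out to
 * the device before the driver continues (e.g. into a delay()).
 */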

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
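
/*
 * Example (illustrative only): the hardware takes descriptor base
 * addresses as two 32-bit halves.  With a 64-bit bus_addr_t, a base of
 * 0x123456000 splits into WM_CDTXADDR_LO() == 0x23456000 and
 * WM_CDTXADDR_HI() == 0x1; on 32-bit platforms the high half is
 * constant 0.
 */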

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
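
/*
 * Illustrative note (assumption): CFATTACH_DECL3_NEW() is the
 * autoconf(9) glue that registers wm_match/wm_attach/wm_detach for
 * this driver; DVF_DETACH_SHUTDOWN marks the device as safe to detach
 * at shutdown time.
 */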

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1353 	  "I210 Gigabit Ethernet (FLASH less)",
   1354 	  WM_T_I210,		WMP_F_SERDES },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1357 	  "I210 Gigabit Ethernet (SGMII)",
   1358 	  WM_T_I210,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1361 	  "I211 Ethernet (COPPER)",
   1362 	  WM_T_I211,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1364 	  "I217 V Ethernet Connection",
   1365 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1367 	  "I217 LM Ethernet Connection",
   1368 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1370 	  "I218 V Ethernet Connection",
   1371 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1373 	  "I218 V Ethernet Connection",
   1374 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1376 	  "I218 V Ethernet Connection",
   1377 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1379 	  "I218 LM Ethernet Connection",
   1380 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1382 	  "I218 LM Ethernet Connection",
   1383 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1385 	  "I218 LM Ethernet Connection",
   1386 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1387 #if 0
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1389 	  "I219 V Ethernet Connection",
   1390 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1392 	  "I219 V Ethernet Connection",
   1393 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1395 	  "I219 V Ethernet Connection",
   1396 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1398 	  "I219 V Ethernet Connection",
   1399 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1401 	  "I219 LM Ethernet Connection",
   1402 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1404 	  "I219 LM Ethernet Connection",
   1405 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1407 	  "I219 LM Ethernet Connection",
   1408 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1410 	  "I219 LM Ethernet Connection",
   1411 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1413 	  "I219 LM Ethernet Connection",
   1414 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1415 #endif
   1416 	{ 0,			0,
   1417 	  NULL,
   1418 	  0,			0 },
   1419 };
   1420 
   1421 /*
    1422  * Register read/write functions,
    1423  * other than CSR_{READ|WRITE}().
   1424  */
   1425 
   1426 #if 0 /* Not currently used */
   1427 static inline uint32_t
   1428 wm_io_read(struct wm_softc *sc, int reg)
   1429 {
   1430 
   1431 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1432 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1433 }
   1434 #endif
   1435 
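/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window: the register offset goes into the address port at BAR
 *	offset 0, the value into the data port at offset 4.
 */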
   1436 static inline void
   1437 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1438 {
   1439 
   1440 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1441 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1442 }
   1443 
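/*
 * wm_82575_write_8bit_ctlr_reg:
 *
 *	Write an 8-bit value into a byte lane of an i82575 controller
 *	register (e.g. SCTL) and poll until the hardware reports the
 *	access complete, warning on timeout.
 */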
   1444 static inline void
   1445 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1446     uint32_t data)
   1447 {
   1448 	uint32_t regval;
   1449 	int i;
   1450 
   1451 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1452 
   1453 	CSR_WRITE(sc, reg, regval);
   1454 
   1455 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1456 		delay(5);
   1457 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1458 			break;
   1459 	}
   1460 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1461 		aprint_error("%s: WARNING:"
   1462 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1463 		    device_xname(sc->sc_dev), reg);
   1464 	}
   1465 }
   1466 
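/*
 * wm_set_dma_addr:
 *
 *	Store a bus address into the two little-endian 32-bit halves
 *	of a descriptor address field.
 */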
   1467 static inline void
   1468 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1469 {
   1470 	wa->wa_low = htole32(v & 0xffffffffU);
   1471 	if (sizeof(bus_addr_t) == 8)
   1472 		wa->wa_high = htole32((uint64_t) v >> 32);
   1473 	else
   1474 		wa->wa_high = 0;
   1475 }
   1476 
   1477 /*
   1478  * Descriptor sync/init functions.
   1479  */
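/*
 * wm_cdtxsync:
 *
 *	Sync the control data (Tx descriptors) for the given range,
 *	splitting the bus_dmamap_sync() in two when the range wraps
 *	past the end of the ring.
 */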
   1480 static inline void
   1481 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1482 {
   1483 	struct wm_softc *sc = txq->txq_sc;
   1484 
   1485 	/* If it will wrap around, sync to the end of the ring. */
   1486 	if ((start + num) > WM_NTXDESC(txq)) {
   1487 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1488 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1489 		    (WM_NTXDESC(txq) - start), ops);
   1490 		num -= (WM_NTXDESC(txq) - start);
   1491 		start = 0;
   1492 	}
   1493 
   1494 	/* Now sync whatever is left. */
   1495 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1496 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1497 }
   1498 
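/*
 * wm_cdrxsync:
 *
 *	Sync the control data for a single Rx descriptor.
 */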
   1499 static inline void
   1500 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1501 {
   1502 	struct wm_softc *sc = rxq->rxq_sc;
   1503 
   1504 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1505 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1506 }
   1507 
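/*
 * wm_init_rxdesc:
 *
 *	(Re)initialize the given Rx descriptor to point at its mbuf's
 *	buffer and hand it back to the chip by updating the Rx
 *	descriptor tail register.
 */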
   1508 static inline void
   1509 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1510 {
   1511 	struct wm_softc *sc = rxq->rxq_sc;
   1512 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1513 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1514 	struct mbuf *m = rxs->rxs_mbuf;
   1515 
   1516 	/*
   1517 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1518 	 * so that the payload after the Ethernet header is aligned
   1519 	 * to a 4-byte boundary.
    1520 	 *
   1521 	 * XXX BRAINDAMAGE ALERT!
   1522 	 * The stupid chip uses the same size for every buffer, which
   1523 	 * is set in the Receive Control register.  We are using the 2K
   1524 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1525 	 * reason, we can't "scoot" packets longer than the standard
   1526 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1527 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1528 	 * the upper layer copy the headers.
   1529 	 */
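	/*
	 * For example: with the standard 14-byte Ethernet header and a
	 * 2-byte tweak, the IP header starts at offset 2 + 14 = 16,
	 * which is 4-byte aligned.
	 */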
   1530 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1531 
   1532 	wm_set_dma_addr(&rxd->wrx_addr,
   1533 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1534 	rxd->wrx_len = 0;
   1535 	rxd->wrx_cksum = 0;
   1536 	rxd->wrx_status = 0;
   1537 	rxd->wrx_errors = 0;
   1538 	rxd->wrx_special = 0;
   1539 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1540 
   1541 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1542 }
   1543 
   1544 /*
   1545  * Device driver interface functions and commonly used functions.
   1546  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1547  */
   1548 
    1549 /* Look up a device in the supported-device table */
   1550 static const struct wm_product *
   1551 wm_lookup(const struct pci_attach_args *pa)
   1552 {
   1553 	const struct wm_product *wmp;
   1554 
   1555 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1556 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1557 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1558 			return wmp;
   1559 	}
   1560 	return NULL;
   1561 }
   1562 
   1563 /* The match function (ca_match) */
   1564 static int
   1565 wm_match(device_t parent, cfdata_t cf, void *aux)
   1566 {
   1567 	struct pci_attach_args *pa = aux;
   1568 
   1569 	if (wm_lookup(pa) != NULL)
   1570 		return 1;
   1571 
   1572 	return 0;
   1573 }
   1574 
   1575 /* The attach function (ca_attach) */
   1576 static void
   1577 wm_attach(device_t parent, device_t self, void *aux)
   1578 {
   1579 	struct wm_softc *sc = device_private(self);
   1580 	struct pci_attach_args *pa = aux;
   1581 	prop_dictionary_t dict;
   1582 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1583 	pci_chipset_tag_t pc = pa->pa_pc;
   1584 	int counts[PCI_INTR_TYPE_SIZE];
   1585 	pci_intr_type_t max_type;
   1586 	const char *eetype, *xname;
   1587 	bus_space_tag_t memt;
   1588 	bus_space_handle_t memh;
   1589 	bus_size_t memsize;
   1590 	int memh_valid;
   1591 	int i, error;
   1592 	const struct wm_product *wmp;
   1593 	prop_data_t ea;
   1594 	prop_number_t pn;
   1595 	uint8_t enaddr[ETHER_ADDR_LEN];
   1596 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1597 	pcireg_t preg, memtype;
   1598 	uint16_t eeprom_data, apme_mask;
   1599 	bool force_clear_smbi;
   1600 	uint32_t link_mode;
   1601 	uint32_t reg;
   1602 
   1603 	sc->sc_dev = self;
   1604 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1605 	sc->sc_core_stopping = false;
   1606 
   1607 	wmp = wm_lookup(pa);
   1608 #ifdef DIAGNOSTIC
   1609 	if (wmp == NULL) {
   1610 		printf("\n");
   1611 		panic("wm_attach: impossible");
   1612 	}
   1613 #endif
   1614 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1615 
   1616 	sc->sc_pc = pa->pa_pc;
   1617 	sc->sc_pcitag = pa->pa_tag;
   1618 
   1619 	if (pci_dma64_available(pa))
   1620 		sc->sc_dmat = pa->pa_dmat64;
   1621 	else
   1622 		sc->sc_dmat = pa->pa_dmat;
   1623 
   1624 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1625 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1626 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1627 
   1628 	sc->sc_type = wmp->wmp_type;
   1629 
   1630 	/* Set default function pointers */
   1631 	sc->phy.acquire = wm_get_null;
   1632 	sc->phy.release = wm_put_null;
   1633 
   1634 	if (sc->sc_type < WM_T_82543) {
   1635 		if (sc->sc_rev < 2) {
   1636 			aprint_error_dev(sc->sc_dev,
   1637 			    "i82542 must be at least rev. 2\n");
   1638 			return;
   1639 		}
   1640 		if (sc->sc_rev < 3)
   1641 			sc->sc_type = WM_T_82542_2_0;
   1642 	}
   1643 
   1644 	/*
   1645 	 * Disable MSI for Errata:
   1646 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1647 	 *
   1648 	 *  82544: Errata 25
   1649 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1650 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1651 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1652 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1653 	 *
   1654 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1655 	 *
   1656 	 *  82571 & 82572: Errata 63
   1657 	 */
   1658 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1659 	    || (sc->sc_type == WM_T_82572))
   1660 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1661 
   1662 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1663 	    || (sc->sc_type == WM_T_82580)
   1664 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1665 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1666 		sc->sc_flags |= WM_F_NEWQUEUE;
   1667 
   1668 	/* Set device properties (mactype) */
   1669 	dict = device_properties(sc->sc_dev);
   1670 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1671 
   1672 	/*
    1673 	 * Map the device.  All devices support memory-mapped access,
   1674 	 * and it is really required for normal operation.
   1675 	 */
   1676 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1677 	switch (memtype) {
   1678 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1679 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1680 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1681 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1682 		break;
   1683 	default:
   1684 		memh_valid = 0;
   1685 		break;
   1686 	}
   1687 
   1688 	if (memh_valid) {
   1689 		sc->sc_st = memt;
   1690 		sc->sc_sh = memh;
   1691 		sc->sc_ss = memsize;
   1692 	} else {
   1693 		aprint_error_dev(sc->sc_dev,
   1694 		    "unable to map device registers\n");
   1695 		return;
   1696 	}
   1697 
   1698 	/*
   1699 	 * In addition, i82544 and later support I/O mapped indirect
   1700 	 * register access.  It is not desirable (nor supported in
   1701 	 * this driver) to use it for normal operation, though it is
   1702 	 * required to work around bugs in some chip versions.
   1703 	 */
   1704 	if (sc->sc_type >= WM_T_82544) {
   1705 		/* First we have to find the I/O BAR. */
   1706 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1707 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1708 			if (memtype == PCI_MAPREG_TYPE_IO)
   1709 				break;
   1710 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1711 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1712 				i += 4;	/* skip high bits, too */
   1713 		}
   1714 		if (i < PCI_MAPREG_END) {
   1715 			/*
    1716 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1717 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.  That's
    1718 			 * not a problem, because the newer chips don't have this
    1719 			 * bug.
    1720 			 *
    1721 			 * The i8254x apparently doesn't respond when the
    1722 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1723 			 * been configured.
   1724 			 */
   1725 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1726 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1727 				aprint_error_dev(sc->sc_dev,
   1728 				    "WARNING: I/O BAR at zero.\n");
   1729 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1730 					0, &sc->sc_iot, &sc->sc_ioh,
   1731 					NULL, &sc->sc_ios) == 0) {
   1732 				sc->sc_flags |= WM_F_IOH_VALID;
   1733 			} else {
   1734 				aprint_error_dev(sc->sc_dev,
   1735 				    "WARNING: unable to map I/O space\n");
   1736 			}
   1737 		}
   1738 
   1739 	}
   1740 
   1741 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1742 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1743 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1744 	if (sc->sc_type < WM_T_82542_2_1)
   1745 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1746 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1747 
   1748 	/* power up chip */
   1749 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1750 	    NULL)) && error != EOPNOTSUPP) {
   1751 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1752 		return;
   1753 	}
   1754 
   1755 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1756 
   1757 	/* Allocation settings */
   1758 	max_type = PCI_INTR_TYPE_MSIX;
   1759 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1760 	counts[PCI_INTR_TYPE_MSI] = 1;
   1761 	counts[PCI_INTR_TYPE_INTX] = 1;
   1762 
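	/*
	 * Try interrupt types in the order MSI-X -> MSI -> INTx,
	 * re-running the allocation with the next-best type whenever
	 * handler setup for the current one fails.
	 */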
   1763 alloc_retry:
   1764 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1765 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1766 		return;
   1767 	}
   1768 
   1769 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1770 		error = wm_setup_msix(sc);
   1771 		if (error) {
   1772 			pci_intr_release(pc, sc->sc_intrs,
   1773 			    counts[PCI_INTR_TYPE_MSIX]);
   1774 
   1775 			/* Setup for MSI: Disable MSI-X */
   1776 			max_type = PCI_INTR_TYPE_MSI;
   1777 			counts[PCI_INTR_TYPE_MSI] = 1;
   1778 			counts[PCI_INTR_TYPE_INTX] = 1;
   1779 			goto alloc_retry;
   1780 		}
    1781 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1782 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1783 		error = wm_setup_legacy(sc);
   1784 		if (error) {
   1785 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1786 			    counts[PCI_INTR_TYPE_MSI]);
   1787 
   1788 			/* The next try is for INTx: Disable MSI */
   1789 			max_type = PCI_INTR_TYPE_INTX;
   1790 			counts[PCI_INTR_TYPE_INTX] = 1;
   1791 			goto alloc_retry;
   1792 		}
   1793 	} else {
   1794 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1795 		error = wm_setup_legacy(sc);
   1796 		if (error) {
   1797 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1798 			    counts[PCI_INTR_TYPE_INTX]);
   1799 			return;
   1800 		}
   1801 	}
   1802 
   1803 	/*
   1804 	 * Check the function ID (unit number of the chip).
   1805 	 */
   1806 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1807 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1808 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1809 	    || (sc->sc_type == WM_T_82580)
   1810 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1811 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1812 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1813 	else
   1814 		sc->sc_funcid = 0;
   1815 
   1816 	/*
   1817 	 * Determine a few things about the bus we're connected to.
   1818 	 */
   1819 	if (sc->sc_type < WM_T_82543) {
   1820 		/* We don't really know the bus characteristics here. */
   1821 		sc->sc_bus_speed = 33;
   1822 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1823 		/*
    1824 		 * CSA (Communication Streaming Architecture) is about as fast
    1825 		 * as a 32-bit 66MHz PCI bus.
   1826 		 */
   1827 		sc->sc_flags |= WM_F_CSA;
   1828 		sc->sc_bus_speed = 66;
   1829 		aprint_verbose_dev(sc->sc_dev,
   1830 		    "Communication Streaming Architecture\n");
   1831 		if (sc->sc_type == WM_T_82547) {
   1832 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1833 			callout_setfunc(&sc->sc_txfifo_ch,
   1834 					wm_82547_txfifo_stall, sc);
   1835 			aprint_verbose_dev(sc->sc_dev,
   1836 			    "using 82547 Tx FIFO stall work-around\n");
   1837 		}
   1838 	} else if (sc->sc_type >= WM_T_82571) {
   1839 		sc->sc_flags |= WM_F_PCIE;
   1840 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1841 		    && (sc->sc_type != WM_T_ICH10)
   1842 		    && (sc->sc_type != WM_T_PCH)
   1843 		    && (sc->sc_type != WM_T_PCH2)
   1844 		    && (sc->sc_type != WM_T_PCH_LPT)
   1845 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1846 			/* ICH* and PCH* have no PCIe capability registers */
   1847 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1848 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1849 				NULL) == 0)
   1850 				aprint_error_dev(sc->sc_dev,
   1851 				    "unable to find PCIe capability\n");
   1852 		}
   1853 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1854 	} else {
   1855 		reg = CSR_READ(sc, WMREG_STATUS);
   1856 		if (reg & STATUS_BUS64)
   1857 			sc->sc_flags |= WM_F_BUS64;
   1858 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1859 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1860 
   1861 			sc->sc_flags |= WM_F_PCIX;
   1862 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1863 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1864 				aprint_error_dev(sc->sc_dev,
   1865 				    "unable to find PCIX capability\n");
   1866 			else if (sc->sc_type != WM_T_82545_3 &&
   1867 				 sc->sc_type != WM_T_82546_3) {
   1868 				/*
   1869 				 * Work around a problem caused by the BIOS
   1870 				 * setting the max memory read byte count
   1871 				 * incorrectly.
   1872 				 */
   1873 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1874 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1875 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1876 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1877 
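				/*
				 * Both fields encode the byte count as
				 * 512 << n; clamp the commanded count to
				 * the advertised maximum.
				 */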
   1878 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1879 				    PCIX_CMD_BYTECNT_SHIFT;
   1880 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1881 				    PCIX_STATUS_MAXB_SHIFT;
   1882 				if (bytecnt > maxb) {
   1883 					aprint_verbose_dev(sc->sc_dev,
   1884 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1885 					    512 << bytecnt, 512 << maxb);
   1886 					pcix_cmd = (pcix_cmd &
   1887 					    ~PCIX_CMD_BYTECNT_MASK) |
   1888 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1889 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1890 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1891 					    pcix_cmd);
   1892 				}
   1893 			}
   1894 		}
   1895 		/*
   1896 		 * The quad port adapter is special; it has a PCIX-PCIX
   1897 		 * bridge on the board, and can run the secondary bus at
   1898 		 * a higher speed.
   1899 		 */
   1900 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1901 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1902 								      : 66;
   1903 		} else if (sc->sc_flags & WM_F_PCIX) {
   1904 			switch (reg & STATUS_PCIXSPD_MASK) {
   1905 			case STATUS_PCIXSPD_50_66:
   1906 				sc->sc_bus_speed = 66;
   1907 				break;
   1908 			case STATUS_PCIXSPD_66_100:
   1909 				sc->sc_bus_speed = 100;
   1910 				break;
   1911 			case STATUS_PCIXSPD_100_133:
   1912 				sc->sc_bus_speed = 133;
   1913 				break;
   1914 			default:
   1915 				aprint_error_dev(sc->sc_dev,
   1916 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1917 				    reg & STATUS_PCIXSPD_MASK);
   1918 				sc->sc_bus_speed = 66;
   1919 				break;
   1920 			}
   1921 		} else
   1922 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1923 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1924 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1925 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1926 	}
   1927 
   1928 	/* clear interesting stat counters */
   1929 	CSR_READ(sc, WMREG_COLC);
   1930 	CSR_READ(sc, WMREG_RXERRC);
   1931 
   1932 	/* get PHY control from SMBus to PCIe */
   1933 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1934 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1935 		wm_smbustopci(sc);
   1936 
   1937 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1938 	    || (sc->sc_type >= WM_T_ICH8))
   1939 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1940 	if (sc->sc_type >= WM_T_ICH8)
   1941 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1942 
   1943 	/* Set PHY, NVM mutex related stuff */
   1944 	switch (sc->sc_type) {
   1945 	case WM_T_82542_2_0:
   1946 	case WM_T_82542_2_1:
   1947 	case WM_T_82543:
   1948 	case WM_T_82544:
   1949 		/* Microwire */
   1950 		sc->sc_nvm_wordsize = 64;
   1951 		sc->sc_nvm_addrbits = 6;
   1952 		break;
   1953 	case WM_T_82540:
   1954 	case WM_T_82545:
   1955 	case WM_T_82545_3:
   1956 	case WM_T_82546:
   1957 	case WM_T_82546_3:
   1958 		/* Microwire */
   1959 		reg = CSR_READ(sc, WMREG_EECD);
   1960 		if (reg & EECD_EE_SIZE) {
   1961 			sc->sc_nvm_wordsize = 256;
   1962 			sc->sc_nvm_addrbits = 8;
   1963 		} else {
   1964 			sc->sc_nvm_wordsize = 64;
   1965 			sc->sc_nvm_addrbits = 6;
   1966 		}
   1967 		sc->sc_flags |= WM_F_LOCK_EECD;
   1968 		break;
   1969 	case WM_T_82541:
   1970 	case WM_T_82541_2:
   1971 	case WM_T_82547:
   1972 	case WM_T_82547_2:
   1973 		sc->sc_flags |= WM_F_LOCK_EECD;
   1974 		reg = CSR_READ(sc, WMREG_EECD);
   1975 		if (reg & EECD_EE_TYPE) {
   1976 			/* SPI */
   1977 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1978 			wm_nvm_set_addrbits_size_eecd(sc);
   1979 		} else {
   1980 			/* Microwire */
   1981 			if ((reg & EECD_EE_ABITS) != 0) {
   1982 				sc->sc_nvm_wordsize = 256;
   1983 				sc->sc_nvm_addrbits = 8;
   1984 			} else {
   1985 				sc->sc_nvm_wordsize = 64;
   1986 				sc->sc_nvm_addrbits = 6;
   1987 			}
   1988 		}
   1989 		break;
   1990 	case WM_T_82571:
   1991 	case WM_T_82572:
   1992 		/* SPI */
   1993 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1994 		wm_nvm_set_addrbits_size_eecd(sc);
   1995 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1996 		sc->phy.acquire = wm_get_swsm_semaphore;
   1997 		sc->phy.release = wm_put_swsm_semaphore;
   1998 		break;
   1999 	case WM_T_82573:
   2000 	case WM_T_82574:
   2001 	case WM_T_82583:
   2002 		if (sc->sc_type == WM_T_82573) {
   2003 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2004 			sc->phy.acquire = wm_get_swsm_semaphore;
   2005 			sc->phy.release = wm_put_swsm_semaphore;
   2006 		} else {
   2007 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2008 			/* Both PHY and NVM use the same semaphore. */
   2009 			sc->phy.acquire
   2010 			    = wm_get_swfwhw_semaphore;
   2011 			sc->phy.release
   2012 			    = wm_put_swfwhw_semaphore;
   2013 		}
   2014 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2015 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2016 			sc->sc_nvm_wordsize = 2048;
   2017 		} else {
   2018 			/* SPI */
   2019 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2020 			wm_nvm_set_addrbits_size_eecd(sc);
   2021 		}
   2022 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2023 		break;
   2024 	case WM_T_82575:
   2025 	case WM_T_82576:
   2026 	case WM_T_82580:
   2027 	case WM_T_I350:
   2028 	case WM_T_I354:
   2029 	case WM_T_80003:
   2030 		/* SPI */
   2031 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2032 		wm_nvm_set_addrbits_size_eecd(sc);
   2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2034 		    | WM_F_LOCK_SWSM;
   2035 		sc->phy.acquire = wm_get_phy_82575;
   2036 		sc->phy.release = wm_put_phy_82575;
   2037 		break;
   2038 	case WM_T_ICH8:
   2039 	case WM_T_ICH9:
   2040 	case WM_T_ICH10:
   2041 	case WM_T_PCH:
   2042 	case WM_T_PCH2:
   2043 	case WM_T_PCH_LPT:
   2044 		/* FLASH */
   2045 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2046 		sc->sc_nvm_wordsize = 2048;
   2047 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2048 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2049 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2050 			aprint_error_dev(sc->sc_dev,
   2051 			    "can't map FLASH registers\n");
   2052 			goto out;
   2053 		}
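		/*
		 * GFPREG gives the base and limit of the gigabit flash
		 * region in units of ICH_FLASH_SECTOR_SIZE.  The region
		 * holds two NVM banks, so the per-bank size is half the
		 * region size, converted from bytes to 16-bit words.
		 */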
   2054 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2055 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2056 		    ICH_FLASH_SECTOR_SIZE;
   2057 		sc->sc_ich8_flash_bank_size =
   2058 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2059 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2060 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2061 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2062 		sc->sc_flashreg_offset = 0;
   2063 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2064 		sc->phy.release = wm_put_swflag_ich8lan;
   2065 		break;
   2066 	case WM_T_PCH_SPT:
   2067 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2068 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2069 		sc->sc_flasht = sc->sc_st;
   2070 		sc->sc_flashh = sc->sc_sh;
   2071 		sc->sc_ich8_flash_base = 0;
   2072 		sc->sc_nvm_wordsize =
   2073 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2074 			* NVM_SIZE_MULTIPLIER;
    2075 		/* It is the size in bytes; we want words */
   2076 		sc->sc_nvm_wordsize /= 2;
   2077 		/* assume 2 banks */
   2078 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2079 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2080 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2081 		sc->phy.release = wm_put_swflag_ich8lan;
   2082 		break;
   2083 	case WM_T_I210:
   2084 	case WM_T_I211:
   2085 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2086 			wm_nvm_set_addrbits_size_eecd(sc);
   2087 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2088 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2089 		} else {
   2090 			sc->sc_nvm_wordsize = INVM_SIZE;
   2091 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2092 		}
   2093 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2094 		sc->phy.acquire = wm_get_phy_82575;
   2095 		sc->phy.release = wm_put_phy_82575;
   2096 		break;
   2097 	default:
   2098 		break;
   2099 	}
   2100 
   2101 	/* Reset the chip to a known state. */
   2102 	wm_reset(sc);
   2103 
   2104 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2105 	switch (sc->sc_type) {
   2106 	case WM_T_82571:
   2107 	case WM_T_82572:
   2108 		reg = CSR_READ(sc, WMREG_SWSM2);
   2109 		if ((reg & SWSM2_LOCK) == 0) {
   2110 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2111 			force_clear_smbi = true;
   2112 		} else
   2113 			force_clear_smbi = false;
   2114 		break;
   2115 	case WM_T_82573:
   2116 	case WM_T_82574:
   2117 	case WM_T_82583:
   2118 		force_clear_smbi = true;
   2119 		break;
   2120 	default:
   2121 		force_clear_smbi = false;
   2122 		break;
   2123 	}
   2124 	if (force_clear_smbi) {
   2125 		reg = CSR_READ(sc, WMREG_SWSM);
   2126 		if ((reg & SWSM_SMBI) != 0)
   2127 			aprint_error_dev(sc->sc_dev,
   2128 			    "Please update the Bootagent\n");
   2129 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2130 	}
   2131 
   2132 	/*
    2133 	 * Defer printing the EEPROM type until after verifying the checksum.
   2134 	 * This allows the EEPROM type to be printed correctly in the case
   2135 	 * that no EEPROM is attached.
   2136 	 */
   2137 	/*
   2138 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2139 	 * this for later, so we can fail future reads from the EEPROM.
   2140 	 */
   2141 	if (wm_nvm_validate_checksum(sc)) {
   2142 		/*
    2143 		 * Validate the checksum again, because some PCI-e parts
    2144 		 * fail the first check due to the link being in a sleep state.
   2145 		 */
   2146 		if (wm_nvm_validate_checksum(sc))
   2147 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2148 	}
   2149 
   2150 	/* Set device properties (macflags) */
   2151 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2152 
   2153 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2154 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2155 	else {
   2156 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2157 		    sc->sc_nvm_wordsize);
   2158 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2159 			aprint_verbose("iNVM");
   2160 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2161 			aprint_verbose("FLASH(HW)");
   2162 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2163 			aprint_verbose("FLASH");
   2164 		else {
   2165 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2166 				eetype = "SPI";
   2167 			else
   2168 				eetype = "MicroWire";
   2169 			aprint_verbose("(%d address bits) %s EEPROM",
   2170 			    sc->sc_nvm_addrbits, eetype);
   2171 		}
   2172 	}
   2173 	wm_nvm_version(sc);
   2174 	aprint_verbose("\n");
   2175 
   2176 	/* Check for I21[01] PLL workaround */
   2177 	if (sc->sc_type == WM_T_I210)
   2178 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2179 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2180 		/* NVM image release 3.25 has a workaround */
   2181 		if ((sc->sc_nvm_ver_major < 3)
   2182 		    || ((sc->sc_nvm_ver_major == 3)
   2183 			&& (sc->sc_nvm_ver_minor < 25))) {
   2184 			aprint_verbose_dev(sc->sc_dev,
   2185 			    "ROM image version %d.%d is older than 3.25\n",
   2186 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2187 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2188 		}
   2189 	}
   2190 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2191 		wm_pll_workaround_i210(sc);
   2192 
   2193 	wm_get_wakeup(sc);
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 	case WM_T_82573:
   2198 	case WM_T_82574:
   2199 	case WM_T_82583:
   2200 	case WM_T_80003:
   2201 	case WM_T_ICH8:
   2202 	case WM_T_ICH9:
   2203 	case WM_T_ICH10:
   2204 	case WM_T_PCH:
   2205 	case WM_T_PCH2:
   2206 	case WM_T_PCH_LPT:
   2207 	case WM_T_PCH_SPT:
   2208 		/* Non-AMT based hardware can now take control from firmware */
   2209 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2210 			wm_get_hw_control(sc);
   2211 		break;
   2212 	default:
   2213 		break;
   2214 	}
   2215 
   2216 	/*
    2217 	 * Read the Ethernet address from the EEPROM, unless it was
    2218 	 * already found in the device properties.
   2219 	 */
   2220 	ea = prop_dictionary_get(dict, "mac-address");
   2221 	if (ea != NULL) {
   2222 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2223 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2224 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2225 	} else {
   2226 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2227 			aprint_error_dev(sc->sc_dev,
   2228 			    "unable to read Ethernet address\n");
   2229 			goto out;
   2230 		}
   2231 	}
   2232 
   2233 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2234 	    ether_sprintf(enaddr));
   2235 
   2236 	/*
   2237 	 * Read the config info from the EEPROM, and set up various
   2238 	 * bits in the control registers based on their contents.
   2239 	 */
   2240 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2241 	if (pn != NULL) {
   2242 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2243 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2244 	} else {
   2245 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2246 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2247 			goto out;
   2248 		}
   2249 	}
   2250 
   2251 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2252 	if (pn != NULL) {
   2253 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2254 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2255 	} else {
   2256 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2257 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2258 			goto out;
   2259 		}
   2260 	}
   2261 
   2262 	/* check for WM_F_WOL */
   2263 	switch (sc->sc_type) {
   2264 	case WM_T_82542_2_0:
   2265 	case WM_T_82542_2_1:
   2266 	case WM_T_82543:
   2267 		/* dummy? */
   2268 		eeprom_data = 0;
   2269 		apme_mask = NVM_CFG3_APME;
   2270 		break;
   2271 	case WM_T_82544:
   2272 		apme_mask = NVM_CFG2_82544_APM_EN;
   2273 		eeprom_data = cfg2;
   2274 		break;
   2275 	case WM_T_82546:
   2276 	case WM_T_82546_3:
   2277 	case WM_T_82571:
   2278 	case WM_T_82572:
   2279 	case WM_T_82573:
   2280 	case WM_T_82574:
   2281 	case WM_T_82583:
   2282 	case WM_T_80003:
   2283 	default:
   2284 		apme_mask = NVM_CFG3_APME;
   2285 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2286 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2287 		break;
   2288 	case WM_T_82575:
   2289 	case WM_T_82576:
   2290 	case WM_T_82580:
   2291 	case WM_T_I350:
   2292 	case WM_T_I354: /* XXX ok? */
   2293 	case WM_T_ICH8:
   2294 	case WM_T_ICH9:
   2295 	case WM_T_ICH10:
   2296 	case WM_T_PCH:
   2297 	case WM_T_PCH2:
   2298 	case WM_T_PCH_LPT:
   2299 	case WM_T_PCH_SPT:
   2300 		/* XXX The funcid should be checked on some devices */
   2301 		apme_mask = WUC_APME;
   2302 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2303 		break;
   2304 	}
   2305 
    2306 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2307 	if ((eeprom_data & apme_mask) != 0)
   2308 		sc->sc_flags |= WM_F_WOL;
   2309 #ifdef WM_DEBUG
   2310 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2311 		printf("WOL\n");
   2312 #endif
   2313 
   2314 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2315 		/* Check NVM for autonegotiation */
   2316 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2317 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2318 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2319 		}
   2320 	}
   2321 
   2322 	/*
    2323 	 * XXX need special handling for some multiple-port cards
    2324 	 * to disable a particular port.
   2325 	 */
   2326 
   2327 	if (sc->sc_type >= WM_T_82544) {
   2328 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2329 		if (pn != NULL) {
   2330 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2331 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2332 		} else {
   2333 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2334 				aprint_error_dev(sc->sc_dev,
   2335 				    "unable to read SWDPIN\n");
   2336 				goto out;
   2337 			}
   2338 		}
   2339 	}
   2340 
   2341 	if (cfg1 & NVM_CFG1_ILOS)
   2342 		sc->sc_ctrl |= CTRL_ILOS;
   2343 
   2344 	/*
   2345 	 * XXX
    2346 	 * This code isn't correct, because pins 2 and 3 are located
    2347 	 * at different positions on newer chips.  Check all datasheets.
    2348 	 *
    2349 	 * Until this is resolved, only do this for chips <= 82580.
   2350 	 */
   2351 	if (sc->sc_type <= WM_T_82580) {
   2352 		if (sc->sc_type >= WM_T_82544) {
   2353 			sc->sc_ctrl |=
   2354 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2355 			    CTRL_SWDPIO_SHIFT;
   2356 			sc->sc_ctrl |=
   2357 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2358 			    CTRL_SWDPINS_SHIFT;
   2359 		} else {
   2360 			sc->sc_ctrl |=
   2361 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2362 			    CTRL_SWDPIO_SHIFT;
   2363 		}
   2364 	}
   2365 
   2366 	/* XXX For other than 82580? */
   2367 	if (sc->sc_type == WM_T_82580) {
   2368 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2369 		if (nvmword & __BIT(13))
   2370 			sc->sc_ctrl |= CTRL_ILOS;
   2371 	}
   2372 
   2373 #if 0
   2374 	if (sc->sc_type >= WM_T_82544) {
   2375 		if (cfg1 & NVM_CFG1_IPS0)
   2376 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2377 		if (cfg1 & NVM_CFG1_IPS1)
   2378 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2379 		sc->sc_ctrl_ext |=
   2380 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2381 		    CTRL_EXT_SWDPIO_SHIFT;
   2382 		sc->sc_ctrl_ext |=
   2383 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2384 		    CTRL_EXT_SWDPINS_SHIFT;
   2385 	} else {
   2386 		sc->sc_ctrl_ext |=
   2387 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2388 		    CTRL_EXT_SWDPIO_SHIFT;
   2389 	}
   2390 #endif
   2391 
   2392 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2393 #if 0
   2394 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2395 #endif
   2396 
   2397 	if (sc->sc_type == WM_T_PCH) {
   2398 		uint16_t val;
   2399 
   2400 		/* Save the NVM K1 bit setting */
   2401 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2402 
   2403 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2404 			sc->sc_nvm_k1_enabled = 1;
   2405 		else
   2406 			sc->sc_nvm_k1_enabled = 0;
   2407 	}
   2408 
   2409 	/*
    2410 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2411 	 * media structures accordingly.
   2412 	 */
   2413 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2414 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2415 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2416 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2417 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2418 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2419 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2420 	} else if (sc->sc_type < WM_T_82543 ||
   2421 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2422 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2423 			aprint_error_dev(sc->sc_dev,
   2424 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2425 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2426 		}
   2427 		wm_tbi_mediainit(sc);
   2428 	} else {
   2429 		switch (sc->sc_type) {
   2430 		case WM_T_82575:
   2431 		case WM_T_82576:
   2432 		case WM_T_82580:
   2433 		case WM_T_I350:
   2434 		case WM_T_I354:
   2435 		case WM_T_I210:
   2436 		case WM_T_I211:
   2437 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2438 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2439 			switch (link_mode) {
   2440 			case CTRL_EXT_LINK_MODE_1000KX:
   2441 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2442 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2443 				break;
   2444 			case CTRL_EXT_LINK_MODE_SGMII:
   2445 				if (wm_sgmii_uses_mdio(sc)) {
   2446 					aprint_verbose_dev(sc->sc_dev,
   2447 					    "SGMII(MDIO)\n");
   2448 					sc->sc_flags |= WM_F_SGMII;
   2449 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2450 					break;
   2451 				}
   2452 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2453 				/*FALLTHROUGH*/
   2454 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2455 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2456 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2457 					if (link_mode
   2458 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2459 						sc->sc_mediatype
   2460 						    = WM_MEDIATYPE_COPPER;
   2461 						sc->sc_flags |= WM_F_SGMII;
   2462 					} else {
   2463 						sc->sc_mediatype
   2464 						    = WM_MEDIATYPE_SERDES;
   2465 						aprint_verbose_dev(sc->sc_dev,
   2466 						    "SERDES\n");
   2467 					}
   2468 					break;
   2469 				}
   2470 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2471 					aprint_verbose_dev(sc->sc_dev,
   2472 					    "SERDES\n");
   2473 
   2474 				/* Change current link mode setting */
   2475 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2476 				switch (sc->sc_mediatype) {
   2477 				case WM_MEDIATYPE_COPPER:
   2478 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2479 					break;
   2480 				case WM_MEDIATYPE_SERDES:
   2481 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2482 					break;
   2483 				default:
   2484 					break;
   2485 				}
   2486 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2487 				break;
   2488 			case CTRL_EXT_LINK_MODE_GMII:
   2489 			default:
   2490 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2491 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2492 				break;
   2493 			}
   2494 
    2496 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2497 				reg |= CTRL_EXT_I2C_ENA;
    2498 			else
    2499 				reg &= ~CTRL_EXT_I2C_ENA;
   2500 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2501 
   2502 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2503 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2504 			else
   2505 				wm_tbi_mediainit(sc);
   2506 			break;
   2507 		default:
   2508 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2509 				aprint_error_dev(sc->sc_dev,
   2510 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2511 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2512 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2513 		}
   2514 	}
   2515 
   2516 	ifp = &sc->sc_ethercom.ec_if;
   2517 	xname = device_xname(sc->sc_dev);
   2518 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2519 	ifp->if_softc = sc;
   2520 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2521 	ifp->if_extflags = IFEF_START_MPSAFE;
   2522 	ifp->if_ioctl = wm_ioctl;
   2523 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2524 		ifp->if_start = wm_nq_start;
   2525 		if (sc->sc_nqueues > 1)
   2526 			ifp->if_transmit = wm_nq_transmit;
   2527 	} else
   2528 		ifp->if_start = wm_start;
   2529 	ifp->if_watchdog = wm_watchdog;
   2530 	ifp->if_init = wm_init;
   2531 	ifp->if_stop = wm_stop;
   2532 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2533 	IFQ_SET_READY(&ifp->if_snd);
   2534 
   2535 	/* Check for jumbo frame */
   2536 	switch (sc->sc_type) {
   2537 	case WM_T_82573:
   2538 		/* XXX limited to 9234 if ASPM is disabled */
   2539 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2540 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2541 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2542 		break;
   2543 	case WM_T_82571:
   2544 	case WM_T_82572:
   2545 	case WM_T_82574:
   2546 	case WM_T_82575:
   2547 	case WM_T_82576:
   2548 	case WM_T_82580:
   2549 	case WM_T_I350:
    2550 	case WM_T_I354: /* XXX ok? */
   2551 	case WM_T_I210:
   2552 	case WM_T_I211:
   2553 	case WM_T_80003:
   2554 	case WM_T_ICH9:
   2555 	case WM_T_ICH10:
   2556 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2557 	case WM_T_PCH_LPT:
   2558 	case WM_T_PCH_SPT:
   2559 		/* XXX limited to 9234 */
   2560 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2561 		break;
   2562 	case WM_T_PCH:
   2563 		/* XXX limited to 4096 */
   2564 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2565 		break;
   2566 	case WM_T_82542_2_0:
   2567 	case WM_T_82542_2_1:
   2568 	case WM_T_82583:
   2569 	case WM_T_ICH8:
   2570 		/* No support for jumbo frame */
   2571 		break;
   2572 	default:
   2573 		/* ETHER_MAX_LEN_JUMBO */
   2574 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2575 		break;
   2576 	}
   2577 
    2578 	/* If we're an i82543 or greater, we can support VLANs. */
   2579 	if (sc->sc_type >= WM_T_82543)
   2580 		sc->sc_ethercom.ec_capabilities |=
   2581 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2582 
   2583 	/*
    2584 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2585 	 * on i82543 and later.
   2586 	 */
   2587 	if (sc->sc_type >= WM_T_82543) {
   2588 		ifp->if_capabilities |=
   2589 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2590 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2591 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2592 		    IFCAP_CSUM_TCPv6_Tx |
   2593 		    IFCAP_CSUM_UDPv6_Tx;
   2594 	}
   2595 
   2596 	/*
    2597 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2598 	 *
   2599 	 *	82541GI (8086:1076) ... no
   2600 	 *	82572EI (8086:10b9) ... yes
   2601 	 */
   2602 	if (sc->sc_type >= WM_T_82571) {
   2603 		ifp->if_capabilities |=
   2604 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2605 	}
   2606 
   2607 	/*
    2608 	 * If we're an i82544 or greater (except i82547), we can do
   2609 	 * TCP segmentation offload.
   2610 	 */
   2611 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2612 		ifp->if_capabilities |= IFCAP_TSOv4;
   2613 	}
   2614 
   2615 	if (sc->sc_type >= WM_T_82571) {
   2616 		ifp->if_capabilities |= IFCAP_TSOv6;
   2617 	}
   2618 
   2619 #ifdef WM_MPSAFE
   2620 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2621 #else
   2622 	sc->sc_core_lock = NULL;
   2623 #endif
   2624 
   2625 	/* Attach the interface. */
   2626 	if_initialize(ifp);
   2627 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2628 	ether_ifattach(ifp, enaddr);
   2629 	if_register(ifp);
   2630 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2631 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2632 			  RND_FLAG_DEFAULT);
   2633 
   2634 #ifdef WM_EVENT_COUNTERS
   2635 	/* Attach event counters. */
   2636 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2637 	    NULL, xname, "linkintr");
   2638 
   2639 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2640 	    NULL, xname, "tx_xoff");
   2641 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2642 	    NULL, xname, "tx_xon");
   2643 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2644 	    NULL, xname, "rx_xoff");
   2645 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2646 	    NULL, xname, "rx_xon");
   2647 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2648 	    NULL, xname, "rx_macctl");
   2649 #endif /* WM_EVENT_COUNTERS */
   2650 
   2651 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2652 		pmf_class_network_register(self, ifp);
   2653 	else
   2654 		aprint_error_dev(self, "couldn't establish power handler\n");
   2655 
   2656 	sc->sc_flags |= WM_F_ATTACHED;
   2657  out:
   2658 	return;
   2659 }
   2660 
   2661 /* The detach function (ca_detach) */
   2662 static int
   2663 wm_detach(device_t self, int flags __unused)
   2664 {
   2665 	struct wm_softc *sc = device_private(self);
   2666 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2667 	int i;
   2668 
   2669 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2670 		return 0;
   2671 
    2672 	/* Stop the interface.  The callouts are stopped by wm_stop(). */
   2673 	wm_stop(ifp, 1);
   2674 
   2675 	pmf_device_deregister(self);
   2676 
   2677 	/* Tell the firmware about the release */
   2678 	WM_CORE_LOCK(sc);
   2679 	wm_release_manageability(sc);
   2680 	wm_release_hw_control(sc);
   2681 	WM_CORE_UNLOCK(sc);
   2682 
   2683 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2684 
   2685 	/* Delete all remaining media. */
   2686 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2687 
   2688 	ether_ifdetach(ifp);
   2689 	if_detach(ifp);
   2690 	if_percpuq_destroy(sc->sc_ipq);
   2691 
   2692 	/* Unload RX dmamaps and free mbufs */
   2693 	for (i = 0; i < sc->sc_nqueues; i++) {
   2694 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2695 		mutex_enter(rxq->rxq_lock);
   2696 		wm_rxdrain(rxq);
   2697 		mutex_exit(rxq->rxq_lock);
   2698 	}
   2699 	/* Must unlock here */
   2700 
   2701 	/* Disestablish the interrupt handler */
   2702 	for (i = 0; i < sc->sc_nintrs; i++) {
   2703 		if (sc->sc_ihs[i] != NULL) {
   2704 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2705 			sc->sc_ihs[i] = NULL;
   2706 		}
   2707 	}
   2708 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2709 
   2710 	wm_free_txrx_queues(sc);
   2711 
   2712 	/* Unmap the registers */
   2713 	if (sc->sc_ss) {
   2714 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2715 		sc->sc_ss = 0;
   2716 	}
   2717 	if (sc->sc_ios) {
   2718 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2719 		sc->sc_ios = 0;
   2720 	}
   2721 	if (sc->sc_flashs) {
   2722 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2723 		sc->sc_flashs = 0;
   2724 	}
   2725 
   2726 	if (sc->sc_core_lock)
   2727 		mutex_obj_free(sc->sc_core_lock);
   2728 	if (sc->sc_ich_phymtx)
   2729 		mutex_obj_free(sc->sc_ich_phymtx);
   2730 	if (sc->sc_ich_nvmmtx)
   2731 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2732 
   2733 	return 0;
   2734 }
   2735 
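/*
 * wm_suspend, wm_resume:
 *
 *	PMF suspend/resume handlers: release manageability and hardware
 *	control (optionally arming wake-on-LAN) on suspend, and
 *	reinitialize manageability on resume.
 */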
   2736 static bool
   2737 wm_suspend(device_t self, const pmf_qual_t *qual)
   2738 {
   2739 	struct wm_softc *sc = device_private(self);
   2740 
   2741 	wm_release_manageability(sc);
   2742 	wm_release_hw_control(sc);
   2743 #ifdef WM_WOL
   2744 	wm_enable_wakeup(sc);
   2745 #endif
   2746 
   2747 	return true;
   2748 }
   2749 
   2750 static bool
   2751 wm_resume(device_t self, const pmf_qual_t *qual)
   2752 {
   2753 	struct wm_softc *sc = device_private(self);
   2754 
   2755 	wm_init_manageability(sc);
   2756 
   2757 	return true;
   2758 }
   2759 
   2760 /*
   2761  * wm_watchdog:		[ifnet interface function]
   2762  *
   2763  *	Watchdog timer handler.
   2764  */
   2765 static void
   2766 wm_watchdog(struct ifnet *ifp)
   2767 {
   2768 	int qid;
   2769 	struct wm_softc *sc = ifp->if_softc;
   2770 
   2771 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2772 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2773 
   2774 		wm_watchdog_txq(ifp, txq);
   2775 	}
   2776 
   2777 	/* Reset the interface. */
   2778 	(void) wm_init(ifp);
   2779 
   2780 	/*
    2781 	 * There is still some upper-layer processing that calls
    2782 	 * ifp->if_start(), e.g. ALTQ.
   2783 	 */
   2784 	/* Try to get more packets going. */
   2785 	ifp->if_start(ifp);
   2786 }
   2787 
   2788 static void
   2789 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2790 {
   2791 	struct wm_softc *sc = ifp->if_softc;
   2792 
   2793 	/*
   2794 	 * Since we're using delayed interrupts, sweep up
   2795 	 * before we report an error.
   2796 	 */
   2797 	mutex_enter(txq->txq_lock);
   2798 	wm_txeof(sc, txq);
   2799 	mutex_exit(txq->txq_lock);
   2800 
   2801 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2802 #ifdef WM_DEBUG
   2803 		int i, j;
   2804 		struct wm_txsoft *txs;
   2805 #endif
   2806 		log(LOG_ERR,
   2807 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2808 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2809 		    txq->txq_next);
   2810 		ifp->if_oerrors++;
   2811 #ifdef WM_DEBUG
    2812 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2813 		    i = WM_NEXTTXS(txq, i)) {
    2814 			txs = &txq->txq_soft[i];
    2815 			printf("txs %d tx %d -> %d\n",
    2816 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2817 			for (j = txs->txs_firstdesc; ;
    2818 			    j = WM_NEXTTX(txq, j)) {
    2819 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2820 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2821 				printf("\t %#08x%08x\n",
    2822 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2823 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2824 				if (j == txs->txs_lastdesc)
    2825 					break;
    2826 			}
    2827 		}
   2828 #endif
   2829 	}
   2830 }
   2831 
   2832 /*
   2833  * wm_tick:
   2834  *
   2835  *	One second timer, used to check link status, sweep up
   2836  *	completed transmit jobs, etc.
   2837  */
   2838 static void
   2839 wm_tick(void *arg)
   2840 {
   2841 	struct wm_softc *sc = arg;
   2842 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2843 #ifndef WM_MPSAFE
   2844 	int s = splnet();
   2845 #endif
   2846 
   2847 	WM_CORE_LOCK(sc);
   2848 
   2849 	if (sc->sc_core_stopping)
   2850 		goto out;
   2851 
   2852 	if (sc->sc_type >= WM_T_82542_2_1) {
   2853 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2854 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2855 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2856 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2857 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2858 	}
   2859 
   2860 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2861 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2862 	    + CSR_READ(sc, WMREG_CRCERRS)
   2863 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2864 	    + CSR_READ(sc, WMREG_SYMERRC)
   2865 	    + CSR_READ(sc, WMREG_RXERRC)
   2866 	    + CSR_READ(sc, WMREG_SEC)
   2867 	    + CSR_READ(sc, WMREG_CEXTERR)
   2868 	    + CSR_READ(sc, WMREG_RLEC);
   2869 	/*
   2870 	 * WMREG_RNBC is incremented when there are no available buffers
   2871 	 * in host memory. It is not a count of dropped packets, because
   2872 	 * the ethernet controller can still receive packets in that case
   2873 	 * as long as there is space in the PHY's FIFO.
   2874 	 *
   2875 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
   2876 	 * instead of if_iqdrops.
   2877 	 */
   2878 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2879 
   2880 	if (sc->sc_flags & WM_F_HAS_MII)
   2881 		mii_tick(&sc->sc_mii);
   2882 	else if ((sc->sc_type >= WM_T_82575)
   2883 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2884 		wm_serdes_tick(sc);
   2885 	else
   2886 		wm_tbi_tick(sc);
   2887 
   2888 out:
   2889 	WM_CORE_UNLOCK(sc);
   2890 #ifndef WM_MPSAFE
   2891 	splx(s);
   2892 #endif
   2893 
   2894 	if (!sc->sc_core_stopping)
   2895 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2896 }
   2897 
   2898 static int
   2899 wm_ifflags_cb(struct ethercom *ec)
   2900 {
   2901 	struct ifnet *ifp = &ec->ec_if;
   2902 	struct wm_softc *sc = ifp->if_softc;
   2903 	int rc = 0;
   2904 
   2905 	WM_CORE_LOCK(sc);
   2906 
   2907 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2908 	sc->sc_if_flags = ifp->if_flags;
   2909 
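        	/*
        	 * Note (added for clarity): any change outside IFF_CANTCHANGE
        	 * and IFF_DEBUG requires a full reinitialization (ENETRESET);
        	 * a bare promiscuous/allmulti toggle only needs the receive
        	 * filter reprogrammed below.
        	 */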
   2910 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2911 		rc = ENETRESET;
   2912 		goto out;
   2913 	}
   2914 
   2915 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2916 		wm_set_filter(sc);
   2917 
   2918 	wm_set_vlan(sc);
   2919 
   2920 out:
   2921 	WM_CORE_UNLOCK(sc);
   2922 
   2923 	return rc;
   2924 }
   2925 
   2926 /*
   2927  * wm_ioctl:		[ifnet interface function]
   2928  *
   2929  *	Handle control requests from the operator.
   2930  */
   2931 static int
   2932 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2933 {
   2934 	struct wm_softc *sc = ifp->if_softc;
   2935 	struct ifreq *ifr = (struct ifreq *) data;
   2936 	struct ifaddr *ifa = (struct ifaddr *)data;
   2937 	struct sockaddr_dl *sdl;
   2938 	int s, error;
   2939 
   2940 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2941 		device_xname(sc->sc_dev), __func__));
   2942 
   2943 #ifndef WM_MPSAFE
   2944 	s = splnet();
   2945 #endif
   2946 	switch (cmd) {
   2947 	case SIOCSIFMEDIA:
   2948 	case SIOCGIFMEDIA:
   2949 		WM_CORE_LOCK(sc);
   2950 		/* Flow control requires full-duplex mode. */
   2951 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2952 		    (ifr->ifr_media & IFM_FDX) == 0)
   2953 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2954 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2955 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2956 				/* We can do both TXPAUSE and RXPAUSE. */
   2957 				ifr->ifr_media |=
   2958 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2959 			}
   2960 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2961 		}
   2962 		WM_CORE_UNLOCK(sc);
   2963 #ifdef WM_MPSAFE
   2964 		s = splnet();
   2965 #endif
   2966 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2967 #ifdef WM_MPSAFE
   2968 		splx(s);
   2969 #endif
   2970 		break;
   2971 	case SIOCINITIFADDR:
   2972 		WM_CORE_LOCK(sc);
   2973 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2974 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2975 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2976 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2977 			/* unicast address is first multicast entry */
   2978 			wm_set_filter(sc);
   2979 			error = 0;
   2980 			WM_CORE_UNLOCK(sc);
   2981 			break;
   2982 		}
   2983 		WM_CORE_UNLOCK(sc);
   2984 		/*FALLTHROUGH*/
   2985 	default:
   2986 #ifdef WM_MPSAFE
   2987 		s = splnet();
   2988 #endif
   2989 		/* It may call wm_start, so unlock here */
   2990 		error = ether_ioctl(ifp, cmd, data);
   2991 #ifdef WM_MPSAFE
   2992 		splx(s);
   2993 #endif
   2994 		if (error != ENETRESET)
   2995 			break;
   2996 
   2997 		error = 0;
   2998 
   2999 		if (cmd == SIOCSIFCAP) {
   3000 			error = (*ifp->if_init)(ifp);
   3001 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3002 			;
   3003 		else if (ifp->if_flags & IFF_RUNNING) {
   3004 			/*
   3005 			 * Multicast list has changed; set the hardware filter
   3006 			 * accordingly.
   3007 			 */
   3008 			WM_CORE_LOCK(sc);
   3009 			wm_set_filter(sc);
   3010 			WM_CORE_UNLOCK(sc);
   3011 		}
   3012 		break;
   3013 	}
   3014 
   3015 #ifndef WM_MPSAFE
   3016 	splx(s);
   3017 #endif
   3018 	return error;
   3019 }
   3020 
   3021 /* MAC address related */
   3022 
   3023 /*
   3024  * Get the offset of the MAC address and return it.
   3025  * If an error occurs, offset 0 is used.
   3026  */
   3027 static uint16_t
   3028 wm_check_alt_mac_addr(struct wm_softc *sc)
   3029 {
   3030 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3031 	uint16_t offset = NVM_OFF_MACADDR;
   3032 
   3033 	/* Try to read alternative MAC address pointer */
   3034 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3035 		return 0;
   3036 
   3037 	/* Check whether the pointer is valid. */
   3038 	if ((offset == 0x0000) || (offset == 0xffff))
   3039 		return 0;
   3040 
   3041 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3042 	/*
   3043 	 * Check whether the alternative MAC address is valid.
   3044 	 * Some cards have a non-0xffff pointer but don't actually
   3045 	 * use an alternative MAC address.
   3046 	 *
   3047 	 * A valid unicast address has the multicast (I/G) bit clear.
   3048 	 */
   3049 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3050 		if (((myea[0] & 0xff) & 0x01) == 0)
   3051 			return offset; /* Found */
   3052 
   3053 	/* Not found */
   3054 	return 0;
   3055 }
   3056 
   3057 static int
   3058 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3059 {
   3060 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3061 	uint16_t offset = NVM_OFF_MACADDR;
   3062 	int do_invert = 0;
   3063 
   3064 	switch (sc->sc_type) {
   3065 	case WM_T_82580:
   3066 	case WM_T_I350:
   3067 	case WM_T_I354:
   3068 		/* EEPROM Top Level Partitioning */
   3069 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3070 		break;
   3071 	case WM_T_82571:
   3072 	case WM_T_82575:
   3073 	case WM_T_82576:
   3074 	case WM_T_80003:
   3075 	case WM_T_I210:
   3076 	case WM_T_I211:
   3077 		offset = wm_check_alt_mac_addr(sc);
   3078 		if (offset == 0)
   3079 			if ((sc->sc_funcid & 0x01) == 1)
   3080 				do_invert = 1;
   3081 		break;
   3082 	default:
   3083 		if ((sc->sc_funcid & 0x01) == 1)
   3084 			do_invert = 1;
   3085 		break;
   3086 	}
   3087 
   3088 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3089 		goto bad;
   3090 
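        	/*
        	 * Note (added for clarity): each 16-bit NVM word holds two
        	 * address octets in little-endian order, so e.g. myea[0] ==
        	 * 0x2212 unpacks to enaddr[0] = 0x12 and enaddr[1] = 0x22.
        	 */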
   3091 	enaddr[0] = myea[0] & 0xff;
   3092 	enaddr[1] = myea[0] >> 8;
   3093 	enaddr[2] = myea[1] & 0xff;
   3094 	enaddr[3] = myea[1] >> 8;
   3095 	enaddr[4] = myea[2] & 0xff;
   3096 	enaddr[5] = myea[2] >> 8;
   3097 
   3098 	/*
   3099 	 * Toggle the LSB of the MAC address on the second port
   3100 	 * of some dual port cards.
   3101 	 */
   3102 	if (do_invert != 0)
   3103 		enaddr[5] ^= 1;
   3104 
   3105 	return 0;
   3106 
   3107  bad:
   3108 	return -1;
   3109 }
   3110 
   3111 /*
   3112  * wm_set_ral:
   3113  *
   3114  *	Set an entry in the receive address list.
   3115  */
   3116 static void
   3117 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3118 {
   3119 	uint32_t ral_lo, ral_hi;
   3120 
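        	/*
        	 * Layout note (added for clarity): the 6-byte address is
        	 * split across two 32-bit registers, bytes 0-3 in RAL_LO and
        	 * bytes 4-5 in RAL_HI together with the Address Valid
        	 * (RAL_AV) bit.
        	 */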
   3121 	if (enaddr != NULL) {
   3122 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3123 		    (enaddr[3] << 24);
   3124 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3125 		ral_hi |= RAL_AV;
   3126 	} else {
   3127 		ral_lo = 0;
   3128 		ral_hi = 0;
   3129 	}
   3130 
   3131 	if (sc->sc_type >= WM_T_82544) {
   3132 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3133 		    ral_lo);
   3134 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3135 		    ral_hi);
   3136 	} else {
   3137 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3138 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3139 	}
   3140 }
   3141 
   3142 /*
   3143  * wm_mchash:
   3144  *
   3145  *	Compute the hash of the multicast address for the 4096-bit
   3146  *	multicast filter.
   3147  */
   3148 static uint32_t
   3149 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3150 {
   3151 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3152 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3153 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3154 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
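        	/*
        	 * Illustrative example (not from the original source): with
        	 * sc_mchash_type == 0 on a non-ICH/PCH chip, an address ending
        	 * in ...:a4:5b hashes to (0xa4 >> 4) | (0x5b << 4) = 0x5ba,
        	 * which already fits within the 0xfff mask below.
        	 */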
   3155 	uint32_t hash;
   3156 
   3157 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3158 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3159 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3160 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3161 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3162 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3163 		return (hash & 0x3ff);
   3164 	}
   3165 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3166 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3167 
   3168 	return (hash & 0xfff);
   3169 }
   3170 
   3171 /*
   3172  * wm_set_filter:
   3173  *
   3174  *	Set up the receive filter.
   3175  */
   3176 static void
   3177 wm_set_filter(struct wm_softc *sc)
   3178 {
   3179 	struct ethercom *ec = &sc->sc_ethercom;
   3180 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3181 	struct ether_multi *enm;
   3182 	struct ether_multistep step;
   3183 	bus_addr_t mta_reg;
   3184 	uint32_t hash, reg, bit;
   3185 	int i, size, ralmax;
   3186 
   3187 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3188 		device_xname(sc->sc_dev), __func__));
   3189 
   3190 	if (sc->sc_type >= WM_T_82544)
   3191 		mta_reg = WMREG_CORDOVA_MTA;
   3192 	else
   3193 		mta_reg = WMREG_MTA;
   3194 
   3195 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3196 
   3197 	if (ifp->if_flags & IFF_BROADCAST)
   3198 		sc->sc_rctl |= RCTL_BAM;
   3199 	if (ifp->if_flags & IFF_PROMISC) {
   3200 		sc->sc_rctl |= RCTL_UPE;
   3201 		goto allmulti;
   3202 	}
   3203 
   3204 	/*
   3205 	 * Set the station address in the first RAL slot, and
   3206 	 * clear the remaining slots.
   3207 	 */
   3208 	if (sc->sc_type == WM_T_ICH8)
   3209 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3210 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3211 	    || (sc->sc_type == WM_T_PCH))
   3212 		size = WM_RAL_TABSIZE_ICH8;
   3213 	else if (sc->sc_type == WM_T_PCH2)
   3214 		size = WM_RAL_TABSIZE_PCH2;
   3215 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3216 		size = WM_RAL_TABSIZE_PCH_LPT;
   3217 	else if (sc->sc_type == WM_T_82575)
   3218 		size = WM_RAL_TABSIZE_82575;
   3219 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3220 		size = WM_RAL_TABSIZE_82576;
   3221 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3222 		size = WM_RAL_TABSIZE_I350;
   3223 	else
   3224 		size = WM_RAL_TABSIZE;
   3225 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3226 
   3227 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3228 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3229 		switch (i) {
   3230 		case 0:
   3231 			/* We can use all entries */
   3232 			ralmax = size;
   3233 			break;
   3234 		case 1:
   3235 			/* Only RAR[0] */
   3236 			ralmax = 1;
   3237 			break;
   3238 		default:
   3239 			/* available SHRA + RAR[0] */
   3240 			ralmax = i + 1;
   3241 		}
   3242 	} else
   3243 		ralmax = size;
   3244 	for (i = 1; i < size; i++) {
   3245 		if (i < ralmax)
   3246 			wm_set_ral(sc, NULL, i);
   3247 	}
   3248 
   3249 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3250 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3251 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3252 	    || (sc->sc_type == WM_T_PCH_SPT))
   3253 		size = WM_ICH8_MC_TABSIZE;
   3254 	else
   3255 		size = WM_MC_TABSIZE;
   3256 	/* Clear out the multicast table. */
   3257 	for (i = 0; i < size; i++)
   3258 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3259 
   3260 	ETHER_FIRST_MULTI(step, ec, enm);
   3261 	while (enm != NULL) {
   3262 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3263 			/*
   3264 			 * We must listen to a range of multicast addresses.
   3265 			 * For now, just accept all multicasts, rather than
   3266 			 * trying to set only those filter bits needed to match
   3267 			 * the range.  (At this time, the only use of address
   3268 			 * ranges is for IP multicast routing, for which the
   3269 			 * range is big enough to require all bits set.)
   3270 			 */
   3271 			goto allmulti;
   3272 		}
   3273 
   3274 		hash = wm_mchash(sc, enm->enm_addrlo);
   3275 
   3276 		reg = (hash >> 5);
   3277 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3278 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3279 		    || (sc->sc_type == WM_T_PCH2)
   3280 		    || (sc->sc_type == WM_T_PCH_LPT)
   3281 		    || (sc->sc_type == WM_T_PCH_SPT))
   3282 			reg &= 0x1f;
   3283 		else
   3284 			reg &= 0x7f;
   3285 		bit = hash & 0x1f;
   3286 
   3287 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3288 		hash |= 1U << bit;
   3289 
   3290 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3291 			/*
   3292 			 * 82544 Errata 9: Certain register cannot be written
   3293 			 * with particular alignments in PCI-X bus operation
   3294 			 * (FCAH, MTA and VFTA).
   3295 			 */
   3296 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3297 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3298 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3299 		} else
   3300 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3301 
   3302 		ETHER_NEXT_MULTI(step, enm);
   3303 	}
   3304 
   3305 	ifp->if_flags &= ~IFF_ALLMULTI;
   3306 	goto setit;
   3307 
   3308  allmulti:
   3309 	ifp->if_flags |= IFF_ALLMULTI;
   3310 	sc->sc_rctl |= RCTL_MPE;
   3311 
   3312  setit:
   3313 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3314 }
   3315 
   3316 /* Reset and init related */
   3317 
   3318 static void
   3319 wm_set_vlan(struct wm_softc *sc)
   3320 {
   3321 
   3322 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3323 		device_xname(sc->sc_dev), __func__));
   3324 
   3325 	/* Deal with VLAN enables. */
   3326 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3327 		sc->sc_ctrl |= CTRL_VME;
   3328 	else
   3329 		sc->sc_ctrl &= ~CTRL_VME;
   3330 
   3331 	/* Write the control registers. */
   3332 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3333 }
   3334 
   3335 static void
   3336 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3337 {
   3338 	uint32_t gcr;
   3339 	pcireg_t ctrl2;
   3340 
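        	/*
        	 * Summary (added for clarity): a non-default timeout is left
        	 * alone; otherwise ~10ms is set through GCR on parts without
        	 * capability version 2, and ~16ms through the PCIe Device
        	 * Control 2 register on parts that have it.
        	 */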
   3341 	gcr = CSR_READ(sc, WMREG_GCR);
   3342 
   3343 	/* Only take action if timeout value is defaulted to 0 */
   3344 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3345 		goto out;
   3346 
   3347 	if ((gcr & GCR_CAP_VER2) == 0) {
   3348 		gcr |= GCR_CMPL_TMOUT_10MS;
   3349 		goto out;
   3350 	}
   3351 
   3352 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3353 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3354 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3355 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3356 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3357 
   3358 out:
   3359 	/* Disable completion timeout resend */
   3360 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3361 
   3362 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3363 }
   3364 
   3365 void
   3366 wm_get_auto_rd_done(struct wm_softc *sc)
   3367 {
   3368 	int i;
   3369 
   3370 	/* wait for eeprom to reload */
   3371 	switch (sc->sc_type) {
   3372 	case WM_T_82571:
   3373 	case WM_T_82572:
   3374 	case WM_T_82573:
   3375 	case WM_T_82574:
   3376 	case WM_T_82583:
   3377 	case WM_T_82575:
   3378 	case WM_T_82576:
   3379 	case WM_T_82580:
   3380 	case WM_T_I350:
   3381 	case WM_T_I354:
   3382 	case WM_T_I210:
   3383 	case WM_T_I211:
   3384 	case WM_T_80003:
   3385 	case WM_T_ICH8:
   3386 	case WM_T_ICH9:
   3387 		for (i = 0; i < 10; i++) {
   3388 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3389 				break;
   3390 			delay(1000);
   3391 		}
   3392 		if (i == 10) {
   3393 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3394 			    "complete\n", device_xname(sc->sc_dev));
   3395 		}
   3396 		break;
   3397 	default:
   3398 		break;
   3399 	}
   3400 }
   3401 
   3402 void
   3403 wm_lan_init_done(struct wm_softc *sc)
   3404 {
   3405 	uint32_t reg = 0;
   3406 	int i;
   3407 
   3408 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3409 		device_xname(sc->sc_dev), __func__));
   3410 
   3411 	/* Wait for eeprom to reload */
   3412 	switch (sc->sc_type) {
   3413 	case WM_T_ICH10:
   3414 	case WM_T_PCH:
   3415 	case WM_T_PCH2:
   3416 	case WM_T_PCH_LPT:
   3417 	case WM_T_PCH_SPT:
   3418 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3419 			reg = CSR_READ(sc, WMREG_STATUS);
   3420 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3421 				break;
   3422 			delay(100);
   3423 		}
   3424 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3425 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3426 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3427 		}
   3428 		break;
   3429 	default:
   3430 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3431 		    __func__);
   3432 		break;
   3433 	}
   3434 
   3435 	reg &= ~STATUS_LAN_INIT_DONE;
   3436 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3437 }
   3438 
   3439 void
   3440 wm_get_cfg_done(struct wm_softc *sc)
   3441 {
   3442 	int mask;
   3443 	uint32_t reg;
   3444 	int i;
   3445 
   3446 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3447 		device_xname(sc->sc_dev), __func__));
   3448 
   3449 	/* Wait for eeprom to reload */
   3450 	switch (sc->sc_type) {
   3451 	case WM_T_82542_2_0:
   3452 	case WM_T_82542_2_1:
   3453 		/* null */
   3454 		break;
   3455 	case WM_T_82543:
   3456 	case WM_T_82544:
   3457 	case WM_T_82540:
   3458 	case WM_T_82545:
   3459 	case WM_T_82545_3:
   3460 	case WM_T_82546:
   3461 	case WM_T_82546_3:
   3462 	case WM_T_82541:
   3463 	case WM_T_82541_2:
   3464 	case WM_T_82547:
   3465 	case WM_T_82547_2:
   3466 	case WM_T_82573:
   3467 	case WM_T_82574:
   3468 	case WM_T_82583:
   3469 		/* generic */
   3470 		delay(10*1000);
   3471 		break;
   3472 	case WM_T_80003:
   3473 	case WM_T_82571:
   3474 	case WM_T_82572:
   3475 	case WM_T_82575:
   3476 	case WM_T_82576:
   3477 	case WM_T_82580:
   3478 	case WM_T_I350:
   3479 	case WM_T_I354:
   3480 	case WM_T_I210:
   3481 	case WM_T_I211:
   3482 		if (sc->sc_type == WM_T_82571) {
   3483 			/* Only 82571 shares port 0 */
   3484 			mask = EEMNGCTL_CFGDONE_0;
   3485 		} else
   3486 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3487 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3488 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3489 				break;
   3490 			delay(1000);
   3491 		}
   3492 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3493 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3494 				device_xname(sc->sc_dev), __func__));
   3495 		}
   3496 		break;
   3497 	case WM_T_ICH8:
   3498 	case WM_T_ICH9:
   3499 	case WM_T_ICH10:
   3500 	case WM_T_PCH:
   3501 	case WM_T_PCH2:
   3502 	case WM_T_PCH_LPT:
   3503 	case WM_T_PCH_SPT:
   3504 		delay(10*1000);
   3505 		if (sc->sc_type >= WM_T_ICH10)
   3506 			wm_lan_init_done(sc);
   3507 		else
   3508 			wm_get_auto_rd_done(sc);
   3509 
   3510 		reg = CSR_READ(sc, WMREG_STATUS);
   3511 		if ((reg & STATUS_PHYRA) != 0)
   3512 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3513 		break;
   3514 	default:
   3515 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3516 		    __func__);
   3517 		break;
   3518 	}
   3519 }
   3520 
   3521 /* Init hardware bits */
   3522 void
   3523 wm_initialize_hardware_bits(struct wm_softc *sc)
   3524 {
   3525 	uint32_t tarc0, tarc1, reg;
   3526 
   3527 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3528 		device_xname(sc->sc_dev), __func__));
   3529 
   3530 	/* For 82571 variant, 80003 and ICHs */
   3531 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3532 	    || (sc->sc_type >= WM_T_80003)) {
   3533 
   3534 		/* Transmit Descriptor Control 0 */
   3535 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3536 		reg |= TXDCTL_COUNT_DESC;
   3537 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3538 
   3539 		/* Transmit Descriptor Control 1 */
   3540 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3541 		reg |= TXDCTL_COUNT_DESC;
   3542 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3543 
   3544 		/* TARC0 */
   3545 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3546 		switch (sc->sc_type) {
   3547 		case WM_T_82571:
   3548 		case WM_T_82572:
   3549 		case WM_T_82573:
   3550 		case WM_T_82574:
   3551 		case WM_T_82583:
   3552 		case WM_T_80003:
   3553 			/* Clear bits 30..27 */
   3554 			tarc0 &= ~__BITS(30, 27);
   3555 			break;
   3556 		default:
   3557 			break;
   3558 		}
   3559 
   3560 		switch (sc->sc_type) {
   3561 		case WM_T_82571:
   3562 		case WM_T_82572:
   3563 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3564 
   3565 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3566 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3567 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3568 			/* 8257[12] Errata No.7 */
   3569 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3570 
   3571 			/* TARC1 bit 28 */
   3572 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3573 				tarc1 &= ~__BIT(28);
   3574 			else
   3575 				tarc1 |= __BIT(28);
   3576 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3577 
   3578 			/*
   3579 			 * 8257[12] Errata No.13
   3580 			 * Disable Dynamic Clock Gating.
   3581 			 */
   3582 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3583 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3584 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3585 			break;
   3586 		case WM_T_82573:
   3587 		case WM_T_82574:
   3588 		case WM_T_82583:
   3589 			if ((sc->sc_type == WM_T_82574)
   3590 			    || (sc->sc_type == WM_T_82583))
   3591 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3592 
   3593 			/* Extended Device Control */
   3594 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3595 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3596 			reg |= __BIT(22);	/* Set bit 22 */
   3597 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3598 
   3599 			/* Device Control */
   3600 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3601 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3602 
   3603 			/* PCIe Control Register */
   3604 			/*
   3605 			 * 82573 Errata (unknown).
   3606 			 *
   3607 			 * 82574 Errata 25 and 82583 Errata 12
   3608 			 * "Dropped Rx Packets":
   3609 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3610 			 */
   3611 			reg = CSR_READ(sc, WMREG_GCR);
   3612 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3613 			CSR_WRITE(sc, WMREG_GCR, reg);
   3614 
   3615 			if ((sc->sc_type == WM_T_82574)
   3616 			    || (sc->sc_type == WM_T_82583)) {
   3617 				/*
   3618 				 * Document says this bit must be set for
   3619 				 * proper operation.
   3620 				 */
   3621 				reg = CSR_READ(sc, WMREG_GCR);
   3622 				reg |= __BIT(22);
   3623 				CSR_WRITE(sc, WMREG_GCR, reg);
   3624 
   3625 				/*
   3626 				 * Apply a workaround for a hardware erratum
   3627 				 * documented in the errata docs. It fixes an
   3628 				 * issue where error-prone or unreliable PCIe
   3629 				 * completions occur, particularly with ASPM
   3630 				 * enabled. Without the fix, the issue can
   3631 				 * cause Tx timeouts.
   3632 				 */
   3633 				reg = CSR_READ(sc, WMREG_GCR2);
   3634 				reg |= __BIT(0);
   3635 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3636 			}
   3637 			break;
   3638 		case WM_T_80003:
   3639 			/* TARC0 */
   3640 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3641 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3642 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3643 
   3644 			/* TARC1 bit 28 */
   3645 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3646 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3647 				tarc1 &= ~__BIT(28);
   3648 			else
   3649 				tarc1 |= __BIT(28);
   3650 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3651 			break;
   3652 		case WM_T_ICH8:
   3653 		case WM_T_ICH9:
   3654 		case WM_T_ICH10:
   3655 		case WM_T_PCH:
   3656 		case WM_T_PCH2:
   3657 		case WM_T_PCH_LPT:
   3658 		case WM_T_PCH_SPT:
   3659 			/* TARC0 */
   3660 			if ((sc->sc_type == WM_T_ICH8)
   3661 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3662 				/* Set TARC0 bits 29 and 28 */
   3663 				tarc0 |= __BITS(29, 28);
   3664 			}
   3665 			/* Set TARC0 bits 23,24,26,27 */
   3666 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3667 
   3668 			/* CTRL_EXT */
   3669 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3670 			reg |= __BIT(22);	/* Set bit 22 */
   3671 			/*
   3672 			 * Enable PHY low-power state when MAC is at D3
   3673 			 * w/o WoL
   3674 			 */
   3675 			if (sc->sc_type >= WM_T_PCH)
   3676 				reg |= CTRL_EXT_PHYPDEN;
   3677 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3678 
   3679 			/* TARC1 */
   3680 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3681 			/* bit 28 */
   3682 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3683 				tarc1 &= ~__BIT(28);
   3684 			else
   3685 				tarc1 |= __BIT(28);
   3686 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3687 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3688 
   3689 			/* Device Status */
   3690 			if (sc->sc_type == WM_T_ICH8) {
   3691 				reg = CSR_READ(sc, WMREG_STATUS);
   3692 				reg &= ~__BIT(31);
   3693 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3694 
   3695 			}
   3696 
   3697 			/* IOSFPC */
   3698 			if (sc->sc_type == WM_T_PCH_SPT) {
   3699 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3700 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3701 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3702 			}
   3703 			/*
   3704 			 * Work-around descriptor data corruption issue during
   3705 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3706 			 * capability.
   3707 			 */
   3708 			reg = CSR_READ(sc, WMREG_RFCTL);
   3709 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3710 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3711 			break;
   3712 		default:
   3713 			break;
   3714 		}
   3715 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3716 
   3717 		/*
   3718 		 * 8257[12] Errata No.52 and some others.
   3719 		 * Avoid RSS Hash Value bug.
   3720 		 */
   3721 		switch (sc->sc_type) {
   3722 		case WM_T_82571:
   3723 		case WM_T_82572:
   3724 		case WM_T_82573:
   3725 		case WM_T_80003:
   3726 		case WM_T_ICH8:
   3727 			reg = CSR_READ(sc, WMREG_RFCTL);
   3728 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3729 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3730 			break;
   3731 		default:
   3732 			break;
   3733 		}
   3734 	}
   3735 }
   3736 
   3737 static uint32_t
   3738 wm_rxpbs_adjust_82580(uint32_t val)
   3739 {
   3740 	uint32_t rv = 0;
   3741 
   3742 	if (val < __arraycount(wm_82580_rxpbs_table))
   3743 		rv = wm_82580_rxpbs_table[val];
   3744 
   3745 	return rv;
   3746 }
   3747 
   3748 /*
   3749  * wm_reset:
   3750  *
   3751  *	Reset the i82542 chip.
   3752  */
   3753 static void
   3754 wm_reset(struct wm_softc *sc)
   3755 {
   3756 	int phy_reset = 0;
   3757 	int i, error = 0;
   3758 	uint32_t reg;
   3759 
   3760 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3761 		device_xname(sc->sc_dev), __func__));
   3762 	KASSERT(sc->sc_type != 0);
   3763 
   3764 	/*
   3765 	 * Allocate on-chip memory according to the MTU size.
   3766 	 * The Packet Buffer Allocation register must be written
   3767 	 * before the chip is reset.
   3768 	 */
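        	/*
        	 * Note (added for clarity): sc_pba is the Rx packet-buffer
        	 * size in kilobytes; on the 82547/82547_2 the remainder of
        	 * the 40KB on-chip buffer is carved out below as the Tx FIFO.
        	 */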
   3769 	switch (sc->sc_type) {
   3770 	case WM_T_82547:
   3771 	case WM_T_82547_2:
   3772 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3773 		    PBA_22K : PBA_30K;
   3774 		for (i = 0; i < sc->sc_nqueues; i++) {
   3775 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3776 			txq->txq_fifo_head = 0;
   3777 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3778 			txq->txq_fifo_size =
   3779 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3780 			txq->txq_fifo_stall = 0;
   3781 		}
   3782 		break;
   3783 	case WM_T_82571:
   3784 	case WM_T_82572:
   3785 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3786 	case WM_T_80003:
   3787 		sc->sc_pba = PBA_32K;
   3788 		break;
   3789 	case WM_T_82573:
   3790 		sc->sc_pba = PBA_12K;
   3791 		break;
   3792 	case WM_T_82574:
   3793 	case WM_T_82583:
   3794 		sc->sc_pba = PBA_20K;
   3795 		break;
   3796 	case WM_T_82576:
   3797 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3798 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3799 		break;
   3800 	case WM_T_82580:
   3801 	case WM_T_I350:
   3802 	case WM_T_I354:
   3803 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3804 		break;
   3805 	case WM_T_I210:
   3806 	case WM_T_I211:
   3807 		sc->sc_pba = PBA_34K;
   3808 		break;
   3809 	case WM_T_ICH8:
   3810 		/* Workaround for a bit corruption issue in FIFO memory */
   3811 		sc->sc_pba = PBA_8K;
   3812 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3813 		break;
   3814 	case WM_T_ICH9:
   3815 	case WM_T_ICH10:
   3816 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3817 		    PBA_14K : PBA_10K;
   3818 		break;
   3819 	case WM_T_PCH:
   3820 	case WM_T_PCH2:
   3821 	case WM_T_PCH_LPT:
   3822 	case WM_T_PCH_SPT:
   3823 		sc->sc_pba = PBA_26K;
   3824 		break;
   3825 	default:
   3826 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3827 		    PBA_40K : PBA_48K;
   3828 		break;
   3829 	}
   3830 	/*
   3831 	 * Only old or non-multiqueue devices have the PBA register.
   3832 	 * XXX Need special handling for 82575.
   3833 	 */
   3834 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3835 	    || (sc->sc_type == WM_T_82575))
   3836 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3837 
   3838 	/* Prevent the PCI-E bus from sticking */
   3839 	if (sc->sc_flags & WM_F_PCIE) {
   3840 		int timeout = 800;
   3841 
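        		/*
        		 * Note (added for clarity): with 800 iterations of
        		 * delay(100), this polls for up to ~80ms for outstanding
        		 * PCIe master requests to drain.
        		 */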
   3842 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3843 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3844 
   3845 		while (timeout--) {
   3846 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3847 			    == 0)
   3848 				break;
   3849 			delay(100);
   3850 		}
   3851 	}
   3852 
   3853 	/* Set the completion timeout for interface */
   3854 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3855 	    || (sc->sc_type == WM_T_82580)
   3856 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3857 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3858 		wm_set_pcie_completion_timeout(sc);
   3859 
   3860 	/* Clear interrupt */
   3861 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3862 	if (sc->sc_nintrs > 1) {
   3863 		if (sc->sc_type != WM_T_82574) {
   3864 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3865 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3866 		} else {
   3867 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3868 		}
   3869 	}
   3870 
   3871 	/* Stop the transmit and receive processes. */
   3872 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3873 	sc->sc_rctl &= ~RCTL_EN;
   3874 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3875 	CSR_WRITE_FLUSH(sc);
   3876 
   3877 	/* XXX set_tbi_sbp_82543() */
   3878 
   3879 	delay(10*1000);
   3880 
   3881 	/* Must acquire the MDIO ownership before MAC reset */
   3882 	switch (sc->sc_type) {
   3883 	case WM_T_82573:
   3884 	case WM_T_82574:
   3885 	case WM_T_82583:
   3886 		error = wm_get_hw_semaphore_82573(sc);
   3887 		break;
   3888 	default:
   3889 		break;
   3890 	}
   3891 
   3892 	/*
   3893 	 * 82541 Errata 29? & 82547 Errata 28?
   3894 	 * See also the description about PHY_RST bit in CTRL register
   3895 	 * in 8254x_GBe_SDM.pdf.
   3896 	 */
   3897 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3898 		CSR_WRITE(sc, WMREG_CTRL,
   3899 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3900 		CSR_WRITE_FLUSH(sc);
   3901 		delay(5000);
   3902 	}
   3903 
   3904 	switch (sc->sc_type) {
   3905 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3906 	case WM_T_82541:
   3907 	case WM_T_82541_2:
   3908 	case WM_T_82547:
   3909 	case WM_T_82547_2:
   3910 		/*
   3911 		 * On some chipsets, a reset through a memory-mapped write
   3912 		 * cycle can cause the chip to reset before completing the
   3913 		 * write cycle.  This causes major headache that can be
   3914 		 * avoided by issuing the reset via indirect register writes
   3915 		 * through I/O space.
   3916 		 *
   3917 		 * So, if we successfully mapped the I/O BAR at attach time,
   3918 		 * use that.  Otherwise, try our luck with a memory-mapped
   3919 		 * reset.
   3920 		 */
   3921 		if (sc->sc_flags & WM_F_IOH_VALID)
   3922 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3923 		else
   3924 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3925 		break;
   3926 	case WM_T_82545_3:
   3927 	case WM_T_82546_3:
   3928 		/* Use the shadow control register on these chips. */
   3929 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3930 		break;
   3931 	case WM_T_80003:
   3932 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3933 		sc->phy.acquire(sc);
   3934 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3935 		sc->phy.release(sc);
   3936 		break;
   3937 	case WM_T_ICH8:
   3938 	case WM_T_ICH9:
   3939 	case WM_T_ICH10:
   3940 	case WM_T_PCH:
   3941 	case WM_T_PCH2:
   3942 	case WM_T_PCH_LPT:
   3943 	case WM_T_PCH_SPT:
   3944 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3945 		if (wm_phy_resetisblocked(sc) == false) {
   3946 			/*
   3947 			 * Gate automatic PHY configuration by hardware on
   3948 			 * non-managed 82579
   3949 			 */
   3950 			if ((sc->sc_type == WM_T_PCH2)
   3951 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3952 				== 0))
   3953 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3954 
   3955 			reg |= CTRL_PHY_RESET;
   3956 			phy_reset = 1;
   3957 		} else
   3958 			printf("XXX reset is blocked!!!\n");
   3959 		sc->phy.acquire(sc);
   3960 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3961 		/* Don't insert a completion barrier during reset */
   3962 		delay(20*1000);
   3963 		mutex_exit(sc->sc_ich_phymtx);
   3964 		break;
   3965 	case WM_T_82580:
   3966 	case WM_T_I350:
   3967 	case WM_T_I354:
   3968 	case WM_T_I210:
   3969 	case WM_T_I211:
   3970 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3971 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3972 			CSR_WRITE_FLUSH(sc);
   3973 		delay(5000);
   3974 		break;
   3975 	case WM_T_82542_2_0:
   3976 	case WM_T_82542_2_1:
   3977 	case WM_T_82543:
   3978 	case WM_T_82540:
   3979 	case WM_T_82545:
   3980 	case WM_T_82546:
   3981 	case WM_T_82571:
   3982 	case WM_T_82572:
   3983 	case WM_T_82573:
   3984 	case WM_T_82574:
   3985 	case WM_T_82575:
   3986 	case WM_T_82576:
   3987 	case WM_T_82583:
   3988 	default:
   3989 		/* Everything else can safely use the documented method. */
   3990 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3991 		break;
   3992 	}
   3993 
   3994 	/* Must release the MDIO ownership after MAC reset */
   3995 	switch (sc->sc_type) {
   3996 	case WM_T_82573:
   3997 	case WM_T_82574:
   3998 	case WM_T_82583:
   3999 		if (error == 0)
   4000 			wm_put_hw_semaphore_82573(sc);
   4001 		break;
   4002 	default:
   4003 		break;
   4004 	}
   4005 
   4006 	if (phy_reset != 0) {
   4007 		wm_get_cfg_done(sc);
   4008 		delay(10 * 1000);
   4009 		if (sc->sc_type >= WM_T_PCH) {
   4010 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4011 			    BM_PORT_GEN_CFG);
   4012 			reg &= ~BM_WUC_HOST_WU_BIT;
   4013 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   4014 			    BM_PORT_GEN_CFG, reg);
   4015 		}
   4016 	}
   4017 
   4018 	/* reload EEPROM */
   4019 	switch (sc->sc_type) {
   4020 	case WM_T_82542_2_0:
   4021 	case WM_T_82542_2_1:
   4022 	case WM_T_82543:
   4023 	case WM_T_82544:
   4024 		delay(10);
   4025 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4026 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4027 		CSR_WRITE_FLUSH(sc);
   4028 		delay(2000);
   4029 		break;
   4030 	case WM_T_82540:
   4031 	case WM_T_82545:
   4032 	case WM_T_82545_3:
   4033 	case WM_T_82546:
   4034 	case WM_T_82546_3:
   4035 		delay(5*1000);
   4036 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4037 		break;
   4038 	case WM_T_82541:
   4039 	case WM_T_82541_2:
   4040 	case WM_T_82547:
   4041 	case WM_T_82547_2:
   4042 		delay(20000);
   4043 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4044 		break;
   4045 	case WM_T_82571:
   4046 	case WM_T_82572:
   4047 	case WM_T_82573:
   4048 	case WM_T_82574:
   4049 	case WM_T_82583:
   4050 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4051 			delay(10);
   4052 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4053 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4054 			CSR_WRITE_FLUSH(sc);
   4055 		}
   4056 		/* check EECD_EE_AUTORD */
   4057 		wm_get_auto_rd_done(sc);
   4058 		/*
   4059 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4060 		 * is set.
   4061 		 */
   4062 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4063 		    || (sc->sc_type == WM_T_82583))
   4064 			delay(25*1000);
   4065 		break;
   4066 	case WM_T_82575:
   4067 	case WM_T_82576:
   4068 	case WM_T_82580:
   4069 	case WM_T_I350:
   4070 	case WM_T_I354:
   4071 	case WM_T_I210:
   4072 	case WM_T_I211:
   4073 	case WM_T_80003:
   4074 		/* check EECD_EE_AUTORD */
   4075 		wm_get_auto_rd_done(sc);
   4076 		break;
   4077 	case WM_T_ICH8:
   4078 	case WM_T_ICH9:
   4079 	case WM_T_ICH10:
   4080 	case WM_T_PCH:
   4081 	case WM_T_PCH2:
   4082 	case WM_T_PCH_LPT:
   4083 	case WM_T_PCH_SPT:
   4084 		break;
   4085 	default:
   4086 		panic("%s: unknown type\n", __func__);
   4087 	}
   4088 
   4089 	/* Check whether EEPROM is present or not */
   4090 	switch (sc->sc_type) {
   4091 	case WM_T_82575:
   4092 	case WM_T_82576:
   4093 	case WM_T_82580:
   4094 	case WM_T_I350:
   4095 	case WM_T_I354:
   4096 	case WM_T_ICH8:
   4097 	case WM_T_ICH9:
   4098 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4099 			/* Not found */
   4100 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4101 			if (sc->sc_type == WM_T_82575)
   4102 				wm_reset_init_script_82575(sc);
   4103 		}
   4104 		break;
   4105 	default:
   4106 		break;
   4107 	}
   4108 
   4109 	if ((sc->sc_type == WM_T_82580)
   4110 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4111 		/* clear global device reset status bit */
   4112 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4113 	}
   4114 
   4115 	/* Clear any pending interrupt events. */
   4116 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4117 	reg = CSR_READ(sc, WMREG_ICR);
   4118 	if (sc->sc_nintrs > 1) {
   4119 		if (sc->sc_type != WM_T_82574) {
   4120 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4121 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4122 		} else
   4123 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4124 	}
   4125 
   4126 	/* reload sc_ctrl */
   4127 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4128 
   4129 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4130 		wm_set_eee_i350(sc);
   4131 
   4132 	/* dummy read from WUC */
   4133 	if (sc->sc_type == WM_T_PCH)
   4134 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4135 	/*
   4136 	 * For PCH, this write will make sure that any noise will be detected
   4137 	 * as a CRC error and be dropped rather than show up as a bad packet
   4138 	 * to the DMA engine
   4139 	 */
   4140 	if (sc->sc_type == WM_T_PCH)
   4141 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4142 
   4143 	if (sc->sc_type >= WM_T_82544)
   4144 		CSR_WRITE(sc, WMREG_WUC, 0);
   4145 
   4146 	wm_reset_mdicnfg_82580(sc);
   4147 
   4148 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4149 		wm_pll_workaround_i210(sc);
   4150 }
   4151 
   4152 /*
   4153  * wm_add_rxbuf:
   4154  *
   4155  *	Add a receive buffer to the indicated descriptor.
   4156  */
   4157 static int
   4158 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4159 {
   4160 	struct wm_softc *sc = rxq->rxq_sc;
   4161 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4162 	struct mbuf *m;
   4163 	int error;
   4164 
   4165 	KASSERT(mutex_owned(rxq->rxq_lock));
   4166 
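        	/*
        	 * Note (added for clarity): allocate an mbuf header and
        	 * attach a cluster large enough for a full frame; either
        	 * allocation may fail under memory pressure, in which case
        	 * ENOBUFS is returned and the old buffer is left in place.
        	 */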
   4167 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4168 	if (m == NULL)
   4169 		return ENOBUFS;
   4170 
   4171 	MCLGET(m, M_DONTWAIT);
   4172 	if ((m->m_flags & M_EXT) == 0) {
   4173 		m_freem(m);
   4174 		return ENOBUFS;
   4175 	}
   4176 
   4177 	if (rxs->rxs_mbuf != NULL)
   4178 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4179 
   4180 	rxs->rxs_mbuf = m;
   4181 
   4182 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4183 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4184 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4185 	if (error) {
   4186 		/* XXX XXX XXX */
   4187 		aprint_error_dev(sc->sc_dev,
   4188 		    "unable to load rx DMA map %d, error = %d\n",
   4189 		    idx, error);
   4190 		panic("wm_add_rxbuf");
   4191 	}
   4192 
   4193 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4194 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4195 
   4196 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4197 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4198 			wm_init_rxdesc(rxq, idx);
   4199 	} else
   4200 		wm_init_rxdesc(rxq, idx);
   4201 
   4202 	return 0;
   4203 }
   4204 
   4205 /*
   4206  * wm_rxdrain:
   4207  *
   4208  *	Drain the receive queue.
   4209  */
   4210 static void
   4211 wm_rxdrain(struct wm_rxqueue *rxq)
   4212 {
   4213 	struct wm_softc *sc = rxq->rxq_sc;
   4214 	struct wm_rxsoft *rxs;
   4215 	int i;
   4216 
   4217 	KASSERT(mutex_owned(rxq->rxq_lock));
   4218 
   4219 	for (i = 0; i < WM_NRXDESC; i++) {
   4220 		rxs = &rxq->rxq_soft[i];
   4221 		if (rxs->rxs_mbuf != NULL) {
   4222 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4223 			m_freem(rxs->rxs_mbuf);
   4224 			rxs->rxs_mbuf = NULL;
   4225 		}
   4226 	}
   4227 }
   4228 
   4229 
   4230 /*
   4231  * XXX copied from FreeBSD's sys/net/rss_config.c
   4232  */
   4233 /*
   4234  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4235  * effectiveness may be limited by algorithm choice and available entropy
   4236  * during the boot.
   4237  *
   4238  * XXXRW: And that we don't randomize it yet!
   4239  *
   4240  * This is the default Microsoft RSS specification key which is also
   4241  * the Chelsio T5 firmware default key.
   4242  */
   4243 #define RSS_KEYSIZE 40
   4244 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4245 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4246 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4247 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4248 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4249 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4250 };
   4251 
   4252 /*
   4253  * Caller must pass an array of size sizeof(rss_key).
   4254  *
   4255  * XXX
   4256  * As if_ixgbe may use this function, it should not be an
   4257  * if_wm-specific function.
   4258  */
   4259 static void
   4260 wm_rss_getkey(uint8_t *key)
   4261 {
   4262 
   4263 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4264 }
   4265 
   4266 /*
   4267  * Setup registers for RSS.
   4268  *
   4269  * XXX not yet VMDq support
   4270  */
   4271 static void
   4272 wm_init_rss(struct wm_softc *sc)
   4273 {
   4274 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4275 	int i;
   4276 
   4277 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4278 
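        	/*
        	 * Note (added for clarity): redirection-table entries are
        	 * assigned round-robin, so entry i maps to queue
        	 * (i % sc_nqueues); with e.g. four queues the pattern
        	 * 0,1,2,3 repeats through the table.
        	 */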
   4279 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4280 		int qid, reta_ent;
   4281 
   4282 		qid  = i % sc->sc_nqueues;
   4283 		switch (sc->sc_type) {
   4284 		case WM_T_82574:
   4285 			reta_ent = __SHIFTIN(qid,
   4286 			    RETA_ENT_QINDEX_MASK_82574);
   4287 			break;
   4288 		case WM_T_82575:
   4289 			reta_ent = __SHIFTIN(qid,
   4290 			    RETA_ENT_QINDEX1_MASK_82575);
   4291 			break;
   4292 		default:
   4293 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4294 			break;
   4295 		}
   4296 
   4297 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4298 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4299 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4300 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4301 	}
   4302 
   4303 	wm_rss_getkey((uint8_t *)rss_key);
   4304 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4305 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4306 
   4307 	if (sc->sc_type == WM_T_82574)
   4308 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4309 	else
   4310 		mrqc = MRQC_ENABLE_RSS_MQ;
   4311 
   4312 	/* XXXX
   4313 	 * The same as FreeBSD's igb.
   4314 	 * Why isn't MRQC_RSS_FIELD_IPV6_EX used?
   4315 	 */
   4316 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4317 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4318 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4319 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4320 
   4321 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4322 }
   4323 
   4324 /*
   4325  * Adjust the numbers of TX and RX queues the system actually uses.
   4326  *
   4327  * The numbers are affected by the following parameters:
   4328  *     - The number of hardware queues
   4329  *     - The number of MSI-X vectors (= "nvectors" argument)
   4330  *     - ncpu
   4331  */
   4332 static void
   4333 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4334 {
   4335 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4336 
   4337 	if (nvectors < 2) {
   4338 		sc->sc_nqueues = 1;
   4339 		return;
   4340 	}
   4341 
   4342 	switch (sc->sc_type) {
   4343 	case WM_T_82572:
   4344 		hw_ntxqueues = 2;
   4345 		hw_nrxqueues = 2;
   4346 		break;
   4347 	case WM_T_82574:
   4348 		hw_ntxqueues = 2;
   4349 		hw_nrxqueues = 2;
   4350 		break;
   4351 	case WM_T_82575:
   4352 		hw_ntxqueues = 4;
   4353 		hw_nrxqueues = 4;
   4354 		break;
   4355 	case WM_T_82576:
   4356 		hw_ntxqueues = 16;
   4357 		hw_nrxqueues = 16;
   4358 		break;
   4359 	case WM_T_82580:
   4360 	case WM_T_I350:
   4361 	case WM_T_I354:
   4362 		hw_ntxqueues = 8;
   4363 		hw_nrxqueues = 8;
   4364 		break;
   4365 	case WM_T_I210:
   4366 		hw_ntxqueues = 4;
   4367 		hw_nrxqueues = 4;
   4368 		break;
   4369 	case WM_T_I211:
   4370 		hw_ntxqueues = 2;
   4371 		hw_nrxqueues = 2;
   4372 		break;
   4373 		/*
   4374 		 * As the ethernet controllers below do not support MSI-X,
   4375 		 * this driver does not use multiqueue on them:
   4376 		 *     - WM_T_80003
   4377 		 *     - WM_T_ICH8
   4378 		 *     - WM_T_ICH9
   4379 		 *     - WM_T_ICH10
   4380 		 *     - WM_T_PCH
   4381 		 *     - WM_T_PCH2
   4382 		 *     - WM_T_PCH_LPT
   4383 		 */
   4384 	default:
   4385 		hw_ntxqueues = 1;
   4386 		hw_nrxqueues = 1;
   4387 		break;
   4388 	}
   4389 
   4390 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4391 
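        	/*
        	 * Worked example (illustrative): an 82576 (16 HW queues)
        	 * probed with 5 MSI-X vectors on an 8-CPU machine gets
        	 * min(16, 16) = 16 here, is cut to 5 - 1 = 4 vector-backed
        	 * queues below, and stays at 4 because ncpu (8) is not
        	 * smaller than that.
        	 */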
   4392 	/*
   4393 	 * Since more queues than MSI-X vectors cannot improve scaling,
   4394 	 * we limit the number of queues actually used.
   4395 	 */
   4396 	if (nvectors < hw_nqueues + 1) {
   4397 		sc->sc_nqueues = nvectors - 1;
   4398 	} else {
   4399 		sc->sc_nqueues = hw_nqueues;
   4400 	}
   4401 
   4402 	/*
   4403 	 * Since more queues than CPUs cannot improve scaling, we limit
   4404 	 * the number of queues actually used.
   4405 	 */
   4406 	if (ncpu < sc->sc_nqueues)
   4407 		sc->sc_nqueues = ncpu;
   4408 }
   4409 
   4410 /*
   4411  * Both single interrupt MSI and INTx can use this function.
   4412  */
   4413 static int
   4414 wm_setup_legacy(struct wm_softc *sc)
   4415 {
   4416 	pci_chipset_tag_t pc = sc->sc_pc;
   4417 	const char *intrstr = NULL;
   4418 	char intrbuf[PCI_INTRSTR_LEN];
   4419 	int error;
   4420 
   4421 	error = wm_alloc_txrx_queues(sc);
   4422 	if (error) {
   4423 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4424 		    error);
   4425 		return ENOMEM;
   4426 	}
   4427 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4428 	    sizeof(intrbuf));
   4429 #ifdef WM_MPSAFE
   4430 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4431 #endif
   4432 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4433 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4434 	if (sc->sc_ihs[0] == NULL) {
   4435 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4436 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4437 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4438 		return ENOMEM;
   4439 	}
   4440 
   4441 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4442 	sc->sc_nintrs = 1;
   4443 	return 0;
   4444 }
   4445 
   4446 static int
   4447 wm_setup_msix(struct wm_softc *sc)
   4448 {
   4449 	void *vih;
   4450 	kcpuset_t *affinity;
   4451 	int qidx, error, intr_idx, txrx_established;
   4452 	pci_chipset_tag_t pc = sc->sc_pc;
   4453 	const char *intrstr = NULL;
   4454 	char intrbuf[PCI_INTRSTR_LEN];
   4455 	char intr_xname[INTRDEVNAMEBUF];
   4456 
   4457 	if (sc->sc_nqueues < ncpu) {
   4458 		/*
   4459 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   4460 		 * interrupts starts at CPU#1.
   4461 		 */
   4462 		sc->sc_affinity_offset = 1;
   4463 	} else {
   4464 		/*
   4465 		 * In this case the device uses all CPUs, so we unify the
   4466 		 * affinity cpu_index with the MSI-X vector number for readability.
   4467 		 */
   4468 		sc->sc_affinity_offset = 0;
   4469 	}
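        	/*
        	 * Example (added for clarity): with 4 queues on an 8-CPU
        	 * machine, sc_affinity_offset is 1, so TXRX0..TXRX3 are bound
        	 * to CPU#1..CPU#4 and CPU#0 is left for other devices'
        	 * interrupts.
        	 */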
   4470 
   4471 	error = wm_alloc_txrx_queues(sc);
   4472 	if (error) {
   4473 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4474 		    error);
   4475 		return ENOMEM;
   4476 	}
   4477 
   4478 	kcpuset_create(&affinity, false);
   4479 	intr_idx = 0;
   4480 
   4481 	/*
   4482 	 * TX and RX
   4483 	 */
   4484 	txrx_established = 0;
   4485 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4486 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4487 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4488 
   4489 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4490 		    sizeof(intrbuf));
   4491 #ifdef WM_MPSAFE
   4492 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4493 		    PCI_INTR_MPSAFE, true);
   4494 #endif
   4495 		memset(intr_xname, 0, sizeof(intr_xname));
   4496 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4497 		    device_xname(sc->sc_dev), qidx);
   4498 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4499 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4500 		if (vih == NULL) {
   4501 			aprint_error_dev(sc->sc_dev,
   4502 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4503 			    intrstr ? " at " : "",
   4504 			    intrstr ? intrstr : "");
   4505 
   4506 			goto fail;
   4507 		}
   4508 		kcpuset_zero(affinity);
   4509 		/* Round-robin affinity */
   4510 		kcpuset_set(affinity, affinity_to);
   4511 		error = interrupt_distribute(vih, affinity, NULL);
   4512 		if (error == 0) {
   4513 			aprint_normal_dev(sc->sc_dev,
   4514 			    "for TX and RX interrupting at %s affinity to %u\n",
   4515 			    intrstr, affinity_to);
   4516 		} else {
   4517 			aprint_normal_dev(sc->sc_dev,
   4518 			    "for TX and RX interrupting at %s\n", intrstr);
   4519 		}
   4520 		sc->sc_ihs[intr_idx] = vih;
   4521 		wmq->wmq_id = qidx;
   4522 		wmq->wmq_intr_idx = intr_idx;
   4523 
   4524 		txrx_established++;
   4525 		intr_idx++;
   4526 	}
   4527 
   4528 	/*
   4529 	 * LINK
   4530 	 */
   4531 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4532 	    sizeof(intrbuf));
   4533 #ifdef WM_MPSAFE
   4534 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4535 #endif
   4536 	memset(intr_xname, 0, sizeof(intr_xname));
   4537 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4538 	    device_xname(sc->sc_dev));
   4539 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4540 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4541 	if (vih == NULL) {
   4542 		aprint_error_dev(sc->sc_dev,
   4543 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4544 		    intrstr ? " at " : "",
   4545 		    intrstr ? intrstr : "");
   4546 
   4547 		goto fail;
   4548 	}
   4549 	/* Keep the default affinity for the LINK interrupt */
   4550 	aprint_normal_dev(sc->sc_dev,
   4551 	    "for LINK interrupting at %s\n", intrstr);
   4552 	sc->sc_ihs[intr_idx] = vih;
   4553 	sc->sc_link_intr_idx = intr_idx;
   4554 
   4555 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4556 	kcpuset_destroy(affinity);
   4557 	return 0;
   4558 
   4559  fail:
   4560 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4561 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4562 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4563 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4564 	}
   4565 
   4566 	kcpuset_destroy(affinity);
   4567 	return ENOMEM;
   4568 }
   4569 
   4570 static void
   4571 wm_turnon(struct wm_softc *sc)
   4572 {
   4573 	int i;
   4574 
   4575 	for (i = 0; i < sc->sc_nqueues; i++) {
   4576 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4577 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4578 
   4579 		mutex_enter(txq->txq_lock);
   4580 		txq->txq_stopping = false;
   4581 		mutex_exit(txq->txq_lock);
   4582 
   4583 		mutex_enter(rxq->rxq_lock);
   4584 		rxq->rxq_stopping = false;
   4585 		mutex_exit(rxq->rxq_lock);
   4586 	}
   4587 
   4588 	WM_CORE_LOCK(sc);
   4589 	sc->sc_core_stopping = false;
   4590 	WM_CORE_UNLOCK(sc);
   4591 }
   4592 
   4593 static void
   4594 wm_turnoff(struct wm_softc *sc)
   4595 {
   4596 	int i;
   4597 
   4598 	WM_CORE_LOCK(sc);
   4599 	sc->sc_core_stopping = true;
   4600 	WM_CORE_UNLOCK(sc);
   4601 
   4602 	for (i = 0; i < sc->sc_nqueues; i++) {
   4603 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4604 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4605 
   4606 		mutex_enter(rxq->rxq_lock);
   4607 		rxq->rxq_stopping = true;
   4608 		mutex_exit(rxq->rxq_lock);
   4609 
   4610 		mutex_enter(txq->txq_lock);
   4611 		txq->txq_stopping = true;
   4612 		mutex_exit(txq->txq_lock);
   4613 	}
   4614 }
   4615 
   4616 /*
   4617  * wm_init:		[ifnet interface function]
   4618  *
   4619  *	Initialize the interface.
   4620  */
   4621 static int
   4622 wm_init(struct ifnet *ifp)
   4623 {
   4624 	struct wm_softc *sc = ifp->if_softc;
   4625 	int ret;
   4626 
   4627 	WM_CORE_LOCK(sc);
   4628 	ret = wm_init_locked(ifp);
   4629 	WM_CORE_UNLOCK(sc);
   4630 
   4631 	return ret;
   4632 }
   4633 
   4634 static int
   4635 wm_init_locked(struct ifnet *ifp)
   4636 {
   4637 	struct wm_softc *sc = ifp->if_softc;
   4638 	int i, j, trynum, error = 0;
   4639 	uint32_t reg;
   4640 
   4641 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4642 		device_xname(sc->sc_dev), __func__));
   4643 	KASSERT(WM_CORE_LOCKED(sc));
   4644 
   4645 	/*
   4646 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   4647 	 * There is a small but measurable benefit to avoiding the adjustment
   4648 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4649 	 * on such platforms.  One possibility is that the DMA itself is
   4650 	 * slightly more efficient if the front of the entire packet (instead
   4651 	 * of the front of the headers) is aligned.
   4652 	 *
   4653 	 * Note we must always set align_tweak to 0 if we are using
   4654 	 * jumbo frames.
   4655 	 */
   4656 #ifdef __NO_STRICT_ALIGNMENT
   4657 	sc->sc_align_tweak = 0;
   4658 #else
   4659 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4660 		sc->sc_align_tweak = 0;
   4661 	else
   4662 		sc->sc_align_tweak = 2;
   4663 #endif /* __NO_STRICT_ALIGNMENT */
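        	/*
        	 * Editorial illustration (not part of the original driver):
        	 * with sc_align_tweak = 2, a received frame starts at a
        	 * 2-byte offset, so the 14-byte Ethernet header occupies
        	 * offsets 2..15 and the IP header begins at offset 16, a
        	 * 4-byte boundary.
        	 */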
   4664 
   4665 	/* Cancel any pending I/O. */
   4666 	wm_stop_locked(ifp, 0);
   4667 
   4668 	/* update statistics before reset */
   4669 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4670 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4671 
   4672 	/* Reset the chip to a known state. */
   4673 	wm_reset(sc);
   4674 
   4675 	switch (sc->sc_type) {
   4676 	case WM_T_82571:
   4677 	case WM_T_82572:
   4678 	case WM_T_82573:
   4679 	case WM_T_82574:
   4680 	case WM_T_82583:
   4681 	case WM_T_80003:
   4682 	case WM_T_ICH8:
   4683 	case WM_T_ICH9:
   4684 	case WM_T_ICH10:
   4685 	case WM_T_PCH:
   4686 	case WM_T_PCH2:
   4687 	case WM_T_PCH_LPT:
   4688 	case WM_T_PCH_SPT:
   4689 		/* AMT-based hardware can now take control from firmware */
   4690 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4691 			wm_get_hw_control(sc);
   4692 		break;
   4693 	default:
   4694 		break;
   4695 	}
   4696 
   4697 	/* Init hardware bits */
   4698 	wm_initialize_hardware_bits(sc);
   4699 
   4700 	/* Reset the PHY. */
   4701 	if (sc->sc_flags & WM_F_HAS_MII)
   4702 		wm_gmii_reset(sc);
   4703 
   4704 	/* Calculate (E)ITR value */
   4705 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4706 		sc->sc_itr = 450;	/* For EITR */
   4707 	} else if (sc->sc_type >= WM_T_82543) {
   4708 		/*
   4709 		 * Set up the interrupt throttling register (units of 256ns)
   4710 		 * Note that a footnote in Intel's documentation says this
   4711 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4712 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4713 		 * that that is also true for the 1024ns units of the other
   4714 		 * interrupt-related timer registers -- so, really, we ought
   4715 		 * to divide this value by 4 when the link speed is low.
   4716 		 *
   4717 		 * XXX implement this division at link speed change!
   4718 		 */
   4719 
   4720 		/*
   4721 		 * For N interrupts/sec, set this value to:
   4722 		 * 1000000000 / (N * 256).  Note that we set the
   4723 		 * absolute and packet timer values to this value
   4724 		 * divided by 4 to get "simple timer" behavior.
   4725 		 */
   4726 
   4727 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4728 	}
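        	/*
        	 * Worked example (editorial): with sc_itr = 1500 in 256 ns
        	 * units, N = 1000000000 / (1500 * 256) is about 2604
        	 * interrupts/sec, which is where the figure in the comment
        	 * above comes from.
        	 */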
   4729 
   4730 	error = wm_init_txrx_queues(sc);
   4731 	if (error)
   4732 		goto out;
   4733 
   4734 	/*
   4735 	 * Clear out the VLAN table -- we don't use it (yet).
   4736 	 */
   4737 	CSR_WRITE(sc, WMREG_VET, 0);
   4738 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4739 		trynum = 10; /* Due to hw errata */
   4740 	else
   4741 		trynum = 1;
   4742 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4743 		for (j = 0; j < trynum; j++)
   4744 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4745 
   4746 	/*
   4747 	 * Set up flow-control parameters.
   4748 	 *
   4749 	 * XXX Values could probably stand some tuning.
   4750 	 */
   4751 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4752 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4753 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4754 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4755 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4756 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4757 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4758 	}
   4759 
   4760 	sc->sc_fcrtl = FCRTL_DFLT;
   4761 	if (sc->sc_type < WM_T_82543) {
   4762 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4763 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4764 	} else {
   4765 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4766 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4767 	}
   4768 
   4769 	if (sc->sc_type == WM_T_80003)
   4770 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4771 	else
   4772 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4773 
   4774 	/* Writes the control register. */
   4775 	wm_set_vlan(sc);
   4776 
   4777 	if (sc->sc_flags & WM_F_HAS_MII) {
   4778 		int val;
   4779 
   4780 		switch (sc->sc_type) {
   4781 		case WM_T_80003:
   4782 		case WM_T_ICH8:
   4783 		case WM_T_ICH9:
   4784 		case WM_T_ICH10:
   4785 		case WM_T_PCH:
   4786 		case WM_T_PCH2:
   4787 		case WM_T_PCH_LPT:
   4788 		case WM_T_PCH_SPT:
   4789 			/*
   4790 			 * Set the MAC to wait the maximum time between each
   4791 			 * iteration and increase the max iterations when
   4792 			 * polling the PHY; this fixes erroneous timeouts at
   4793 			 * 10Mbps.
   4794 			 */
   4795 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4796 			    0xFFFF);
   4797 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4798 			val |= 0x3F;
   4799 			wm_kmrn_writereg(sc,
   4800 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4801 			break;
   4802 		default:
   4803 			break;
   4804 		}
   4805 
   4806 		if (sc->sc_type == WM_T_80003) {
   4807 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4808 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4809 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4810 
   4811 			/* Bypass RX and TX FIFO's */
   4812 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4813 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4814 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4815 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4816 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4817 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4818 		}
   4819 	}
   4820 #if 0
   4821 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4822 #endif
   4823 
   4824 	/* Set up checksum offload parameters. */
   4825 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4826 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4827 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4828 		reg |= RXCSUM_IPOFL;
   4829 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4830 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4831 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4832 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4833 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4834 
   4835 	/* Set up MSI-X */
   4836 	if (sc->sc_nintrs > 1) {
   4837 		uint32_t ivar;
   4838 		struct wm_queue *wmq;
   4839 		int qid, qintr_idx;
   4840 
   4841 		if (sc->sc_type == WM_T_82575) {
   4842 			/* Interrupt control */
   4843 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4844 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4845 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4846 
   4847 			/* TX and RX */
   4848 			for (i = 0; i < sc->sc_nqueues; i++) {
   4849 				wmq = &sc->sc_queue[i];
   4850 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4851 				    EITR_TX_QUEUE(wmq->wmq_id)
   4852 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4853 			}
   4854 			/* Link status */
   4855 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4856 			    EITR_OTHER);
   4857 		} else if (sc->sc_type == WM_T_82574) {
   4858 			/* Interrupt control */
   4859 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4860 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4861 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4862 
   4863 			ivar = 0;
   4864 			/* TX and RX */
   4865 			for (i = 0; i < sc->sc_nqueues; i++) {
   4866 				wmq = &sc->sc_queue[i];
   4867 				qid = wmq->wmq_id;
   4868 				qintr_idx = wmq->wmq_intr_idx;
   4869 
   4870 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4871 				    IVAR_TX_MASK_Q_82574(qid));
   4872 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4873 				    IVAR_RX_MASK_Q_82574(qid));
   4874 			}
   4875 			/* Link status */
   4876 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4877 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4878 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4879 		} else {
   4880 			/* Interrupt control */
   4881 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4882 			    | GPIE_EIAME | GPIE_PBA);
   4883 
   4884 			switch (sc->sc_type) {
   4885 			case WM_T_82580:
   4886 			case WM_T_I350:
   4887 			case WM_T_I354:
   4888 			case WM_T_I210:
   4889 			case WM_T_I211:
   4890 				/* TX and RX */
   4891 				for (i = 0; i < sc->sc_nqueues; i++) {
   4892 					wmq = &sc->sc_queue[i];
   4893 					qid = wmq->wmq_id;
   4894 					qintr_idx = wmq->wmq_intr_idx;
   4895 
   4896 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4897 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4898 					ivar |= __SHIFTIN((qintr_idx
   4899 						| IVAR_VALID),
   4900 					    IVAR_TX_MASK_Q(qid));
   4901 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4902 					ivar |= __SHIFTIN((qintr_idx
   4903 						| IVAR_VALID),
   4904 					    IVAR_RX_MASK_Q(qid));
   4905 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4906 				}
   4907 				break;
   4908 			case WM_T_82576:
   4909 				/* TX and RX */
   4910 				for (i = 0; i < sc->sc_nqueues; i++) {
   4911 					wmq = &sc->sc_queue[i];
   4912 					qid = wmq->wmq_id;
   4913 					qintr_idx = wmq->wmq_intr_idx;
   4914 
   4915 					ivar = CSR_READ(sc,
   4916 					    WMREG_IVAR_Q_82576(qid));
   4917 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4918 					ivar |= __SHIFTIN((qintr_idx
   4919 						| IVAR_VALID),
   4920 					    IVAR_TX_MASK_Q_82576(qid));
   4921 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4922 					ivar |= __SHIFTIN((qintr_idx
   4923 						| IVAR_VALID),
   4924 					    IVAR_RX_MASK_Q_82576(qid));
   4925 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4926 					    ivar);
   4927 				}
   4928 				break;
   4929 			default:
   4930 				break;
   4931 			}
   4932 
   4933 			/* Link status */
   4934 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4935 			    IVAR_MISC_OTHER);
   4936 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4937 		}
   4938 
   4939 		if (sc->sc_nqueues > 1) {
   4940 			wm_init_rss(sc);
   4941 
   4942 			/*
   4943 			 * NOTE: Receive Full-Packet Checksum Offload
   4944 			 * is mutually exclusive with Multiqueue; however,
   4945 			 * this is not the same as the TCP/IP checksums,
   4946 			 * which still work.
   4947 			 */
   4948 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4949 			reg |= RXCSUM_PCSD;
   4950 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4951 		}
   4952 	}
   4953 
   4954 	/* Set up the interrupt registers. */
   4955 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4956 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4957 	    ICR_RXO | ICR_RXT0;
   4958 	if (sc->sc_nintrs > 1) {
   4959 		uint32_t mask;
   4960 		struct wm_queue *wmq;
   4961 
   4962 		switch (sc->sc_type) {
   4963 		case WM_T_82574:
   4964 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4965 			    WMREG_EIAC_82574_MSIX_MASK);
   4966 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4967 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4968 			break;
   4969 		default:
   4970 			if (sc->sc_type == WM_T_82575) {
   4971 				mask = 0;
   4972 				for (i = 0; i < sc->sc_nqueues; i++) {
   4973 					wmq = &sc->sc_queue[i];
   4974 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4975 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4976 				}
   4977 				mask |= EITR_OTHER;
   4978 			} else {
   4979 				mask = 0;
   4980 				for (i = 0; i < sc->sc_nqueues; i++) {
   4981 					wmq = &sc->sc_queue[i];
   4982 					mask |= 1 << wmq->wmq_intr_idx;
   4983 				}
   4984 				mask |= 1 << sc->sc_link_intr_idx;
   4985 			}
   4986 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4987 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4988 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4989 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4990 			break;
   4991 		}
   4992 	} else
   4993 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4994 
   4995 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4996 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4997 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4998 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4999 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5000 		reg |= KABGTXD_BGSQLBIAS;
   5001 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5002 	}
   5003 
   5004 	/* Set up the inter-packet gap. */
   5005 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5006 
   5007 	if (sc->sc_type >= WM_T_82543) {
   5008 		/*
   5009 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
   5010 		 * the multiqueue function with MSI-X.
   5011 		 */
   5012 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5013 			int qidx;
   5014 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5015 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5016 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5017 				    sc->sc_itr);
   5018 			}
   5019 			/*
   5020 			 * Link interrupts occur much less frequently than
   5021 			 * TX and RX interrupts, so we don't tune the
   5022 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
   5023 			 * FreeBSD's if_igb does.
   5024 			 */
   5025 		} else
   5026 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5027 	}
   5028 
   5029 	/* Set the VLAN ethernetype. */
   5030 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5031 
   5032 	/*
   5033 	 * Set up the transmit control register; we start out with
   5034 	 * a collision distance suitable for FDX, but update it when
   5035 	 * we resolve the media type.
   5036 	 */
   5037 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5038 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5039 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5040 	if (sc->sc_type >= WM_T_82571)
   5041 		sc->sc_tctl |= TCTL_MULR;
   5042 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5043 
   5044 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5045 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5046 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5047 	}
   5048 
   5049 	if (sc->sc_type == WM_T_80003) {
   5050 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5051 		reg &= ~TCTL_EXT_GCEX_MASK;
   5052 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5053 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5054 	}
   5055 
   5056 	/* Set the media. */
   5057 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5058 		goto out;
   5059 
   5060 	/* Configure for OS presence */
   5061 	wm_init_manageability(sc);
   5062 
   5063 	/*
   5064 	 * Set up the receive control register; we actually program
   5065 	 * the register when we set the receive filter.  Use multicast
   5066 	 * address offset type 0.
   5067 	 *
   5068 	 * Only the i82544 has the ability to strip the incoming
   5069 	 * CRC, so we don't enable that feature.
   5070 	 */
   5071 	sc->sc_mchash_type = 0;
   5072 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5073 	    | RCTL_MO(sc->sc_mchash_type);
   5074 
   5075 	/*
   5076 	 * The I350 has a bug where it always strips the CRC whether
   5077 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   5078 	 */
   5079 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5080 	    || (sc->sc_type == WM_T_I210))
   5081 		sc->sc_rctl |= RCTL_SECRC;
   5082 
   5083 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5084 	    && (ifp->if_mtu > ETHERMTU)) {
   5085 		sc->sc_rctl |= RCTL_LPE;
   5086 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5087 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5088 	}
   5089 
   5090 	if (MCLBYTES == 2048) {
   5091 		sc->sc_rctl |= RCTL_2k;
   5092 	} else {
   5093 		if (sc->sc_type >= WM_T_82543) {
   5094 			switch (MCLBYTES) {
   5095 			case 4096:
   5096 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5097 				break;
   5098 			case 8192:
   5099 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5100 				break;
   5101 			case 16384:
   5102 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5103 				break;
   5104 			default:
   5105 				panic("wm_init: MCLBYTES %d unsupported",
   5106 				    MCLBYTES);
   5107 				break;
   5108 			}
   5109 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5110 	}
   5111 
   5112 	/* Set the receive filter. */
   5113 	wm_set_filter(sc);
   5114 
   5115 	/* Enable ECC */
   5116 	switch (sc->sc_type) {
   5117 	case WM_T_82571:
   5118 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5119 		reg |= PBA_ECC_CORR_EN;
   5120 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5121 		break;
   5122 	case WM_T_PCH_LPT:
   5123 	case WM_T_PCH_SPT:
   5124 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5125 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5126 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5127 
   5128 		reg = CSR_READ(sc, WMREG_CTRL);
   5129 		reg |= CTRL_MEHE;
   5130 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5131 		break;
   5132 	default:
   5133 		break;
   5134 	}
   5135 
   5136 	/* On 575 and later set RDT only if RX enabled */
   5137 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5138 		int qidx;
   5139 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5140 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5141 			for (i = 0; i < WM_NRXDESC; i++) {
   5142 				mutex_enter(rxq->rxq_lock);
   5143 				wm_init_rxdesc(rxq, i);
   5144 				mutex_exit(rxq->rxq_lock);
   5146 			}
   5147 		}
   5148 	}
   5149 
   5150 	wm_turnon(sc);
   5151 
   5152 	/* Start the one second link check clock. */
   5153 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5154 
   5155 	/* ...all done! */
   5156 	ifp->if_flags |= IFF_RUNNING;
   5157 	ifp->if_flags &= ~IFF_OACTIVE;
   5158 
   5159  out:
   5160 	sc->sc_if_flags = ifp->if_flags;
   5161 	if (error)
   5162 		log(LOG_ERR, "%s: interface not running\n",
   5163 		    device_xname(sc->sc_dev));
   5164 	return error;
   5165 }
   5166 
   5167 /*
   5168  * wm_stop:		[ifnet interface function]
   5169  *
   5170  *	Stop transmission on the interface.
   5171  */
   5172 static void
   5173 wm_stop(struct ifnet *ifp, int disable)
   5174 {
   5175 	struct wm_softc *sc = ifp->if_softc;
   5176 
   5177 	WM_CORE_LOCK(sc);
   5178 	wm_stop_locked(ifp, disable);
   5179 	WM_CORE_UNLOCK(sc);
   5180 }
   5181 
   5182 static void
   5183 wm_stop_locked(struct ifnet *ifp, int disable)
   5184 {
   5185 	struct wm_softc *sc = ifp->if_softc;
   5186 	struct wm_txsoft *txs;
   5187 	int i, qidx;
   5188 
   5189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5190 		device_xname(sc->sc_dev), __func__));
   5191 	KASSERT(WM_CORE_LOCKED(sc));
   5192 
   5193 	wm_turnoff(sc);
   5194 
   5195 	/* Stop the one second clock. */
   5196 	callout_stop(&sc->sc_tick_ch);
   5197 
   5198 	/* Stop the 82547 Tx FIFO stall check timer. */
   5199 	if (sc->sc_type == WM_T_82547)
   5200 		callout_stop(&sc->sc_txfifo_ch);
   5201 
   5202 	if (sc->sc_flags & WM_F_HAS_MII) {
   5203 		/* Down the MII. */
   5204 		mii_down(&sc->sc_mii);
   5205 	} else {
   5206 #if 0
   5207 		/* Should we clear PHY's status properly? */
   5208 		wm_reset(sc);
   5209 #endif
   5210 	}
   5211 
   5212 	/* Stop the transmit and receive processes. */
   5213 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5214 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5215 	sc->sc_rctl &= ~RCTL_EN;
   5216 
   5217 	/*
   5218 	 * Clear the interrupt mask to ensure the device cannot assert its
   5219 	 * interrupt line.
   5220 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5221 	 * service any currently pending or shared interrupt.
   5222 	 */
   5223 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5224 	sc->sc_icr = 0;
   5225 	if (sc->sc_nintrs > 1) {
   5226 		if (sc->sc_type != WM_T_82574) {
   5227 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5228 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5229 		} else
   5230 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5231 	}
   5232 
   5233 	/* Release any queued transmit buffers. */
   5234 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5235 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5236 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5237 		mutex_enter(txq->txq_lock);
   5238 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5239 			txs = &txq->txq_soft[i];
   5240 			if (txs->txs_mbuf != NULL) {
   5241 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5242 				m_freem(txs->txs_mbuf);
   5243 				txs->txs_mbuf = NULL;
   5244 			}
   5245 		}
   5246 		if (sc->sc_type == WM_T_PCH_SPT) {
   5247 			pcireg_t preg;
   5248 			uint32_t reg;
   5249 			int nexttx;
   5250 
   5251 			/* First, disable MULR fix in FEXTNVM11 */
   5252 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5253 			reg |= FEXTNVM11_DIS_MULRFIX;
   5254 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5255 
   5256 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5257 			    WM_PCI_DESCRING_STATUS);
   5258 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5259 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5260 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5261 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5262 			    && (reg != 0)) {
   5263 				/* TX */
   5264 				printf("XXX need TX flush (reg = %08x)\n",
   5265 				    preg);
   5266 				wm_init_tx_descs(sc, txq);
   5267 				wm_init_tx_regs(sc, wmq, txq);
   5268 				nexttx = txq->txq_next;
   5269 				wm_set_dma_addr(
   5270 					&txq->txq_descs[nexttx].wtx_addr,
   5271 					WM_CDTXADDR(txq, nexttx));
   5272 				txq->txq_descs[nexttx].wtx_cmdlen
   5273 				    = htole32(WTX_CMD_IFCS | 512);
   5274 				wm_cdtxsync(txq, nexttx, 1,
   5275 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5276 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5277 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5278 				CSR_WRITE_FLUSH(sc);
   5279 				delay(250);
   5280 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5281 			}
   5282 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5283 			    WM_PCI_DESCRING_STATUS);
   5284 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5285 				/* RX */
   5286 				printf("XXX need RX flush\n");
   5287 			}
   5288 		}
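        		/*
        		 * Editorial note on the block above: the PCH_SPT
        		 * flush workaround queues one dummy 512-byte
        		 * descriptor with IFCS set, briefly enables the
        		 * transmitter so the hardware consumes it, and then
        		 * disables the transmitter again.
        		 */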
   5289 		mutex_exit(txq->txq_lock);
   5290 	}
   5291 
   5292 	/* Mark the interface as down and cancel the watchdog timer. */
   5293 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5294 	ifp->if_timer = 0;
   5295 
   5296 	if (disable) {
   5297 		for (i = 0; i < sc->sc_nqueues; i++) {
   5298 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5299 			mutex_enter(rxq->rxq_lock);
   5300 			wm_rxdrain(rxq);
   5301 			mutex_exit(rxq->rxq_lock);
   5302 		}
   5303 	}
   5304 
   5305 #if 0 /* notyet */
   5306 	if (sc->sc_type >= WM_T_82544)
   5307 		CSR_WRITE(sc, WMREG_WUC, 0);
   5308 #endif
   5309 }
   5310 
   5311 static void
   5312 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5313 {
   5314 	struct mbuf *m;
   5315 	int i;
   5316 
   5317 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5318 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5319 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5320 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5321 		    m->m_data, m->m_len, m->m_flags);
   5322 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5323 	    i, i == 1 ? "" : "s");
   5324 }
   5325 
   5326 /*
   5327  * wm_82547_txfifo_stall:
   5328  *
   5329  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5330  *	reset the FIFO pointers, and restart packet transmission.
   5331  */
   5332 static void
   5333 wm_82547_txfifo_stall(void *arg)
   5334 {
   5335 	struct wm_softc *sc = arg;
   5336 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5337 
   5338 	mutex_enter(txq->txq_lock);
   5339 
   5340 	if (txq->txq_stopping)
   5341 		goto out;
   5342 
   5343 	if (txq->txq_fifo_stall) {
   5344 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5345 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5346 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5347 			/*
   5348 			 * Packets have drained.  Stop transmitter, reset
   5349 			 * FIFO pointers, restart transmitter, and kick
   5350 			 * the packet queue.
   5351 			 */
   5352 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5353 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5354 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5355 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5356 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5357 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5358 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5359 			CSR_WRITE_FLUSH(sc);
   5360 
   5361 			txq->txq_fifo_head = 0;
   5362 			txq->txq_fifo_stall = 0;
   5363 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5364 		} else {
   5365 			/*
   5366 			 * Still waiting for packets to drain; try again in
   5367 			 * another tick.
   5368 			 */
   5369 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5370 		}
   5371 	}
   5372 
   5373 out:
   5374 	mutex_exit(txq->txq_lock);
   5375 }
   5376 
   5377 /*
   5378  * wm_82547_txfifo_bugchk:
   5379  *
   5380  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5381  *	prevent enqueueing a packet that would wrap around the end
   5382  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5383  *
   5384  *	We do this by checking the amount of space before the end
   5385  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5386  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5387  *	the internal FIFO pointers to the beginning, and restart
   5388  *	transmission on the interface.
   5389  */
   5390 #define	WM_FIFO_HDR		0x10
   5391 #define	WM_82547_PAD_LEN	0x3e0
   5392 static int
   5393 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5394 {
   5395 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5396 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5397 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5398 
   5399 	/* Just return if already stalled. */
   5400 	if (txq->txq_fifo_stall)
   5401 		return 1;
   5402 
   5403 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5404 		/* Stall only occurs in half-duplex mode. */
   5405 		goto send_packet;
   5406 	}
   5407 
   5408 	if (len >= WM_82547_PAD_LEN + space) {
   5409 		txq->txq_fifo_stall = 1;
   5410 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5411 		return 1;
   5412 	}
   5413 
   5414  send_packet:
   5415 	txq->txq_fifo_head += len;
   5416 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5417 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5418 
   5419 	return 0;
   5420 }
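        /*
         * Worked example (editorial; the FIFO geometry is assumed only for
         * illustration): a 1514-byte frame gives
         * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536.  If
         * txq_fifo_size were 0x4000 with txq_fifo_head at 0x3e00, then
         * space = 0x200, and since 1536 >= WM_82547_PAD_LEN + 0x200 (= 1504)
         * the FIFO would be stalled rather than risking a wrap at the end of
         * the ring.
         */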
   5421 
   5422 static int
   5423 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5424 {
   5425 	int error;
   5426 
   5427 	/*
   5428 	 * Allocate the control data structures, and create and load the
   5429 	 * DMA map for it.
   5430 	 *
   5431 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5432 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5433 	 * both sets within the same 4G segment.
   5434 	 */
   5435 	if (sc->sc_type < WM_T_82544)
   5436 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5437 	else
   5438 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5439 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5440 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5441 	else
   5442 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5443 
   5444 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5445 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5446 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5447 		aprint_error_dev(sc->sc_dev,
   5448 		    "unable to allocate TX control data, error = %d\n",
   5449 		    error);
   5450 		goto fail_0;
   5451 	}
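        	/*
        	 * Note (editorial): the 0x100000000ULL boundary argument above
        	 * keeps the allocation from crossing a 4G boundary, which is
        	 * what enforces the "same 4G segment" requirement described at
        	 * the top of this function.
        	 */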
   5452 
   5453 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5454 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5455 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5456 		aprint_error_dev(sc->sc_dev,
   5457 		    "unable to map TX control data, error = %d\n", error);
   5458 		goto fail_1;
   5459 	}
   5460 
   5461 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5462 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5463 		aprint_error_dev(sc->sc_dev,
   5464 		    "unable to create TX control data DMA map, error = %d\n",
   5465 		    error);
   5466 		goto fail_2;
   5467 	}
   5468 
   5469 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5470 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5471 		aprint_error_dev(sc->sc_dev,
   5472 		    "unable to load TX control data DMA map, error = %d\n",
   5473 		    error);
   5474 		goto fail_3;
   5475 	}
   5476 
   5477 	return 0;
   5478 
   5479  fail_3:
   5480 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5481  fail_2:
   5482 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5483 	    WM_TXDESCS_SIZE(txq));
   5484  fail_1:
   5485 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5486  fail_0:
   5487 	return error;
   5488 }
   5489 
   5490 static void
   5491 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5492 {
   5493 
   5494 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5495 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5496 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5497 	    WM_TXDESCS_SIZE(txq));
   5498 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5499 }
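        /*
         * Editorial note: the teardown above runs in exactly the reverse
         * order of wm_alloc_tx_descs() -- unload, destroy, unmap, free --
         * mirroring that function's fail_* unwind labels.
         */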
   5500 
   5501 static int
   5502 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5503 {
   5504 	int error;
   5505 
   5506 	/*
   5507 	 * Allocate the control data structures, and create and load the
   5508 	 * DMA map for it.
   5509 	 *
   5510 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5511 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5512 	 * both sets within the same 4G segment.
   5513 	 */
   5514 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5515 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5516 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5517 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5518 		aprint_error_dev(sc->sc_dev,
   5519 		    "unable to allocate RX control data, error = %d\n",
   5520 		    error);
   5521 		goto fail_0;
   5522 	}
   5523 
   5524 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5525 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5526 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5527 		aprint_error_dev(sc->sc_dev,
   5528 		    "unable to map RX control data, error = %d\n", error);
   5529 		goto fail_1;
   5530 	}
   5531 
   5532 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5533 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5534 		aprint_error_dev(sc->sc_dev,
   5535 		    "unable to create RX control data DMA map, error = %d\n",
   5536 		    error);
   5537 		goto fail_2;
   5538 	}
   5539 
   5540 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5541 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5542 		aprint_error_dev(sc->sc_dev,
   5543 		    "unable to load RX control data DMA map, error = %d\n",
   5544 		    error);
   5545 		goto fail_3;
   5546 	}
   5547 
   5548 	return 0;
   5549 
   5550  fail_3:
   5551 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5552  fail_2:
   5553 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5554 	    rxq->rxq_desc_size);
   5555  fail_1:
   5556 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5557  fail_0:
   5558 	return error;
   5559 }
   5560 
   5561 static void
   5562 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5563 {
   5564 
   5565 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5566 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5567 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5568 	    rxq->rxq_desc_size);
   5569 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5570 }
   5571 
   5572 
   5573 static int
   5574 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5575 {
   5576 	int i, error;
   5577 
   5578 	/* Create the transmit buffer DMA maps. */
   5579 	WM_TXQUEUELEN(txq) =
   5580 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5581 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5582 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5583 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5584 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5585 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5586 			aprint_error_dev(sc->sc_dev,
   5587 			    "unable to create Tx DMA map %d, error = %d\n",
   5588 			    i, error);
   5589 			goto fail;
   5590 		}
   5591 	}
   5592 
   5593 	return 0;
   5594 
   5595  fail:
   5596 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5597 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5598 			bus_dmamap_destroy(sc->sc_dmat,
   5599 			    txq->txq_soft[i].txs_dmamap);
   5600 	}
   5601 	return error;
   5602 }
   5603 
   5604 static void
   5605 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5606 {
   5607 	int i;
   5608 
   5609 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5610 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5611 			bus_dmamap_destroy(sc->sc_dmat,
   5612 			    txq->txq_soft[i].txs_dmamap);
   5613 	}
   5614 }
   5615 
   5616 static int
   5617 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5618 {
   5619 	int i, error;
   5620 
   5621 	/* Create the receive buffer DMA maps. */
   5622 	for (i = 0; i < WM_NRXDESC; i++) {
   5623 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5624 			    MCLBYTES, 0, 0,
   5625 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5626 			aprint_error_dev(sc->sc_dev,
   5627 			    "unable to create Rx DMA map %d error = %d\n",
   5628 			    i, error);
   5629 			goto fail;
   5630 		}
   5631 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5632 	}
   5633 
   5634 	return 0;
   5635 
   5636  fail:
   5637 	for (i = 0; i < WM_NRXDESC; i++) {
   5638 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5639 			bus_dmamap_destroy(sc->sc_dmat,
   5640 			    rxq->rxq_soft[i].rxs_dmamap);
   5641 	}
   5642 	return error;
   5643 }
   5644 
   5645 static void
   5646 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5647 {
   5648 	int i;
   5649 
   5650 	for (i = 0; i < WM_NRXDESC; i++) {
   5651 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5652 			bus_dmamap_destroy(sc->sc_dmat,
   5653 			    rxq->rxq_soft[i].rxs_dmamap);
   5654 	}
   5655 }
   5656 
   5657 /*
   5658  * wm_alloc_txrx_queues:
   5659  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5660  */
   5661 static int
   5662 wm_alloc_txrx_queues(struct wm_softc *sc)
   5663 {
   5664 	int i, error, tx_done, rx_done;
   5665 
   5666 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5667 	    KM_SLEEP);
   5668 	if (sc->sc_queue == NULL) {
   5669 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5670 		error = ENOMEM;
   5671 		goto fail_0;
   5672 	}
   5673 
   5674 	/*
   5675 	 * For transmission
   5676 	 */
   5677 	error = 0;
   5678 	tx_done = 0;
   5679 	for (i = 0; i < sc->sc_nqueues; i++) {
   5680 #ifdef WM_EVENT_COUNTERS
   5681 		int j;
   5682 		const char *xname;
   5683 #endif
   5684 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5685 		txq->txq_sc = sc;
   5686 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5687 
   5688 		error = wm_alloc_tx_descs(sc, txq);
   5689 		if (error)
   5690 			break;
   5691 		error = wm_alloc_tx_buffer(sc, txq);
   5692 		if (error) {
   5693 			wm_free_tx_descs(sc, txq);
   5694 			break;
   5695 		}
   5696 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5697 		if (txq->txq_interq == NULL) {
   5698 			wm_free_tx_descs(sc, txq);
   5699 			wm_free_tx_buffer(sc, txq);
   5700 			error = ENOMEM;
   5701 			break;
   5702 		}
   5703 
   5704 #ifdef WM_EVENT_COUNTERS
   5705 		xname = device_xname(sc->sc_dev);
   5706 
   5707 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5708 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5709 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5710 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5711 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5712 
   5713 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5714 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5715 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5716 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5717 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5718 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5719 
   5720 		for (j = 0; j < WM_NTXSEGS; j++) {
   5721 			snprintf(txq->txq_txseg_evcnt_names[j],
   5722 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5723 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5724 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5725 		}
   5726 
   5727 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5728 
   5729 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5730 #endif /* WM_EVENT_COUNTERS */
   5731 
   5732 		tx_done++;
   5733 	}
   5734 	if (error)
   5735 		goto fail_1;
   5736 
   5737 	/*
   5738 	 * For receive
   5739 	 */
   5740 	error = 0;
   5741 	rx_done = 0;
   5742 	for (i = 0; i < sc->sc_nqueues; i++) {
   5743 #ifdef WM_EVENT_COUNTERS
   5744 		const char *xname;
   5745 #endif
   5746 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5747 		rxq->rxq_sc = sc;
   5748 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5749 
   5750 		error = wm_alloc_rx_descs(sc, rxq);
   5751 		if (error)
   5752 			break;
   5753 
   5754 		error = wm_alloc_rx_buffer(sc, rxq);
   5755 		if (error) {
   5756 			wm_free_rx_descs(sc, rxq);
   5757 			break;
   5758 		}
   5759 
   5760 #ifdef WM_EVENT_COUNTERS
   5761 		xname = device_xname(sc->sc_dev);
   5762 
   5763 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5764 
   5765 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5766 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5767 #endif /* WM_EVENT_COUNTERS */
   5768 
   5769 		rx_done++;
   5770 	}
   5771 	if (error)
   5772 		goto fail_2;
   5773 
   5774 	return 0;
   5775 
   5776  fail_2:
   5777 	for (i = 0; i < rx_done; i++) {
   5778 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5779 		wm_free_rx_buffer(sc, rxq);
   5780 		wm_free_rx_descs(sc, rxq);
   5781 		if (rxq->rxq_lock)
   5782 			mutex_obj_free(rxq->rxq_lock);
   5783 	}
   5784  fail_1:
   5785 	for (i = 0; i < tx_done; i++) {
   5786 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5787 		pcq_destroy(txq->txq_interq);
   5788 		wm_free_tx_buffer(sc, txq);
   5789 		wm_free_tx_descs(sc, txq);
   5790 		if (txq->txq_lock)
   5791 			mutex_obj_free(txq->txq_lock);
   5792 	}
   5793 
   5794 	kmem_free(sc->sc_queue,
   5795 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5796  fail_0:
   5797 	return error;
   5798 }
   5799 
   5800 /*
   5801  * wm_free_txrx_queues:
   5802  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   5803  */
   5804 static void
   5805 wm_free_txrx_queues(struct wm_softc *sc)
   5806 {
   5807 	int i;
   5808 
   5809 	for (i = 0; i < sc->sc_nqueues; i++) {
   5810 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5811 		wm_free_rx_buffer(sc, rxq);
   5812 		wm_free_rx_descs(sc, rxq);
   5813 		if (rxq->rxq_lock)
   5814 			mutex_obj_free(rxq->rxq_lock);
   5815 	}
   5816 
   5817 	for (i = 0; i < sc->sc_nqueues; i++) {
   5818 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5819 		wm_free_tx_buffer(sc, txq);
   5820 		wm_free_tx_descs(sc, txq);
   5821 		if (txq->txq_lock)
   5822 			mutex_obj_free(txq->txq_lock);
   5823 	}
   5824 
   5825 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5826 }
   5827 
   5828 static void
   5829 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5830 {
   5831 
   5832 	KASSERT(mutex_owned(txq->txq_lock));
   5833 
   5834 	/* Initialize the transmit descriptor ring. */
   5835 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5836 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5837 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5838 	txq->txq_free = WM_NTXDESC(txq);
   5839 	txq->txq_next = 0;
   5840 }
   5841 
   5842 static void
   5843 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5844     struct wm_txqueue *txq)
   5845 {
   5846 
   5847 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5848 		device_xname(sc->sc_dev), __func__));
   5849 	KASSERT(mutex_owned(txq->txq_lock));
   5850 
   5851 	if (sc->sc_type < WM_T_82543) {
   5852 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5853 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5854 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5855 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5856 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5857 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5858 	} else {
   5859 		int qid = wmq->wmq_id;
   5860 
   5861 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5862 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5863 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5864 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5865 
   5866 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5867 			/*
   5868 			 * Don't write TDT before TCTL.EN is set.
   5869 			 * See the document.
   5870 			 */
   5871 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5872 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5873 			    | TXDCTL_WTHRESH(0));
   5874 		else {
   5875 			/* ITR / 4 */
   5876 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5877 			if (sc->sc_type >= WM_T_82540) {
   5878 				/* Should be the same as TIDV. */
   5879 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5880 			}
   5881 
   5882 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5883 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5884 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5885 		}
   5886 	}
   5887 }
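        /*
         * Worked arithmetic (editorial): sc_itr counts 256 ns units while
         * TIDV/TADV count ~1024 ns units, so with sc_itr = 1500 the value
         * written above is 1500 / 4 = 375, and both encodings describe the
         * same 384 us interval (1500 * 256 ns == 375 * 1024 ns).
         */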
   5888 
   5889 static void
   5890 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5891 {
   5892 	int i;
   5893 
   5894 	KASSERT(mutex_owned(txq->txq_lock));
   5895 
   5896 	/* Initialize the transmit job descriptors. */
   5897 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5898 		txq->txq_soft[i].txs_mbuf = NULL;
   5899 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5900 	txq->txq_snext = 0;
   5901 	txq->txq_sdirty = 0;
   5902 }
   5903 
   5904 static void
   5905 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5906     struct wm_txqueue *txq)
   5907 {
   5908 
   5909 	KASSERT(mutex_owned(txq->txq_lock));
   5910 
   5911 	/*
   5912 	 * Set up some register offsets that are different between
   5913 	 * the i82542 and the i82543 and later chips.
   5914 	 */
   5915 	if (sc->sc_type < WM_T_82543)
   5916 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5917 	else
   5918 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5919 
   5920 	wm_init_tx_descs(sc, txq);
   5921 	wm_init_tx_regs(sc, wmq, txq);
   5922 	wm_init_tx_buffer(sc, txq);
   5923 }
   5924 
   5925 static void
   5926 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5927     struct wm_rxqueue *rxq)
   5928 {
   5929 
   5930 	KASSERT(mutex_owned(rxq->rxq_lock));
   5931 
   5932 	/*
   5933 	 * Initialize the receive descriptor and receive job
   5934 	 * descriptor rings.
   5935 	 */
   5936 	if (sc->sc_type < WM_T_82543) {
   5937 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5938 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5939 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5940 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5941 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5942 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5943 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5944 
   5945 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5946 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5947 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5948 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5949 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5950 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5951 	} else {
   5952 		int qid = wmq->wmq_id;
   5953 
   5954 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5955 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5956 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5957 
   5958 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5959 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   5960 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
   5961 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5962 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
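        			/*
        			 * Editorial example, assuming the usual 1 KB
        			 * SRRCTL.BSIZEPKT granularity: with
        			 * MCLBYTES = 2048 the field written above is
        			 * 2048 >> SRRCTL_BSIZEPKT_SHIFT = 2, i.e.
        			 * 2 KB receive buffers.
        			 */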
   5963 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5964 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5965 			    | RXDCTL_WTHRESH(1));
   5966 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5967 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5968 		} else {
   5969 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5970 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5971 			/* ITR / 4 */
   5972 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   5973 			/* MUST be the same as RDTR. */
   5974 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5975 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5976 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5977 		}
   5978 	}
   5979 }
   5980 
   5981 static int
   5982 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5983 {
   5984 	struct wm_rxsoft *rxs;
   5985 	int error, i;
   5986 
   5987 	KASSERT(mutex_owned(rxq->rxq_lock));
   5988 
   5989 	for (i = 0; i < WM_NRXDESC; i++) {
   5990 		rxs = &rxq->rxq_soft[i];
   5991 		if (rxs->rxs_mbuf == NULL) {
   5992 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5993 				log(LOG_ERR, "%s: unable to allocate or map "
   5994 				    "rx buffer %d, error = %d\n",
   5995 				    device_xname(sc->sc_dev), i, error);
   5996 				/*
   5997 				 * XXX Should attempt to run with fewer receive
   5998 				 * XXX buffers instead of just failing.
   5999 				 */
   6000 				wm_rxdrain(rxq);
   6001 				return ENOMEM;
   6002 			}
   6003 		} else {
   6004 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6005 				wm_init_rxdesc(rxq, i);
   6006 			/*
   6007 			 * For 82575 and newer devices, the RX descriptors
   6008 			 * must be initialized after RCTL.EN is set in
   6009 			 * wm_set_filter().
   6010 			 */
   6011 		}
   6012 	}
   6013 	rxq->rxq_ptr = 0;
   6014 	rxq->rxq_discard = 0;
   6015 	WM_RXCHAIN_RESET(rxq);
   6016 
   6017 	return 0;
   6018 }
   6019 
   6020 static int
   6021 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6022     struct wm_rxqueue *rxq)
   6023 {
   6024 
   6025 	KASSERT(mutex_owned(rxq->rxq_lock));
   6026 
   6027 	/*
   6028 	 * Set up some register offsets that are different between
   6029 	 * the i82542 and the i82543 and later chips.
   6030 	 */
   6031 	if (sc->sc_type < WM_T_82543)
   6032 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6033 	else
   6034 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6035 
   6036 	wm_init_rx_regs(sc, wmq, rxq);
   6037 	return wm_init_rx_buffer(sc, rxq);
   6038 }
   6039 
   6040 /*
   6041  * wm_init_txrx_queues:
   6042  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6043  */
   6044 static int
   6045 wm_init_txrx_queues(struct wm_softc *sc)
   6046 {
   6047 	int i, error = 0;
   6048 
   6049 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6050 		device_xname(sc->sc_dev), __func__));
   6051 
   6052 	for (i = 0; i < sc->sc_nqueues; i++) {
   6053 		struct wm_queue *wmq = &sc->sc_queue[i];
   6054 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6055 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6056 
   6057 		mutex_enter(txq->txq_lock);
   6058 		wm_init_tx_queue(sc, wmq, txq);
   6059 		mutex_exit(txq->txq_lock);
   6060 
   6061 		mutex_enter(rxq->rxq_lock);
   6062 		error = wm_init_rx_queue(sc, wmq, rxq);
   6063 		mutex_exit(rxq->rxq_lock);
   6064 		if (error)
   6065 			break;
   6066 	}
   6067 
   6068 	return error;
   6069 }
   6070 
   6071 /*
   6072  * wm_tx_offload:
   6073  *
   6074  *	Set up TCP/IP checksumming parameters for the
   6075  *	specified packet.
   6076  */
   6077 static int
   6078 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6079     uint8_t *fieldsp)
   6080 {
   6081 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6082 	struct mbuf *m0 = txs->txs_mbuf;
   6083 	struct livengood_tcpip_ctxdesc *t;
   6084 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6085 	uint32_t ipcse;
   6086 	struct ether_header *eh;
   6087 	int offset, iphl;
   6088 	uint8_t fields;
   6089 
   6090 	/*
   6091 	 * XXX It would be nice if the mbuf pkthdr had offset
   6092 	 * fields for the protocol headers.
   6093 	 */
   6094 
   6095 	eh = mtod(m0, struct ether_header *);
   6096 	switch (ntohs(eh->ether_type)) {
   6097 	case ETHERTYPE_IP:
   6098 	case ETHERTYPE_IPV6:
   6099 		offset = ETHER_HDR_LEN;
   6100 		break;
   6101 
   6102 	case ETHERTYPE_VLAN:
   6103 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6104 		break;
   6105 
   6106 	default:
   6107 		/*
   6108 		 * Don't support this protocol or encapsulation.
   6109 		 */
   6110 		*fieldsp = 0;
   6111 		*cmdp = 0;
   6112 		return 0;
   6113 	}
   6114 
   6115 	if ((m0->m_pkthdr.csum_flags &
   6116 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6117 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6118 	} else {
   6119 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6120 	}
   6121 	ipcse = offset + iphl - 1;
   6122 
   6123 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6124 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6125 	seg = 0;
   6126 	fields = 0;
   6127 
   6128 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6129 		int hlen = offset + iphl;
   6130 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6131 
   6132 		if (__predict_false(m0->m_len <
   6133 				    (hlen + sizeof(struct tcphdr)))) {
   6134 			/*
   6135 			 * TCP/IP headers are not in the first mbuf; we need
   6136 			 * to do this the slow and painful way.  Let's just
   6137 			 * hope this doesn't happen very often.
   6138 			 */
   6139 			struct tcphdr th;
   6140 
   6141 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6142 
   6143 			m_copydata(m0, hlen, sizeof(th), &th);
   6144 			if (v4) {
   6145 				struct ip ip;
   6146 
   6147 				m_copydata(m0, offset, sizeof(ip), &ip);
   6148 				ip.ip_len = 0;
   6149 				m_copyback(m0,
   6150 				    offset + offsetof(struct ip, ip_len),
   6151 				    sizeof(ip.ip_len), &ip.ip_len);
   6152 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6153 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6154 			} else {
   6155 				struct ip6_hdr ip6;
   6156 
   6157 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6158 				ip6.ip6_plen = 0;
   6159 				m_copyback(m0,
   6160 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6161 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6162 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6163 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6164 			}
   6165 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6166 			    sizeof(th.th_sum), &th.th_sum);
   6167 
   6168 			hlen += th.th_off << 2;
   6169 		} else {
   6170 			/*
   6171 			 * TCP/IP headers are in the first mbuf; we can do
   6172 			 * this the easy way.
   6173 			 */
   6174 			struct tcphdr *th;
   6175 
   6176 			if (v4) {
   6177 				struct ip *ip =
   6178 				    (void *)(mtod(m0, char *) + offset);
   6179 				th = (void *)(mtod(m0, char *) + hlen);
   6180 
   6181 				ip->ip_len = 0;
   6182 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6183 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6184 			} else {
   6185 				struct ip6_hdr *ip6 =
   6186 				    (void *)(mtod(m0, char *) + offset);
   6187 				th = (void *)(mtod(m0, char *) + hlen);
   6188 
   6189 				ip6->ip6_plen = 0;
   6190 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6191 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6192 			}
   6193 			hlen += th->th_off << 2;
   6194 		}
   6195 
   6196 		if (v4) {
   6197 			WM_Q_EVCNT_INCR(txq, txtso);
   6198 			cmdlen |= WTX_TCPIP_CMD_IP;
   6199 		} else {
   6200 			WM_Q_EVCNT_INCR(txq, txtso6);
   6201 			ipcse = 0;
   6202 		}
   6203 		cmd |= WTX_TCPIP_CMD_TSE;
   6204 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6205 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6206 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6207 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6208 	}
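        	/*
        	 * Illustrative values (editorial): for a plain IPv4/TCP TSO
        	 * packet with no VLAN tag and 20-byte IP and TCP headers,
        	 * hlen = 14 + 20 + 20 = 54, so the context descriptor carries
        	 * HDRLEN(54), MSS(m0->m_pkthdr.segsz), and a TSO payload
        	 * length of m0->m_pkthdr.len - 54.
        	 */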
   6209 
   6210 	/*
   6211 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6212 	 * offload feature, if we load the context descriptor, we
   6213 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6214 	 */
   6215 
   6216 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6217 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6218 	    WTX_TCPIP_IPCSE(ipcse);
   6219 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6220 		WM_Q_EVCNT_INCR(txq, txipsum);
   6221 		fields |= WTX_IXSM;
   6222 	}
   6223 
   6224 	offset += iphl;
   6225 
   6226 	if (m0->m_pkthdr.csum_flags &
   6227 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6228 		WM_Q_EVCNT_INCR(txq, txtusum);
   6229 		fields |= WTX_TXSM;
   6230 		tucs = WTX_TCPIP_TUCSS(offset) |
   6231 		    WTX_TCPIP_TUCSO(offset +
   6232 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6233 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6234 	} else if ((m0->m_pkthdr.csum_flags &
   6235 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6236 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6237 		fields |= WTX_TXSM;
   6238 		tucs = WTX_TCPIP_TUCSS(offset) |
   6239 		    WTX_TCPIP_TUCSO(offset +
   6240 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6241 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6242 	} else {
   6243 		/* Just initialize it to a valid TCP context. */
   6244 		tucs = WTX_TCPIP_TUCSS(offset) |
   6245 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6246 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6247 	}
   6248 
   6249 	/* Fill in the context descriptor. */
   6250 	t = (struct livengood_tcpip_ctxdesc *)
   6251 	    &txq->txq_descs[txq->txq_next];
   6252 	t->tcpip_ipcs = htole32(ipcs);
   6253 	t->tcpip_tucs = htole32(tucs);
   6254 	t->tcpip_cmdlen = htole32(cmdlen);
   6255 	t->tcpip_seg = htole32(seg);
   6256 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6257 
   6258 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6259 	txs->txs_ndesc++;
   6260 
   6261 	*cmdp = cmd;
   6262 	*fieldsp = fields;
   6263 
   6264 	return 0;
   6265 }
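        /*
         * Worked example (editorial): for a non-TSO IPv4/TCP packet with no
         * VLAN tag, offset = 14 and iphl = 20, so the context descriptor
         * holds IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum), IPCSE = 33, and,
         * after offset += iphl, TUCSS = 34, TUCSO = 34 + 16 = 50 (th_sum)
         * and TUCSE = 0 ("rest of packet").
         */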
   6266 
   6267 /*
   6268  * wm_start:		[ifnet interface function]
   6269  *
   6270  *	Start packet transmission on the interface.
   6271  */
   6272 static void
   6273 wm_start(struct ifnet *ifp)
   6274 {
   6275 	struct wm_softc *sc = ifp->if_softc;
   6276 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6277 
   6278 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6279 
   6280 	mutex_enter(txq->txq_lock);
   6281 	if (!txq->txq_stopping)
   6282 		wm_start_locked(ifp);
   6283 	mutex_exit(txq->txq_lock);
   6284 }
   6285 
   6286 static void
   6287 wm_start_locked(struct ifnet *ifp)
   6288 {
   6289 	struct wm_softc *sc = ifp->if_softc;
   6290 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6291 	struct mbuf *m0;
   6292 	struct m_tag *mtag;
   6293 	struct wm_txsoft *txs;
   6294 	bus_dmamap_t dmamap;
   6295 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6296 	bus_addr_t curaddr;
   6297 	bus_size_t seglen, curlen;
   6298 	uint32_t cksumcmd;
   6299 	uint8_t cksumfields;
   6300 
   6301 	KASSERT(mutex_owned(txq->txq_lock));
   6302 
   6303 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6304 		return;
   6305 
   6306 	/* Remember the previous number of free descriptors. */
   6307 	ofree = txq->txq_free;
   6308 
   6309 	/*
   6310 	 * Loop through the send queue, setting up transmit descriptors
   6311 	 * until we drain the queue, or use up all available transmit
   6312 	 * descriptors.
   6313 	 */
   6314 	for (;;) {
   6315 		m0 = NULL;
   6316 
   6317 		/* Get a work queue entry. */
   6318 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6319 			wm_txeof(sc, txq);
   6320 			if (txq->txq_sfree == 0) {
   6321 				DPRINTF(WM_DEBUG_TX,
   6322 				    ("%s: TX: no free job descriptors\n",
   6323 					device_xname(sc->sc_dev)));
   6324 				WM_Q_EVCNT_INCR(txq, txsstall);
   6325 				break;
   6326 			}
   6327 		}
   6328 
   6329 		/* Grab a packet off the queue. */
   6330 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6331 		if (m0 == NULL)
   6332 			break;
   6333 
   6334 		DPRINTF(WM_DEBUG_TX,
   6335 		    ("%s: TX: have packet to transmit: %p\n",
   6336 		    device_xname(sc->sc_dev), m0));
   6337 
   6338 		txs = &txq->txq_soft[txq->txq_snext];
   6339 		dmamap = txs->txs_dmamap;
   6340 
   6341 		use_tso = (m0->m_pkthdr.csum_flags &
   6342 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6343 
   6344 		/*
   6345 		 * So says the Linux driver:
   6346 		 * The controller does a simple calculation to make sure
   6347 		 * there is enough room in the FIFO before initiating the
   6348 		 * DMA for each buffer.  The calc is:
   6349 		 *	4 = ceil(buffer len / MSS)
   6350 		 * To make sure we don't overrun the FIFO, adjust the max
   6351 		 * buffer len if the MSS drops.
   6352 		 */
   6353 		dmamap->dm_maxsegsz =
   6354 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6355 		    ? m0->m_pkthdr.segsz << 2
   6356 		    : WTX_MAX_LEN;
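         		/*
         		 * Illustrative arithmetic (hypothetical MSS, not taken
         		 * from chip documentation): with segsz = 1448, the cap
         		 * is 1448 << 2 = 5792 bytes per DMA segment when that
         		 * is below WTX_MAX_LEN; otherwise WTX_MAX_LEN is used
         		 * unchanged.
         		 */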
   6357 
   6358 		/*
   6359 		 * Load the DMA map.  If this fails, the packet either
   6360 		 * didn't fit in the allotted number of segments, or we
   6361 		 * were short on resources.  For the too-many-segments
   6362 		 * case, we simply report an error and drop the packet,
   6363 		 * since we can't sanely copy a jumbo packet to a single
   6364 		 * buffer.
   6365 		 */
   6366 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6367 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6368 		if (error) {
   6369 			if (error == EFBIG) {
   6370 				WM_Q_EVCNT_INCR(txq, txdrop);
   6371 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6372 				    "DMA segments, dropping...\n",
   6373 				    device_xname(sc->sc_dev));
   6374 				wm_dump_mbuf_chain(sc, m0);
   6375 				m_freem(m0);
   6376 				continue;
   6377 			}
   6378 			/*  Short on resources, just stop for now. */
   6379 			DPRINTF(WM_DEBUG_TX,
   6380 			    ("%s: TX: dmamap load failed: %d\n",
   6381 			    device_xname(sc->sc_dev), error));
   6382 			break;
   6383 		}
   6384 
   6385 		segs_needed = dmamap->dm_nsegs;
   6386 		if (use_tso) {
   6387 			/* For sentinel descriptor; see below. */
   6388 			segs_needed++;
   6389 		}
   6390 
   6391 		/*
   6392 		 * Ensure we have enough descriptors free to describe
   6393 		 * the packet.  Note, we always reserve one descriptor
   6394 		 * at the end of the ring due to the semantics of the
   6395 		 * TDT register, plus one more in the event we need
   6396 		 * to load offload context.
   6397 		 */
   6398 		if (segs_needed > txq->txq_free - 2) {
   6399 			/*
   6400 			 * Not enough free descriptors to transmit this
   6401 			 * packet.  We haven't committed anything yet,
   6402 			 * so just unload the DMA map, put the packet
    6403 			 * back on the queue, and punt.  Notify the upper
   6404 			 * layer that there are no more slots left.
   6405 			 */
   6406 			DPRINTF(WM_DEBUG_TX,
   6407 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6408 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6409 			    segs_needed, txq->txq_free - 1));
   6410 			ifp->if_flags |= IFF_OACTIVE;
   6411 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6412 			WM_Q_EVCNT_INCR(txq, txdstall);
   6413 			break;
   6414 		}
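         		/*
         		 * Worked example (illustrative numbers): with
         		 * txq_free = 10, a packet needing 9 descriptors fails
         		 * the test above (9 > 10 - 2) and stalls, keeping one
         		 * slot free for the TDT ring semantics and one for a
         		 * possible context descriptor.
         		 */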
   6415 
   6416 		/*
   6417 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6418 		 * once we know we can transmit the packet, since we
   6419 		 * do some internal FIFO space accounting here.
   6420 		 */
   6421 		if (sc->sc_type == WM_T_82547 &&
   6422 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6423 			DPRINTF(WM_DEBUG_TX,
   6424 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6425 			    device_xname(sc->sc_dev)));
   6426 			ifp->if_flags |= IFF_OACTIVE;
   6427 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6428 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6429 			break;
   6430 		}
   6431 
   6432 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6433 
   6434 		DPRINTF(WM_DEBUG_TX,
   6435 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6436 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6437 
   6438 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6439 
   6440 		/*
   6441 		 * Store a pointer to the packet so that we can free it
   6442 		 * later.
   6443 		 *
   6444 		 * Initially, we consider the number of descriptors the
    6445 		 * packet uses to be the number of DMA segments.  This may be
   6446 		 * incremented by 1 if we do checksum offload (a descriptor
   6447 		 * is used to set the checksum context).
   6448 		 */
   6449 		txs->txs_mbuf = m0;
   6450 		txs->txs_firstdesc = txq->txq_next;
   6451 		txs->txs_ndesc = segs_needed;
   6452 
   6453 		/* Set up offload parameters for this packet. */
   6454 		if (m0->m_pkthdr.csum_flags &
   6455 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6456 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6457 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6458 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6459 					  &cksumfields) != 0) {
   6460 				/* Error message already displayed. */
   6461 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6462 				continue;
   6463 			}
   6464 		} else {
   6465 			cksumcmd = 0;
   6466 			cksumfields = 0;
   6467 		}
   6468 
   6469 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6470 
   6471 		/* Sync the DMA map. */
   6472 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6473 		    BUS_DMASYNC_PREWRITE);
   6474 
   6475 		/* Initialize the transmit descriptor. */
   6476 		for (nexttx = txq->txq_next, seg = 0;
   6477 		     seg < dmamap->dm_nsegs; seg++) {
   6478 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6479 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6480 			     seglen != 0;
   6481 			     curaddr += curlen, seglen -= curlen,
   6482 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6483 				curlen = seglen;
   6484 
   6485 				/*
   6486 				 * So says the Linux driver:
   6487 				 * Work around for premature descriptor
   6488 				 * write-backs in TSO mode.  Append a
   6489 				 * 4-byte sentinel descriptor.
   6490 				 */
   6491 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6492 				    curlen > 8)
   6493 					curlen -= 4;
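				/*
				 * Example with hypothetical sizes: a
				 * 1514-byte final segment goes out as
				 * a 1510-byte descriptor, and the
				 * 4 bytes left in seglen become the
				 * sentinel descriptor on the next
				 * pass of this loop.
				 */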
   6494 
   6495 				wm_set_dma_addr(
   6496 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6497 				txq->txq_descs[nexttx].wtx_cmdlen
   6498 				    = htole32(cksumcmd | curlen);
   6499 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6500 				    = 0;
   6501 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6502 				    = cksumfields;
   6503 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6504 				lasttx = nexttx;
   6505 
   6506 				DPRINTF(WM_DEBUG_TX,
   6507 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6508 				     "len %#04zx\n",
   6509 				    device_xname(sc->sc_dev), nexttx,
   6510 				    (uint64_t)curaddr, curlen));
   6511 			}
   6512 		}
   6513 
   6514 		KASSERT(lasttx != -1);
   6515 
   6516 		/*
   6517 		 * Set up the command byte on the last descriptor of
   6518 		 * the packet.  If we're in the interrupt delay window,
   6519 		 * delay the interrupt.
   6520 		 */
   6521 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6522 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6523 
   6524 		/*
   6525 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6526 		 * up the descriptor to encapsulate the packet for us.
   6527 		 *
   6528 		 * This is only valid on the last descriptor of the packet.
   6529 		 */
   6530 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6531 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6532 			    htole32(WTX_CMD_VLE);
   6533 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6534 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6535 		}
   6536 
   6537 		txs->txs_lastdesc = lasttx;
   6538 
   6539 		DPRINTF(WM_DEBUG_TX,
   6540 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6541 		    device_xname(sc->sc_dev),
   6542 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6543 
   6544 		/* Sync the descriptors we're using. */
   6545 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6546 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6547 
   6548 		/* Give the packet to the chip. */
   6549 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
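         		/*
         		 * Writing the new tail index hands every descriptor
         		 * up to, but not including, nexttx to the hardware;
         		 * this is why one ring slot is always kept free.
         		 */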
   6550 
   6551 		DPRINTF(WM_DEBUG_TX,
   6552 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6553 
   6554 		DPRINTF(WM_DEBUG_TX,
   6555 		    ("%s: TX: finished transmitting packet, job %d\n",
   6556 		    device_xname(sc->sc_dev), txq->txq_snext));
   6557 
   6558 		/* Advance the tx pointer. */
   6559 		txq->txq_free -= txs->txs_ndesc;
   6560 		txq->txq_next = nexttx;
   6561 
   6562 		txq->txq_sfree--;
   6563 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6564 
   6565 		/* Pass the packet to any BPF listeners. */
   6566 		bpf_mtap(ifp, m0);
   6567 	}
   6568 
   6569 	if (m0 != NULL) {
   6570 		ifp->if_flags |= IFF_OACTIVE;
   6571 		WM_Q_EVCNT_INCR(txq, txdrop);
   6572 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6573 			__func__));
   6574 		m_freem(m0);
   6575 	}
   6576 
   6577 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6578 		/* No more slots; notify upper layer. */
   6579 		ifp->if_flags |= IFF_OACTIVE;
   6580 	}
   6581 
   6582 	if (txq->txq_free != ofree) {
   6583 		/* Set a watchdog timer in case the chip flakes out. */
   6584 		ifp->if_timer = 5;
   6585 	}
   6586 }
   6587 
   6588 /*
   6589  * wm_nq_tx_offload:
   6590  *
   6591  *	Set up TCP/IP checksumming parameters for the
   6592  *	specified packet, for NEWQUEUE devices
   6593  */
   6594 static int
   6595 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6596     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6597 {
   6598 	struct mbuf *m0 = txs->txs_mbuf;
   6599 	struct m_tag *mtag;
   6600 	uint32_t vl_len, mssidx, cmdc;
   6601 	struct ether_header *eh;
   6602 	int offset, iphl;
   6603 
   6604 	/*
   6605 	 * XXX It would be nice if the mbuf pkthdr had offset
   6606 	 * fields for the protocol headers.
   6607 	 */
   6608 	*cmdlenp = 0;
   6609 	*fieldsp = 0;
   6610 
   6611 	eh = mtod(m0, struct ether_header *);
   6612 	switch (htons(eh->ether_type)) {
   6613 	case ETHERTYPE_IP:
   6614 	case ETHERTYPE_IPV6:
   6615 		offset = ETHER_HDR_LEN;
   6616 		break;
   6617 
   6618 	case ETHERTYPE_VLAN:
   6619 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6620 		break;
   6621 
   6622 	default:
   6623 		/* Don't support this protocol or encapsulation. */
   6624 		*do_csum = false;
   6625 		return 0;
   6626 	}
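         	/*
         	 * For reference (standard header sizes): an untagged frame
         	 * yields offset = ETHER_HDR_LEN = 14, and an 802.1Q-tagged
         	 * frame yields 14 + ETHER_VLAN_ENCAP_LEN = 18, so the IP
         	 * header starts at byte 14 or 18 respectively.
         	 */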
   6627 	*do_csum = true;
   6628 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6629 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6630 
   6631 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6632 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6633 
   6634 	if ((m0->m_pkthdr.csum_flags &
   6635 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6636 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6637 	} else {
   6638 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6639 	}
   6640 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6641 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
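         	/*
         	 * Worked example (illustrative values): an untagged IPv4
         	 * packet with no IP options gives offset = 14 and iphl = 20,
         	 * so vl_len carries MACLEN = 14 and IPLEN = 20 in the fields
         	 * checked by the KASSERTs above.
         	 */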
   6642 
   6643 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6644 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6645 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6646 		*cmdlenp |= NQTX_CMD_VLE;
   6647 	}
   6648 
   6649 	mssidx = 0;
   6650 
   6651 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6652 		int hlen = offset + iphl;
   6653 		int tcp_hlen;
   6654 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6655 
   6656 		if (__predict_false(m0->m_len <
   6657 				    (hlen + sizeof(struct tcphdr)))) {
   6658 			/*
   6659 			 * TCP/IP headers are not in the first mbuf; we need
   6660 			 * to do this the slow and painful way.  Let's just
   6661 			 * hope this doesn't happen very often.
   6662 			 */
   6663 			struct tcphdr th;
   6664 
   6665 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6666 
   6667 			m_copydata(m0, hlen, sizeof(th), &th);
   6668 			if (v4) {
   6669 				struct ip ip;
   6670 
   6671 				m_copydata(m0, offset, sizeof(ip), &ip);
   6672 				ip.ip_len = 0;
   6673 				m_copyback(m0,
   6674 				    offset + offsetof(struct ip, ip_len),
   6675 				    sizeof(ip.ip_len), &ip.ip_len);
   6676 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6677 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6678 			} else {
   6679 				struct ip6_hdr ip6;
   6680 
   6681 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6682 				ip6.ip6_plen = 0;
   6683 				m_copyback(m0,
   6684 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6685 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6686 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6687 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6688 			}
   6689 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6690 			    sizeof(th.th_sum), &th.th_sum);
   6691 
   6692 			tcp_hlen = th.th_off << 2;
   6693 		} else {
   6694 			/*
   6695 			 * TCP/IP headers are in the first mbuf; we can do
   6696 			 * this the easy way.
   6697 			 */
   6698 			struct tcphdr *th;
   6699 
   6700 			if (v4) {
   6701 				struct ip *ip =
   6702 				    (void *)(mtod(m0, char *) + offset);
   6703 				th = (void *)(mtod(m0, char *) + hlen);
   6704 
   6705 				ip->ip_len = 0;
   6706 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6707 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6708 			} else {
   6709 				struct ip6_hdr *ip6 =
   6710 				    (void *)(mtod(m0, char *) + offset);
   6711 				th = (void *)(mtod(m0, char *) + hlen);
   6712 
   6713 				ip6->ip6_plen = 0;
   6714 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6715 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6716 			}
   6717 			tcp_hlen = th->th_off << 2;
   6718 		}
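         		/*
         		 * Either way, the IP length field was zeroed and
         		 * th_sum was seeded with a pseudo-header checksum
         		 * covering only the addresses and the protocol; the
         		 * hardware inserts the per-segment lengths when it
         		 * slices the packet, so the length must not be part
         		 * of the seed.
         		 */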
   6719 		hlen += tcp_hlen;
   6720 		*cmdlenp |= NQTX_CMD_TSE;
   6721 
   6722 		if (v4) {
   6723 			WM_Q_EVCNT_INCR(txq, txtso);
   6724 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6725 		} else {
   6726 			WM_Q_EVCNT_INCR(txq, txtso6);
   6727 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6728 		}
   6729 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6730 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6731 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6732 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6733 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6734 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
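         		/*
         		 * Example packing (hypothetical numbers): MSS = 1448
         		 * and a 20-byte TCP header give mssidx =
         		 * (1448 << NQTXC_MSSIDX_MSS_SHIFT) |
         		 * (20 << NQTXC_MSSIDX_L4LEN_SHIFT), while the PAYLEN
         		 * field above counts only the TCP payload,
         		 * m_pkthdr.len - hlen.
         		 */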
   6735 	} else {
   6736 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6737 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6738 	}
   6739 
   6740 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6741 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6742 		cmdc |= NQTXC_CMD_IP4;
   6743 	}
   6744 
   6745 	if (m0->m_pkthdr.csum_flags &
   6746 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6747 		WM_Q_EVCNT_INCR(txq, txtusum);
   6748 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6749 			cmdc |= NQTXC_CMD_TCP;
   6750 		} else {
   6751 			cmdc |= NQTXC_CMD_UDP;
   6752 		}
   6753 		cmdc |= NQTXC_CMD_IP4;
   6754 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6755 	}
   6756 	if (m0->m_pkthdr.csum_flags &
   6757 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6758 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6759 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6760 			cmdc |= NQTXC_CMD_TCP;
   6761 		} else {
   6762 			cmdc |= NQTXC_CMD_UDP;
   6763 		}
   6764 		cmdc |= NQTXC_CMD_IP6;
   6765 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6766 	}
   6767 
   6768 	/* Fill in the context descriptor. */
   6769 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6770 	    htole32(vl_len);
   6771 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6772 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6773 	    htole32(cmdc);
   6774 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6775 	    htole32(mssidx);
   6776 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6777 	DPRINTF(WM_DEBUG_TX,
   6778 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6779 	    txq->txq_next, 0, vl_len));
   6780 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6781 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6782 	txs->txs_ndesc++;
   6783 	return 0;
   6784 }
   6785 
   6786 /*
   6787  * wm_nq_start:		[ifnet interface function]
   6788  *
   6789  *	Start packet transmission on the interface for NEWQUEUE devices
   6790  */
   6791 static void
   6792 wm_nq_start(struct ifnet *ifp)
   6793 {
   6794 	struct wm_softc *sc = ifp->if_softc;
   6795 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6796 
   6797 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6798 
   6799 	mutex_enter(txq->txq_lock);
   6800 	if (!txq->txq_stopping)
   6801 		wm_nq_start_locked(ifp);
   6802 	mutex_exit(txq->txq_lock);
   6803 }
   6804 
   6805 static void
   6806 wm_nq_start_locked(struct ifnet *ifp)
   6807 {
   6808 	struct wm_softc *sc = ifp->if_softc;
   6809 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6810 
   6811 	wm_nq_send_common_locked(ifp, txq, false);
   6812 }
   6813 
   6814 static inline int
   6815 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6816 {
   6817 	struct wm_softc *sc = ifp->if_softc;
   6818 	u_int cpuid = cpu_index(curcpu());
   6819 
   6820 	/*
    6821 	 * Currently, a simple distribution strategy.
    6822 	 * TODO:
    6823 	 * distribute by flowid (RSS hash value).
   6824 	 */
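         	/*
         	 * Worked example (illustrative values): with sc_nqueues = 4,
         	 * sc_affinity_offset = 1 and the sending CPU at index 6, the
         	 * packet is steered to queue (6 + 1) % 4 = 3.
         	 */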
   6825 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6826 }
   6827 
   6828 static int
   6829 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6830 {
   6831 	int qid;
   6832 	struct wm_softc *sc = ifp->if_softc;
   6833 	struct wm_txqueue *txq;
   6834 
   6835 	qid = wm_nq_select_txqueue(ifp, m);
   6836 	txq = &sc->sc_queue[qid].wmq_txq;
   6837 
   6838 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6839 		m_freem(m);
   6840 		WM_Q_EVCNT_INCR(txq, txdrop);
   6841 		return ENOBUFS;
   6842 	}
   6843 
   6844 	if (mutex_tryenter(txq->txq_lock)) {
   6845 		/* XXXX should be per TX queue */
   6846 		ifp->if_obytes += m->m_pkthdr.len;
   6847 		if (m->m_flags & M_MCAST)
   6848 			ifp->if_omcasts++;
   6849 
   6850 		if (!txq->txq_stopping)
   6851 			wm_nq_transmit_locked(ifp, txq);
   6852 		mutex_exit(txq->txq_lock);
   6853 	}
   6854 
   6855 	return 0;
   6856 }
   6857 
   6858 static void
   6859 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6860 {
   6861 
   6862 	wm_nq_send_common_locked(ifp, txq, true);
   6863 }
   6864 
   6865 static void
   6866 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6867     bool is_transmit)
   6868 {
   6869 	struct wm_softc *sc = ifp->if_softc;
   6870 	struct mbuf *m0;
   6871 	struct m_tag *mtag;
   6872 	struct wm_txsoft *txs;
   6873 	bus_dmamap_t dmamap;
   6874 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6875 	bool do_csum, sent;
   6876 
   6877 	KASSERT(mutex_owned(txq->txq_lock));
   6878 
   6879 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6880 		return;
   6881 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6882 		return;
   6883 
   6884 	sent = false;
   6885 
   6886 	/*
   6887 	 * Loop through the send queue, setting up transmit descriptors
   6888 	 * until we drain the queue, or use up all available transmit
   6889 	 * descriptors.
   6890 	 */
   6891 	for (;;) {
   6892 		m0 = NULL;
   6893 
   6894 		/* Get a work queue entry. */
   6895 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6896 			wm_txeof(sc, txq);
   6897 			if (txq->txq_sfree == 0) {
   6898 				DPRINTF(WM_DEBUG_TX,
   6899 				    ("%s: TX: no free job descriptors\n",
   6900 					device_xname(sc->sc_dev)));
   6901 				WM_Q_EVCNT_INCR(txq, txsstall);
   6902 				break;
   6903 			}
   6904 		}
   6905 
   6906 		/* Grab a packet off the queue. */
   6907 		if (is_transmit)
   6908 			m0 = pcq_get(txq->txq_interq);
   6909 		else
   6910 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6911 		if (m0 == NULL)
   6912 			break;
   6913 
   6914 		DPRINTF(WM_DEBUG_TX,
   6915 		    ("%s: TX: have packet to transmit: %p\n",
   6916 		    device_xname(sc->sc_dev), m0));
   6917 
   6918 		txs = &txq->txq_soft[txq->txq_snext];
   6919 		dmamap = txs->txs_dmamap;
   6920 
   6921 		/*
   6922 		 * Load the DMA map.  If this fails, the packet either
   6923 		 * didn't fit in the allotted number of segments, or we
   6924 		 * were short on resources.  For the too-many-segments
   6925 		 * case, we simply report an error and drop the packet,
   6926 		 * since we can't sanely copy a jumbo packet to a single
   6927 		 * buffer.
   6928 		 */
   6929 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6930 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6931 		if (error) {
   6932 			if (error == EFBIG) {
   6933 				WM_Q_EVCNT_INCR(txq, txdrop);
   6934 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6935 				    "DMA segments, dropping...\n",
   6936 				    device_xname(sc->sc_dev));
   6937 				wm_dump_mbuf_chain(sc, m0);
   6938 				m_freem(m0);
   6939 				continue;
   6940 			}
   6941 			/* Short on resources, just stop for now. */
   6942 			DPRINTF(WM_DEBUG_TX,
   6943 			    ("%s: TX: dmamap load failed: %d\n",
   6944 			    device_xname(sc->sc_dev), error));
   6945 			break;
   6946 		}
   6947 
   6948 		segs_needed = dmamap->dm_nsegs;
   6949 
   6950 		/*
   6951 		 * Ensure we have enough descriptors free to describe
   6952 		 * the packet.  Note, we always reserve one descriptor
   6953 		 * at the end of the ring due to the semantics of the
   6954 		 * TDT register, plus one more in the event we need
   6955 		 * to load offload context.
   6956 		 */
   6957 		if (segs_needed > txq->txq_free - 2) {
   6958 			/*
   6959 			 * Not enough free descriptors to transmit this
   6960 			 * packet.  We haven't committed anything yet,
   6961 			 * so just unload the DMA map, put the packet
    6962 			 * back on the queue, and punt.  Notify the upper
   6963 			 * layer that there are no more slots left.
   6964 			 */
   6965 			DPRINTF(WM_DEBUG_TX,
   6966 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6967 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6968 			    segs_needed, txq->txq_free - 1));
   6969 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6970 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6971 			WM_Q_EVCNT_INCR(txq, txdstall);
   6972 			break;
   6973 		}
   6974 
   6975 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6976 
   6977 		DPRINTF(WM_DEBUG_TX,
   6978 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6979 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6980 
   6981 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6982 
   6983 		/*
   6984 		 * Store a pointer to the packet so that we can free it
   6985 		 * later.
   6986 		 *
   6987 		 * Initially, we consider the number of descriptors the
    6988 		 * packet uses to be the number of DMA segments.  This may be
   6989 		 * incremented by 1 if we do checksum offload (a descriptor
   6990 		 * is used to set the checksum context).
   6991 		 */
   6992 		txs->txs_mbuf = m0;
   6993 		txs->txs_firstdesc = txq->txq_next;
   6994 		txs->txs_ndesc = segs_needed;
   6995 
   6996 		/* Set up offload parameters for this packet. */
   6997 		uint32_t cmdlen, fields, dcmdlen;
   6998 		if (m0->m_pkthdr.csum_flags &
   6999 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7000 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7001 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7002 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7003 			    &do_csum) != 0) {
   7004 				/* Error message already displayed. */
   7005 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7006 				continue;
   7007 			}
   7008 		} else {
   7009 			do_csum = false;
   7010 			cmdlen = 0;
   7011 			fields = 0;
   7012 		}
   7013 
   7014 		/* Sync the DMA map. */
   7015 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7016 		    BUS_DMASYNC_PREWRITE);
   7017 
   7018 		/* Initialize the first transmit descriptor. */
   7019 		nexttx = txq->txq_next;
   7020 		if (!do_csum) {
   7021 			/* setup a legacy descriptor */
   7022 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7023 			    dmamap->dm_segs[0].ds_addr);
   7024 			txq->txq_descs[nexttx].wtx_cmdlen =
   7025 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7026 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7027 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7028 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7029 			    NULL) {
   7030 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7031 				    htole32(WTX_CMD_VLE);
   7032 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7033 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7034 			} else {
   7035 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7036 			}
   7037 			dcmdlen = 0;
   7038 		} else {
   7039 			/* setup an advanced data descriptor */
   7040 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7041 			    htole64(dmamap->dm_segs[0].ds_addr);
   7042 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7043 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7044 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7045 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7046 			    htole32(fields);
   7047 			DPRINTF(WM_DEBUG_TX,
   7048 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7049 			    device_xname(sc->sc_dev), nexttx,
   7050 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7051 			DPRINTF(WM_DEBUG_TX,
   7052 			    ("\t 0x%08x%08x\n", fields,
   7053 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7054 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7055 		}
   7056 
   7057 		lasttx = nexttx;
   7058 		nexttx = WM_NEXTTX(txq, nexttx);
   7059 		/*
    7060 		 * Fill in the next descriptors.  The legacy and advanced
    7061 		 * formats are the same here.
   7062 		 */
   7063 		for (seg = 1; seg < dmamap->dm_nsegs;
   7064 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7065 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7066 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7067 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7068 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7069 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7070 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7071 			lasttx = nexttx;
   7072 
   7073 			DPRINTF(WM_DEBUG_TX,
   7074 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7075 			     "len %#04zx\n",
   7076 			    device_xname(sc->sc_dev), nexttx,
   7077 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7078 			    dmamap->dm_segs[seg].ds_len));
   7079 		}
   7080 
   7081 		KASSERT(lasttx != -1);
   7082 
   7083 		/*
   7084 		 * Set up the command byte on the last descriptor of
   7085 		 * the packet.  If we're in the interrupt delay window,
   7086 		 * delay the interrupt.
   7087 		 */
   7088 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7089 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7090 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7091 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7092 
   7093 		txs->txs_lastdesc = lasttx;
   7094 
   7095 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7096 		    device_xname(sc->sc_dev),
   7097 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7098 
   7099 		/* Sync the descriptors we're using. */
   7100 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7101 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7102 
   7103 		/* Give the packet to the chip. */
   7104 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7105 		sent = true;
   7106 
   7107 		DPRINTF(WM_DEBUG_TX,
   7108 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7109 
   7110 		DPRINTF(WM_DEBUG_TX,
   7111 		    ("%s: TX: finished transmitting packet, job %d\n",
   7112 		    device_xname(sc->sc_dev), txq->txq_snext));
   7113 
   7114 		/* Advance the tx pointer. */
   7115 		txq->txq_free -= txs->txs_ndesc;
   7116 		txq->txq_next = nexttx;
   7117 
   7118 		txq->txq_sfree--;
   7119 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7120 
   7121 		/* Pass the packet to any BPF listeners. */
   7122 		bpf_mtap(ifp, m0);
   7123 	}
   7124 
   7125 	if (m0 != NULL) {
   7126 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7127 		WM_Q_EVCNT_INCR(txq, txdrop);
   7128 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7129 			__func__));
   7130 		m_freem(m0);
   7131 	}
   7132 
   7133 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7134 		/* No more slots; notify upper layer. */
   7135 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7136 	}
   7137 
   7138 	if (sent) {
   7139 		/* Set a watchdog timer in case the chip flakes out. */
   7140 		ifp->if_timer = 5;
   7141 	}
   7142 }
   7143 
   7144 /* Interrupt */
   7145 
   7146 /*
   7147  * wm_txeof:
   7148  *
   7149  *	Helper; handle transmit interrupts.
   7150  */
   7151 static int
   7152 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7153 {
   7154 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7155 	struct wm_txsoft *txs;
   7156 	bool processed = false;
   7157 	int count = 0;
   7158 	int i;
   7159 	uint8_t status;
   7160 
   7161 	KASSERT(mutex_owned(txq->txq_lock));
   7162 
   7163 	if (txq->txq_stopping)
   7164 		return 0;
   7165 
   7166 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7167 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7168 	else
   7169 		ifp->if_flags &= ~IFF_OACTIVE;
   7170 
   7171 	/*
   7172 	 * Go through the Tx list and free mbufs for those
   7173 	 * frames which have been transmitted.
   7174 	 */
   7175 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7176 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7177 		txs = &txq->txq_soft[i];
   7178 
   7179 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7180 			device_xname(sc->sc_dev), i));
   7181 
   7182 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7183 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7184 
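         		/*
         		 * The descriptor-done (DD) bit is written back by the
         		 * hardware once the last descriptor of this job has
         		 * completed; while it is still clear the job is in
         		 * flight, so we re-sync for reading and stop scanning.
         		 */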
   7185 		status =
   7186 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7187 		if ((status & WTX_ST_DD) == 0) {
   7188 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7189 			    BUS_DMASYNC_PREREAD);
   7190 			break;
   7191 		}
   7192 
   7193 		processed = true;
   7194 		count++;
   7195 		DPRINTF(WM_DEBUG_TX,
   7196 		    ("%s: TX: job %d done: descs %d..%d\n",
   7197 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7198 		    txs->txs_lastdesc));
   7199 
   7200 		/*
   7201 		 * XXX We should probably be using the statistics
   7202 		 * XXX registers, but I don't know if they exist
   7203 		 * XXX on chips before the i82544.
   7204 		 */
   7205 
   7206 #ifdef WM_EVENT_COUNTERS
   7207 		if (status & WTX_ST_TU)
   7208 			WM_Q_EVCNT_INCR(txq, tu);
   7209 #endif /* WM_EVENT_COUNTERS */
   7210 
   7211 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7212 			ifp->if_oerrors++;
   7213 			if (status & WTX_ST_LC)
   7214 				log(LOG_WARNING, "%s: late collision\n",
   7215 				    device_xname(sc->sc_dev));
   7216 			else if (status & WTX_ST_EC) {
   7217 				ifp->if_collisions += 16;
   7218 				log(LOG_WARNING, "%s: excessive collisions\n",
   7219 				    device_xname(sc->sc_dev));
   7220 			}
   7221 		} else
   7222 			ifp->if_opackets++;
   7223 
   7224 		txq->txq_free += txs->txs_ndesc;
   7225 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7226 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7227 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7228 		m_freem(txs->txs_mbuf);
   7229 		txs->txs_mbuf = NULL;
   7230 	}
   7231 
   7232 	/* Update the dirty transmit buffer pointer. */
   7233 	txq->txq_sdirty = i;
   7234 	DPRINTF(WM_DEBUG_TX,
   7235 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7236 
   7237 	if (count != 0)
   7238 		rnd_add_uint32(&sc->rnd_source, count);
   7239 
   7240 	/*
   7241 	 * If there are no more pending transmissions, cancel the watchdog
   7242 	 * timer.
   7243 	 */
   7244 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7245 		ifp->if_timer = 0;
   7246 
   7247 	return processed;
   7248 }
   7249 
   7250 /*
   7251  * wm_rxeof:
   7252  *
   7253  *	Helper; handle receive interrupts.
   7254  */
   7255 static void
   7256 wm_rxeof(struct wm_rxqueue *rxq)
   7257 {
   7258 	struct wm_softc *sc = rxq->rxq_sc;
   7259 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7260 	struct wm_rxsoft *rxs;
   7261 	struct mbuf *m;
   7262 	int i, len;
   7263 	int count = 0;
   7264 	uint8_t status, errors;
   7265 	uint16_t vlantag;
   7266 
   7267 	KASSERT(mutex_owned(rxq->rxq_lock));
   7268 
   7269 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7270 		rxs = &rxq->rxq_soft[i];
   7271 
   7272 		DPRINTF(WM_DEBUG_RX,
   7273 		    ("%s: RX: checking descriptor %d\n",
   7274 		    device_xname(sc->sc_dev), i));
   7275 
   7276 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7277 
   7278 		status = rxq->rxq_descs[i].wrx_status;
   7279 		errors = rxq->rxq_descs[i].wrx_errors;
   7280 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7281 		vlantag = rxq->rxq_descs[i].wrx_special;
   7282 
   7283 		if ((status & WRX_ST_DD) == 0) {
   7284 			/* We have processed all of the receive descriptors. */
   7285 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7286 			break;
   7287 		}
   7288 
   7289 		count++;
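         		/*
         		 * rxq_discard is set below when a buffer allocation
         		 * fails mid-packet; while it is set we recycle every
         		 * descriptor up to and including the one marked EOP,
         		 * then resume normal reception.
         		 */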
   7290 		if (__predict_false(rxq->rxq_discard)) {
   7291 			DPRINTF(WM_DEBUG_RX,
   7292 			    ("%s: RX: discarding contents of descriptor %d\n",
   7293 			    device_xname(sc->sc_dev), i));
   7294 			wm_init_rxdesc(rxq, i);
   7295 			if (status & WRX_ST_EOP) {
   7296 				/* Reset our state. */
   7297 				DPRINTF(WM_DEBUG_RX,
   7298 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7299 				    device_xname(sc->sc_dev)));
   7300 				rxq->rxq_discard = 0;
   7301 			}
   7302 			continue;
   7303 		}
   7304 
   7305 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7306 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7307 
   7308 		m = rxs->rxs_mbuf;
   7309 
   7310 		/*
   7311 		 * Add a new receive buffer to the ring, unless of
   7312 		 * course the length is zero. Treat the latter as a
   7313 		 * failed mapping.
   7314 		 */
   7315 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7316 			/*
   7317 			 * Failed, throw away what we've done so
   7318 			 * far, and discard the rest of the packet.
   7319 			 */
   7320 			ifp->if_ierrors++;
   7321 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7322 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7323 			wm_init_rxdesc(rxq, i);
   7324 			if ((status & WRX_ST_EOP) == 0)
   7325 				rxq->rxq_discard = 1;
   7326 			if (rxq->rxq_head != NULL)
   7327 				m_freem(rxq->rxq_head);
   7328 			WM_RXCHAIN_RESET(rxq);
   7329 			DPRINTF(WM_DEBUG_RX,
   7330 			    ("%s: RX: Rx buffer allocation failed, "
   7331 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7332 			    rxq->rxq_discard ? " (discard)" : ""));
   7333 			continue;
   7334 		}
   7335 
   7336 		m->m_len = len;
   7337 		rxq->rxq_len += len;
   7338 		DPRINTF(WM_DEBUG_RX,
   7339 		    ("%s: RX: buffer at %p len %d\n",
   7340 		    device_xname(sc->sc_dev), m->m_data, len));
   7341 
   7342 		/* If this is not the end of the packet, keep looking. */
   7343 		if ((status & WRX_ST_EOP) == 0) {
   7344 			WM_RXCHAIN_LINK(rxq, m);
   7345 			DPRINTF(WM_DEBUG_RX,
   7346 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7347 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7348 			continue;
   7349 		}
   7350 
   7351 		/*
    7352 		 * Okay, we have the entire packet now.  The chip is
    7353 		 * configured to include the FCS except on I350, I354 and
    7354 		 * I21[01] (not all chips can be configured to strip it),
    7355 		 * so we need to trim it.  We may also need to adjust the
    7356 		 * length of the previous mbuf in the chain if the current
    7357 		 * mbuf is too short.
    7358 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7359 		 * register is always set on I350, so we don't trim there.
   7360 		 */
   7361 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7362 		    && (sc->sc_type != WM_T_I210)
   7363 		    && (sc->sc_type != WM_T_I211)) {
   7364 			if (m->m_len < ETHER_CRC_LEN) {
   7365 				rxq->rxq_tail->m_len
   7366 				    -= (ETHER_CRC_LEN - m->m_len);
   7367 				m->m_len = 0;
   7368 			} else
   7369 				m->m_len -= ETHER_CRC_LEN;
   7370 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7371 		} else
   7372 			len = rxq->rxq_len;
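         		/*
         		 * Example (hypothetical sizes): a maximum 1518-byte
         		 * frame arrives with rxq_len = 1518 including the
         		 * 4-byte FCS; on chips that keep the FCS we hand up
         		 * len = 1514, while on I350/I354/I21[01] the FCS was
         		 * already stripped and rxq_len is used as-is.
         		 */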
   7373 
   7374 		WM_RXCHAIN_LINK(rxq, m);
   7375 
   7376 		*rxq->rxq_tailp = NULL;
   7377 		m = rxq->rxq_head;
   7378 
   7379 		WM_RXCHAIN_RESET(rxq);
   7380 
   7381 		DPRINTF(WM_DEBUG_RX,
   7382 		    ("%s: RX: have entire packet, len -> %d\n",
   7383 		    device_xname(sc->sc_dev), len));
   7384 
   7385 		/* If an error occurred, update stats and drop the packet. */
   7386 		if (errors &
   7387 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7388 			if (errors & WRX_ER_SE)
   7389 				log(LOG_WARNING, "%s: symbol error\n",
   7390 				    device_xname(sc->sc_dev));
   7391 			else if (errors & WRX_ER_SEQ)
   7392 				log(LOG_WARNING, "%s: receive sequence error\n",
   7393 				    device_xname(sc->sc_dev));
   7394 			else if (errors & WRX_ER_CE)
   7395 				log(LOG_WARNING, "%s: CRC error\n",
   7396 				    device_xname(sc->sc_dev));
   7397 			m_freem(m);
   7398 			continue;
   7399 		}
   7400 
   7401 		/* No errors.  Receive the packet. */
   7402 		m_set_rcvif(m, ifp);
   7403 		m->m_pkthdr.len = len;
   7404 
   7405 		/*
   7406 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7407 		 * for us.  Associate the tag with the packet.
   7408 		 */
   7409 		/* XXXX should check for i350 and i354 */
   7410 		if ((status & WRX_ST_VP) != 0) {
   7411 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7412 		}
   7413 
   7414 		/* Set up checksum info for this packet. */
   7415 		if ((status & WRX_ST_IXSM) == 0) {
   7416 			if (status & WRX_ST_IPCS) {
   7417 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7418 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7419 				if (errors & WRX_ER_IPE)
   7420 					m->m_pkthdr.csum_flags |=
   7421 					    M_CSUM_IPv4_BAD;
   7422 			}
   7423 			if (status & WRX_ST_TCPCS) {
   7424 				/*
   7425 				 * Note: we don't know if this was TCP or UDP,
   7426 				 * so we just set both bits, and expect the
   7427 				 * upper layers to deal.
   7428 				 */
   7429 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7430 				m->m_pkthdr.csum_flags |=
   7431 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7432 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7433 				if (errors & WRX_ER_TCPE)
   7434 					m->m_pkthdr.csum_flags |=
   7435 					    M_CSUM_TCP_UDP_BAD;
   7436 			}
   7437 		}
   7438 
   7439 		ifp->if_ipackets++;
   7440 
   7441 		mutex_exit(rxq->rxq_lock);
   7442 
   7443 		/* Pass this up to any BPF listeners. */
   7444 		bpf_mtap(ifp, m);
   7445 
   7446 		/* Pass it on. */
   7447 		if_percpuq_enqueue(sc->sc_ipq, m);
   7448 
   7449 		mutex_enter(rxq->rxq_lock);
   7450 
   7451 		if (rxq->rxq_stopping)
   7452 			break;
   7453 	}
   7454 
   7455 	/* Update the receive pointer. */
   7456 	rxq->rxq_ptr = i;
   7457 	if (count != 0)
   7458 		rnd_add_uint32(&sc->rnd_source, count);
   7459 
   7460 	DPRINTF(WM_DEBUG_RX,
   7461 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7462 }
   7463 
   7464 /*
   7465  * wm_linkintr_gmii:
   7466  *
   7467  *	Helper; handle link interrupts for GMII.
   7468  */
   7469 static void
   7470 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7471 {
   7472 
   7473 	KASSERT(WM_CORE_LOCKED(sc));
   7474 
   7475 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7476 		__func__));
   7477 
   7478 	if (icr & ICR_LSC) {
   7479 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7480 
   7481 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7482 			wm_gig_downshift_workaround_ich8lan(sc);
   7483 
   7484 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7485 			device_xname(sc->sc_dev)));
   7486 		mii_pollstat(&sc->sc_mii);
   7487 		if (sc->sc_type == WM_T_82543) {
   7488 			int miistatus, active;
   7489 
   7490 			/*
   7491 			 * With 82543, we need to force speed and
   7492 			 * duplex on the MAC equal to what the PHY
   7493 			 * speed and duplex configuration is.
   7494 			 */
   7495 			miistatus = sc->sc_mii.mii_media_status;
   7496 
   7497 			if (miistatus & IFM_ACTIVE) {
   7498 				active = sc->sc_mii.mii_media_active;
   7499 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7500 				switch (IFM_SUBTYPE(active)) {
   7501 				case IFM_10_T:
   7502 					sc->sc_ctrl |= CTRL_SPEED_10;
   7503 					break;
   7504 				case IFM_100_TX:
   7505 					sc->sc_ctrl |= CTRL_SPEED_100;
   7506 					break;
   7507 				case IFM_1000_T:
   7508 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7509 					break;
   7510 				default:
   7511 					/*
    7512 					 * Fiber?
    7513 					 * Should not enter here.
   7514 					 */
   7515 					printf("unknown media (%x)\n", active);
   7516 					break;
   7517 				}
   7518 				if (active & IFM_FDX)
   7519 					sc->sc_ctrl |= CTRL_FD;
   7520 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7521 			}
   7522 		} else if ((sc->sc_type == WM_T_ICH8)
   7523 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7524 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7525 		} else if (sc->sc_type == WM_T_PCH) {
   7526 			wm_k1_gig_workaround_hv(sc,
   7527 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7528 		}
   7529 
   7530 		if ((sc->sc_phytype == WMPHY_82578)
   7531 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7532 			== IFM_1000_T)) {
   7533 
   7534 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7535 				delay(200*1000); /* XXX too big */
   7536 
   7537 				/* Link stall fix for link up */
   7538 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7539 				    HV_MUX_DATA_CTRL,
   7540 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7541 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7542 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7543 				    HV_MUX_DATA_CTRL,
   7544 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7545 			}
   7546 		}
   7547 	} else if (icr & ICR_RXSEQ) {
   7548 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7549 			device_xname(sc->sc_dev)));
   7550 	}
   7551 }
   7552 
   7553 /*
   7554  * wm_linkintr_tbi:
   7555  *
   7556  *	Helper; handle link interrupts for TBI mode.
   7557  */
   7558 static void
   7559 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7560 {
   7561 	uint32_t status;
   7562 
   7563 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7564 		__func__));
   7565 
   7566 	status = CSR_READ(sc, WMREG_STATUS);
   7567 	if (icr & ICR_LSC) {
   7568 		if (status & STATUS_LU) {
   7569 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7570 			    device_xname(sc->sc_dev),
   7571 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7572 			/*
   7573 			 * NOTE: CTRL will update TFCE and RFCE automatically,
    7574 			 * so we re-read CTRL to refresh sc->sc_ctrl
   7575 			 */
   7576 
   7577 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7578 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7579 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7580 			if (status & STATUS_FD)
   7581 				sc->sc_tctl |=
   7582 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7583 			else
   7584 				sc->sc_tctl |=
   7585 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7586 			if (sc->sc_ctrl & CTRL_TFCE)
   7587 				sc->sc_fcrtl |= FCRTL_XONE;
   7588 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7589 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7590 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7591 				      sc->sc_fcrtl);
   7592 			sc->sc_tbi_linkup = 1;
   7593 		} else {
   7594 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7595 			    device_xname(sc->sc_dev)));
   7596 			sc->sc_tbi_linkup = 0;
   7597 		}
   7598 		/* Update LED */
   7599 		wm_tbi_serdes_set_linkled(sc);
   7600 	} else if (icr & ICR_RXSEQ) {
   7601 		DPRINTF(WM_DEBUG_LINK,
   7602 		    ("%s: LINK: Receive sequence error\n",
   7603 		    device_xname(sc->sc_dev)));
   7604 	}
   7605 }
   7606 
   7607 /*
   7608  * wm_linkintr_serdes:
   7609  *
    7610  *	Helper; handle link interrupts for SERDES mode.
   7611  */
   7612 static void
   7613 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7614 {
   7615 	struct mii_data *mii = &sc->sc_mii;
   7616 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7617 	uint32_t pcs_adv, pcs_lpab, reg;
   7618 
   7619 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7620 		__func__));
   7621 
   7622 	if (icr & ICR_LSC) {
   7623 		/* Check PCS */
   7624 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7625 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7626 			mii->mii_media_status |= IFM_ACTIVE;
   7627 			sc->sc_tbi_linkup = 1;
   7628 		} else {
    7629 			mii->mii_media_active |= IFM_NONE;
   7630 			sc->sc_tbi_linkup = 0;
   7631 			wm_tbi_serdes_set_linkled(sc);
   7632 			return;
   7633 		}
   7634 		mii->mii_media_active |= IFM_1000_SX;
   7635 		if ((reg & PCS_LSTS_FDX) != 0)
   7636 			mii->mii_media_active |= IFM_FDX;
   7637 		else
   7638 			mii->mii_media_active |= IFM_HDX;
   7639 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7640 			/* Check flow */
   7641 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7642 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7643 				DPRINTF(WM_DEBUG_LINK,
   7644 				    ("XXX LINKOK but not ACOMP\n"));
   7645 				return;
   7646 			}
   7647 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7648 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7649 			DPRINTF(WM_DEBUG_LINK,
   7650 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7651 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7652 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7653 				mii->mii_media_active |= IFM_FLOW
   7654 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7655 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7656 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7657 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7658 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7659 				mii->mii_media_active |= IFM_FLOW
   7660 				    | IFM_ETH_TXPAUSE;
   7661 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7662 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7663 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7664 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7665 				mii->mii_media_active |= IFM_FLOW
   7666 				    | IFM_ETH_RXPAUSE;
   7667 		}
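         		/*
         		 * The cascade above is the usual 802.3 pause
         		 * resolution: symmetric pause advertised by both
         		 * sides enables flow control in both directions,
         		 * while the two asymmetric combinations enable it
         		 * in one direction only.
         		 */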
   7668 		/* Update LED */
   7669 		wm_tbi_serdes_set_linkled(sc);
   7670 	} else {
   7671 		DPRINTF(WM_DEBUG_LINK,
   7672 		    ("%s: LINK: Receive sequence error\n",
   7673 		    device_xname(sc->sc_dev)));
   7674 	}
   7675 }
   7676 
   7677 /*
   7678  * wm_linkintr:
   7679  *
   7680  *	Helper; handle link interrupts.
   7681  */
   7682 static void
   7683 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7684 {
   7685 
   7686 	KASSERT(WM_CORE_LOCKED(sc));
   7687 
   7688 	if (sc->sc_flags & WM_F_HAS_MII)
   7689 		wm_linkintr_gmii(sc, icr);
   7690 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7691 	    && (sc->sc_type >= WM_T_82575))
   7692 		wm_linkintr_serdes(sc, icr);
   7693 	else
   7694 		wm_linkintr_tbi(sc, icr);
   7695 }
   7696 
   7697 /*
   7698  * wm_intr_legacy:
   7699  *
   7700  *	Interrupt service routine for INTx and MSI.
   7701  */
   7702 static int
   7703 wm_intr_legacy(void *arg)
   7704 {
   7705 	struct wm_softc *sc = arg;
   7706 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7707 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7708 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7709 	uint32_t icr, rndval = 0;
   7710 	int handled = 0;
   7711 
   7712 	DPRINTF(WM_DEBUG_TX,
   7713 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
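         	/*
         	 * Reading ICR acknowledges (clears) the asserted cause bits
         	 * on these devices, so we keep looping until a read returns
         	 * none of the causes we registered in sc_icr.
         	 */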
   7714 	while (1 /* CONSTCOND */) {
   7715 		icr = CSR_READ(sc, WMREG_ICR);
   7716 		if ((icr & sc->sc_icr) == 0)
   7717 			break;
   7718 		if (rndval == 0)
   7719 			rndval = icr;
   7720 
   7721 		mutex_enter(rxq->rxq_lock);
   7722 
   7723 		if (rxq->rxq_stopping) {
   7724 			mutex_exit(rxq->rxq_lock);
   7725 			break;
   7726 		}
   7727 
   7728 		handled = 1;
   7729 
   7730 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7731 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7732 			DPRINTF(WM_DEBUG_RX,
   7733 			    ("%s: RX: got Rx intr 0x%08x\n",
   7734 			    device_xname(sc->sc_dev),
   7735 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7736 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7737 		}
   7738 #endif
   7739 		wm_rxeof(rxq);
   7740 
   7741 		mutex_exit(rxq->rxq_lock);
   7742 		mutex_enter(txq->txq_lock);
   7743 
   7744 		if (txq->txq_stopping) {
   7745 			mutex_exit(txq->txq_lock);
   7746 			break;
   7747 		}
   7748 
   7749 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7750 		if (icr & ICR_TXDW) {
   7751 			DPRINTF(WM_DEBUG_TX,
   7752 			    ("%s: TX: got TXDW interrupt\n",
   7753 			    device_xname(sc->sc_dev)));
   7754 			WM_Q_EVCNT_INCR(txq, txdw);
   7755 		}
   7756 #endif
   7757 		wm_txeof(sc, txq);
   7758 
   7759 		mutex_exit(txq->txq_lock);
   7760 		WM_CORE_LOCK(sc);
   7761 
   7762 		if (sc->sc_core_stopping) {
   7763 			WM_CORE_UNLOCK(sc);
   7764 			break;
   7765 		}
   7766 
   7767 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7768 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7769 			wm_linkintr(sc, icr);
   7770 		}
   7771 
   7772 		WM_CORE_UNLOCK(sc);
   7773 
   7774 		if (icr & ICR_RXO) {
   7775 #if defined(WM_DEBUG)
   7776 			log(LOG_WARNING, "%s: Receive overrun\n",
   7777 			    device_xname(sc->sc_dev));
   7778 #endif /* defined(WM_DEBUG) */
   7779 		}
   7780 	}
   7781 
   7782 	rnd_add_uint32(&sc->rnd_source, rndval);
   7783 
   7784 	if (handled) {
   7785 		/* Try to get more packets going. */
   7786 		ifp->if_start(ifp);
   7787 	}
   7788 
   7789 	return handled;
   7790 }
   7791 
   7792 static int
   7793 wm_txrxintr_msix(void *arg)
   7794 {
   7795 	struct wm_queue *wmq = arg;
   7796 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7797 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7798 	struct wm_softc *sc = txq->txq_sc;
   7799 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7800 
   7801 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7802 
   7803 	DPRINTF(WM_DEBUG_TX,
   7804 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7805 
   7806 	if (sc->sc_type == WM_T_82574)
   7807 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7808 	else if (sc->sc_type == WM_T_82575)
   7809 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7810 	else
   7811 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7812 
   7813 	mutex_enter(txq->txq_lock);
   7814 
   7815 	if (txq->txq_stopping) {
   7816 		mutex_exit(txq->txq_lock);
   7817 		return 0;
   7818 	}
   7819 
   7820 	WM_Q_EVCNT_INCR(txq, txdw);
   7821 	wm_txeof(sc, txq);
   7822 
   7823 	/* Try to get more packets going. */
   7824 	if (pcq_peek(txq->txq_interq) != NULL)
   7825 		wm_nq_transmit_locked(ifp, txq);
   7826 	/*
    7827 	 * There is still some upper-layer processing that calls
    7828 	 * ifp->if_start(), e.g. ALTQ.
   7829 	 */
   7830 	if (wmq->wmq_id == 0) {
   7831 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7832 			wm_nq_start_locked(ifp);
   7833 	}
   7834 
   7835 	mutex_exit(txq->txq_lock);
   7836 
   7837 	DPRINTF(WM_DEBUG_RX,
   7838 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7839 	mutex_enter(rxq->rxq_lock);
   7840 
   7841 	if (rxq->rxq_stopping) {
   7842 		mutex_exit(rxq->rxq_lock);
   7843 		return 0;
   7844 	}
   7845 
   7846 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7847 	wm_rxeof(rxq);
   7848 	mutex_exit(rxq->rxq_lock);
   7849 
   7850 	if (sc->sc_type == WM_T_82574)
   7851 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7852 	else if (sc->sc_type == WM_T_82575)
   7853 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7854 	else
   7855 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7856 
   7857 	return 1;
   7858 }
   7859 
   7860 /*
   7861  * wm_linkintr_msix:
   7862  *
   7863  *	Interrupt service routine for link status change for MSI-X.
   7864  */
   7865 static int
   7866 wm_linkintr_msix(void *arg)
   7867 {
   7868 	struct wm_softc *sc = arg;
   7869 	uint32_t reg;
   7870 
   7871 	DPRINTF(WM_DEBUG_LINK,
   7872 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7873 
   7874 	reg = CSR_READ(sc, WMREG_ICR);
   7875 	WM_CORE_LOCK(sc);
   7876 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   7877 		goto out;
   7878 
   7879 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7880 	wm_linkintr(sc, ICR_LSC);
   7881 
   7882 out:
   7883 	WM_CORE_UNLOCK(sc);
   7884 
   7885 	if (sc->sc_type == WM_T_82574)
   7886 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7887 	else if (sc->sc_type == WM_T_82575)
   7888 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7889 	else
   7890 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7891 
   7892 	return 1;
   7893 }
   7894 
   7895 /*
   7896  * Media related.
   7897  * GMII, SGMII, TBI (and SERDES)
   7898  */
   7899 
   7900 /* Common */
   7901 
   7902 /*
   7903  * wm_tbi_serdes_set_linkled:
   7904  *
   7905  *	Update the link LED on TBI and SERDES devices.
   7906  */
   7907 static void
   7908 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7909 {
   7910 
   7911 	if (sc->sc_tbi_linkup)
   7912 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7913 	else
   7914 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7915 
   7916 	/* 82540 or newer devices are active low */
   7917 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
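         	/*
         	 * Example: with the link up on a chip where sc_type >=
         	 * WM_T_82540, the bit is set above and then inverted by the
         	 * XOR, driving SWDPIN(0) low so the active-low LED lights.
         	 */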
   7918 
   7919 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7920 }
   7921 
   7922 /* GMII related */
   7923 
   7924 /*
   7925  * wm_gmii_reset:
   7926  *
   7927  *	Reset the PHY.
   7928  */
   7929 static void
   7930 wm_gmii_reset(struct wm_softc *sc)
   7931 {
   7932 	uint32_t reg;
   7933 	int rv;
   7934 
   7935 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7936 		device_xname(sc->sc_dev), __func__));
   7937 
   7938 	rv = sc->phy.acquire(sc);
   7939 	if (rv != 0) {
   7940 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7941 		    __func__);
   7942 		return;
   7943 	}
   7944 
   7945 	switch (sc->sc_type) {
   7946 	case WM_T_82542_2_0:
   7947 	case WM_T_82542_2_1:
   7948 		/* null */
   7949 		break;
   7950 	case WM_T_82543:
   7951 		/*
   7952 		 * With 82543, we need to force speed and duplex on the MAC
   7953 		 * equal to what the PHY speed and duplex configuration is.
   7954 		 * In addition, we need to perform a hardware reset on the PHY
   7955 		 * to take it out of reset.
   7956 		 */
   7957 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7958 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7959 
   7960 		/* The PHY reset pin is active-low. */
   7961 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7962 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7963 		    CTRL_EXT_SWDPIN(4));
   7964 		reg |= CTRL_EXT_SWDPIO(4);
   7965 
   7966 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7967 		CSR_WRITE_FLUSH(sc);
   7968 		delay(10*1000);
   7969 
   7970 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7971 		CSR_WRITE_FLUSH(sc);
   7972 		delay(150);
   7973 #if 0
   7974 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7975 #endif
   7976 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7977 		break;
   7978 	case WM_T_82544:	/* reset 10000us */
   7979 	case WM_T_82540:
   7980 	case WM_T_82545:
   7981 	case WM_T_82545_3:
   7982 	case WM_T_82546:
   7983 	case WM_T_82546_3:
   7984 	case WM_T_82541:
   7985 	case WM_T_82541_2:
   7986 	case WM_T_82547:
   7987 	case WM_T_82547_2:
   7988 	case WM_T_82571:	/* reset 100us */
   7989 	case WM_T_82572:
   7990 	case WM_T_82573:
   7991 	case WM_T_82574:
   7992 	case WM_T_82575:
   7993 	case WM_T_82576:
   7994 	case WM_T_82580:
   7995 	case WM_T_I350:
   7996 	case WM_T_I354:
   7997 	case WM_T_I210:
   7998 	case WM_T_I211:
   7999 	case WM_T_82583:
   8000 	case WM_T_80003:
   8001 		/* generic reset */
   8002 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8003 		CSR_WRITE_FLUSH(sc);
   8004 		delay(20000);
   8005 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8006 		CSR_WRITE_FLUSH(sc);
   8007 		delay(20000);
   8008 
   8009 		if ((sc->sc_type == WM_T_82541)
   8010 		    || (sc->sc_type == WM_T_82541_2)
   8011 		    || (sc->sc_type == WM_T_82547)
   8012 		    || (sc->sc_type == WM_T_82547_2)) {
    8013 			/* workarounds for igp are done in igp_reset() */
   8014 			/* XXX add code to set LED after phy reset */
   8015 		}
   8016 		break;
   8017 	case WM_T_ICH8:
   8018 	case WM_T_ICH9:
   8019 	case WM_T_ICH10:
   8020 	case WM_T_PCH:
   8021 	case WM_T_PCH2:
   8022 	case WM_T_PCH_LPT:
   8023 	case WM_T_PCH_SPT:
   8024 		/* generic reset */
   8025 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8026 		CSR_WRITE_FLUSH(sc);
   8027 		delay(100);
   8028 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8029 		CSR_WRITE_FLUSH(sc);
   8030 		delay(150);
   8031 		break;
   8032 	default:
   8033 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8034 		    __func__);
   8035 		break;
   8036 	}
   8037 
   8038 	sc->phy.release(sc);
   8039 
   8040 	/* get_cfg_done */
   8041 	wm_get_cfg_done(sc);
   8042 
   8043 	/* extra setup */
   8044 	switch (sc->sc_type) {
   8045 	case WM_T_82542_2_0:
   8046 	case WM_T_82542_2_1:
   8047 	case WM_T_82543:
   8048 	case WM_T_82544:
   8049 	case WM_T_82540:
   8050 	case WM_T_82545:
   8051 	case WM_T_82545_3:
   8052 	case WM_T_82546:
   8053 	case WM_T_82546_3:
   8054 	case WM_T_82541_2:
   8055 	case WM_T_82547_2:
   8056 	case WM_T_82571:
   8057 	case WM_T_82572:
   8058 	case WM_T_82573:
   8059 	case WM_T_82575:
   8060 	case WM_T_82576:
   8061 	case WM_T_82580:
   8062 	case WM_T_I350:
   8063 	case WM_T_I354:
   8064 	case WM_T_I210:
   8065 	case WM_T_I211:
   8066 	case WM_T_80003:
   8067 		/* null */
   8068 		break;
   8069 	case WM_T_82574:
   8070 	case WM_T_82583:
   8071 		wm_lplu_d0_disable(sc);
   8072 		break;
   8073 	case WM_T_82541:
   8074 	case WM_T_82547:
    8075 		/* XXX Configure the activity LED after PHY reset */
   8076 		break;
   8077 	case WM_T_ICH8:
   8078 	case WM_T_ICH9:
   8079 	case WM_T_ICH10:
   8080 	case WM_T_PCH:
   8081 	case WM_T_PCH2:
   8082 	case WM_T_PCH_LPT:
   8083 	case WM_T_PCH_SPT:
    8084 		/* Allow time for h/w to get to a quiescent state after reset */
   8085 		delay(10*1000);
   8086 
   8087 		if (sc->sc_type == WM_T_PCH)
   8088 			wm_hv_phy_workaround_ich8lan(sc);
   8089 
   8090 		if (sc->sc_type == WM_T_PCH2)
   8091 			wm_lv_phy_workaround_ich8lan(sc);
   8092 
   8093 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   8094 			/*
   8095 			 * dummy read to clear the phy wakeup bit after lcd
   8096 			 * reset
   8097 			 */
   8098 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   8099 		}
   8100 
   8101 		/*
    8102 		 * XXX Configure the LCD with the extended configuration region
   8103 		 * in NVM
   8104 		 */
   8105 
   8106 		/* Disable D0 LPLU. */
   8107 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8108 			wm_lplu_d0_disable_pch(sc);
   8109 		else
   8110 			wm_lplu_d0_disable(sc);	/* ICH* */
   8111 		break;
   8112 	default:
   8113 		panic("%s: unknown type\n", __func__);
   8114 		break;
   8115 	}
   8116 }
   8117 
   8118 /*
   8119  * wm_get_phy_id_82575:
   8120  *
    8121  * Return the PHY ID, or -1 on failure.
   8122  */
   8123 static int
   8124 wm_get_phy_id_82575(struct wm_softc *sc)
   8125 {
   8126 	uint32_t reg;
   8127 	int phyid = -1;
   8128 
   8129 	/* XXX */
   8130 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8131 		return -1;
   8132 
   8133 	if (wm_sgmii_uses_mdio(sc)) {
   8134 		switch (sc->sc_type) {
   8135 		case WM_T_82575:
   8136 		case WM_T_82576:
   8137 			reg = CSR_READ(sc, WMREG_MDIC);
   8138 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8139 			break;
   8140 		case WM_T_82580:
   8141 		case WM_T_I350:
   8142 		case WM_T_I354:
   8143 		case WM_T_I210:
   8144 		case WM_T_I211:
   8145 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8146 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8147 			break;
   8148 		default:
   8149 			return -1;
   8150 		}
   8151 	}
   8152 
   8153 	return phyid;
   8154 }
   8155 
   8156 
   8157 /*
   8158  * wm_gmii_mediainit:
   8159  *
   8160  *	Initialize media for use on 1000BASE-T devices.
   8161  */
   8162 static void
   8163 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8164 {
   8165 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8166 	struct mii_data *mii = &sc->sc_mii;
   8167 	uint32_t reg;
   8168 
   8169 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8170 		device_xname(sc->sc_dev), __func__));
   8171 
   8172 	/* We have GMII. */
   8173 	sc->sc_flags |= WM_F_HAS_MII;
   8174 
   8175 	if (sc->sc_type == WM_T_80003)
    8176 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   8177 	else
   8178 		sc->sc_tipg = TIPG_1000T_DFLT;
   8179 
   8180 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8181 	if ((sc->sc_type == WM_T_82580)
   8182 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8183 	    || (sc->sc_type == WM_T_I211)) {
   8184 		reg = CSR_READ(sc, WMREG_PHPM);
   8185 		reg &= ~PHPM_GO_LINK_D;
   8186 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8187 	}
   8188 
   8189 	/*
   8190 	 * Let the chip set speed/duplex on its own based on
   8191 	 * signals from the PHY.
   8192 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8193 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8194 	 */
   8195 	sc->sc_ctrl |= CTRL_SLU;
   8196 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8197 
   8198 	/* Initialize our media structures and probe the GMII. */
   8199 	mii->mii_ifp = ifp;
   8200 
   8201 	/*
   8202 	 * Determine the PHY access method.
   8203 	 *
    8204 	 *  For SGMII, use the SGMII-specific method.
    8205 	 *
    8206 	 *  For some devices, we can determine the PHY access method
    8207 	 * from sc_type.
    8208 	 *
    8209 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8210 	 * access method from sc_type alone, so use the PCI product ID for
    8211 	 * some devices.
    8212 	 * For other ICH8 variants, try igp's method first; if the PHY
    8213 	 * can't be detected that way, fall back to bm's method.
   8214 	 */
   8215 	switch (prodid) {
   8216 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8217 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8218 		/* 82577 */
   8219 		sc->sc_phytype = WMPHY_82577;
   8220 		break;
   8221 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8222 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8223 		/* 82578 */
   8224 		sc->sc_phytype = WMPHY_82578;
   8225 		break;
   8226 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8227 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8228 		/* 82579 */
   8229 		sc->sc_phytype = WMPHY_82579;
   8230 		break;
   8231 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8232 	case PCI_PRODUCT_INTEL_82801I_BM:
   8233 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8234 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8235 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8236 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8237 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8238 		/* ICH8, 9, 10 with 82567 */
   8239 		sc->sc_phytype = WMPHY_BM;
   8240 		mii->mii_readreg = wm_gmii_bm_readreg;
   8241 		mii->mii_writereg = wm_gmii_bm_writereg;
   8242 		break;
   8243 	default:
   8244 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8245 		    && !wm_sgmii_uses_mdio(sc)){
   8246 			/* SGMII */
   8247 			mii->mii_readreg = wm_sgmii_readreg;
   8248 			mii->mii_writereg = wm_sgmii_writereg;
   8249 		} else if (sc->sc_type >= WM_T_ICH8) {
   8250 			/* non-82567 ICH8, 9 and 10 */
   8251 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8252 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8253 		} else if (sc->sc_type >= WM_T_80003) {
   8254 			/* 80003 */
   8255 			sc->sc_phytype = WMPHY_GG82563;
   8256 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8257 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8258 		} else if (sc->sc_type >= WM_T_I210) {
   8259 			/* I210 and I211 */
   8260 			sc->sc_phytype = WMPHY_210;
   8261 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8262 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8263 		} else if (sc->sc_type >= WM_T_82580) {
   8264 			/* 82580, I350 and I354 */
   8265 			sc->sc_phytype = WMPHY_82580;
   8266 			mii->mii_readreg = wm_gmii_82580_readreg;
   8267 			mii->mii_writereg = wm_gmii_82580_writereg;
   8268 		} else if (sc->sc_type >= WM_T_82544) {
    8269 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8270 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8271 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8272 		} else {
   8273 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8274 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8275 		}
   8276 		break;
   8277 	}
   8278 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8279 		/* All PCH* use _hv_ */
   8280 		mii->mii_readreg = wm_gmii_hv_readreg;
   8281 		mii->mii_writereg = wm_gmii_hv_writereg;
   8282 	}
   8283 	mii->mii_statchg = wm_gmii_statchg;
   8284 
   8285 	wm_gmii_reset(sc);
   8286 
   8287 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8288 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8289 	    wm_gmii_mediastatus);
   8290 
   8291 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8292 	    || (sc->sc_type == WM_T_82580)
   8293 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8294 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8295 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8296 			/* Attach only one port */
   8297 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8298 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8299 		} else {
   8300 			int i, id;
   8301 			uint32_t ctrl_ext;
   8302 
   8303 			id = wm_get_phy_id_82575(sc);
   8304 			if (id != -1) {
   8305 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8306 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8307 			}
   8308 			if ((id == -1)
   8309 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8310 				/* Power on sgmii phy if it is disabled */
   8311 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8312 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    8313 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   8314 				CSR_WRITE_FLUSH(sc);
   8315 				delay(300*1000); /* XXX too long */
   8316 
    8317 				/* try PHY addresses 1 through 7 */
   8318 				for (i = 1; i < 8; i++)
   8319 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8320 					    0xffffffff, i, MII_OFFSET_ANY,
   8321 					    MIIF_DOPAUSE);
   8322 
   8323 				/* restore previous sfp cage power state */
   8324 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8325 			}
   8326 		}
   8327 	} else {
   8328 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8329 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8330 	}
   8331 
   8332 	/*
   8333 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8334 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8335 	 */
   8336 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8337 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8338 		wm_set_mdio_slow_mode_hv(sc);
   8339 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8340 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8341 	}
   8342 
   8343 	/*
   8344 	 * (For ICH8 variants)
   8345 	 * If PHY detection failed, use BM's r/w function and retry.
   8346 	 */
   8347 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8348 		/* if failed, retry with *_bm_* */
   8349 		mii->mii_readreg = wm_gmii_bm_readreg;
   8350 		mii->mii_writereg = wm_gmii_bm_writereg;
   8351 
   8352 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8353 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8354 	}
   8355 
   8356 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8357 		/* No PHY was found */
   8358 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8359 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8360 		sc->sc_phytype = WMPHY_NONE;
   8361 	} else {
   8362 		/*
   8363 		 * PHY Found!
   8364 		 * Check PHY type.
   8365 		 */
   8366 		uint32_t model;
   8367 		struct mii_softc *child;
   8368 
   8369 		child = LIST_FIRST(&mii->mii_phys);
   8370 		model = child->mii_mpd_model;
   8371 		if (model == MII_MODEL_yyINTEL_I82566)
   8372 			sc->sc_phytype = WMPHY_IGP_3;
   8373 
   8374 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8375 	}
   8376 }
   8377 
   8378 /*
   8379  * wm_gmii_mediachange:	[ifmedia interface function]
   8380  *
   8381  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8382  */
   8383 static int
   8384 wm_gmii_mediachange(struct ifnet *ifp)
   8385 {
   8386 	struct wm_softc *sc = ifp->if_softc;
   8387 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8388 	int rc;
   8389 
   8390 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8391 		device_xname(sc->sc_dev), __func__));
   8392 	if ((ifp->if_flags & IFF_UP) == 0)
   8393 		return 0;
   8394 
   8395 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8396 	sc->sc_ctrl |= CTRL_SLU;
   8397 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8398 	    || (sc->sc_type > WM_T_82543)) {
   8399 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8400 	} else {
   8401 		sc->sc_ctrl &= ~CTRL_ASDE;
   8402 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8403 		if (ife->ifm_media & IFM_FDX)
   8404 			sc->sc_ctrl |= CTRL_FD;
   8405 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8406 		case IFM_10_T:
   8407 			sc->sc_ctrl |= CTRL_SPEED_10;
   8408 			break;
   8409 		case IFM_100_TX:
   8410 			sc->sc_ctrl |= CTRL_SPEED_100;
   8411 			break;
   8412 		case IFM_1000_T:
   8413 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8414 			break;
   8415 		default:
   8416 			panic("wm_gmii_mediachange: bad media 0x%x",
   8417 			    ife->ifm_media);
   8418 		}
   8419 	}
   8420 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8421 	if (sc->sc_type <= WM_T_82543)
   8422 		wm_gmii_reset(sc);
   8423 
   8424 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8425 		return 0;
   8426 	return rc;
   8427 }
   8428 
   8429 /*
   8430  * wm_gmii_mediastatus:	[ifmedia interface function]
   8431  *
   8432  *	Get the current interface media status on a 1000BASE-T device.
   8433  */
   8434 static void
   8435 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8436 {
   8437 	struct wm_softc *sc = ifp->if_softc;
   8438 
   8439 	ether_mediastatus(ifp, ifmr);
   8440 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8441 	    | sc->sc_flowflags;
   8442 }
   8443 
   8444 #define	MDI_IO		CTRL_SWDPIN(2)
   8445 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8446 #define	MDI_CLK		CTRL_SWDPIN(3)
   8447 
   8448 static void
   8449 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8450 {
   8451 	uint32_t i, v;
   8452 
   8453 	v = CSR_READ(sc, WMREG_CTRL);
   8454 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8455 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8456 
   8457 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8458 		if (data & i)
   8459 			v |= MDI_IO;
   8460 		else
   8461 			v &= ~MDI_IO;
   8462 		CSR_WRITE(sc, WMREG_CTRL, v);
   8463 		CSR_WRITE_FLUSH(sc);
   8464 		delay(10);
   8465 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8466 		CSR_WRITE_FLUSH(sc);
   8467 		delay(10);
   8468 		CSR_WRITE(sc, WMREG_CTRL, v);
   8469 		CSR_WRITE_FLUSH(sc);
   8470 		delay(10);
   8471 	}
   8472 }
   8473 
   8474 static uint32_t
   8475 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8476 {
   8477 	uint32_t v, i, data = 0;
   8478 
   8479 	v = CSR_READ(sc, WMREG_CTRL);
   8480 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8481 	v |= CTRL_SWDPIO(3);
   8482 
   8483 	CSR_WRITE(sc, WMREG_CTRL, v);
   8484 	CSR_WRITE_FLUSH(sc);
   8485 	delay(10);
   8486 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8487 	CSR_WRITE_FLUSH(sc);
   8488 	delay(10);
   8489 	CSR_WRITE(sc, WMREG_CTRL, v);
   8490 	CSR_WRITE_FLUSH(sc);
   8491 	delay(10);
   8492 
   8493 	for (i = 0; i < 16; i++) {
   8494 		data <<= 1;
   8495 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8496 		CSR_WRITE_FLUSH(sc);
   8497 		delay(10);
   8498 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8499 			data |= 1;
   8500 		CSR_WRITE(sc, WMREG_CTRL, v);
   8501 		CSR_WRITE_FLUSH(sc);
   8502 		delay(10);
   8503 	}
   8504 
   8505 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8506 	CSR_WRITE_FLUSH(sc);
   8507 	delay(10);
   8508 	CSR_WRITE(sc, WMREG_CTRL, v);
   8509 	CSR_WRITE_FLUSH(sc);
   8510 	delay(10);
   8511 
   8512 	return data;
   8513 }
   8514 
   8515 #undef MDI_IO
   8516 #undef MDI_DIR
   8517 #undef MDI_CLK
   8518 
   8519 /*
   8520  * wm_gmii_i82543_readreg:	[mii interface function]
   8521  *
   8522  *	Read a PHY register on the GMII (i82543 version).
   8523  */
   8524 static int
   8525 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8526 {
   8527 	struct wm_softc *sc = device_private(self);
   8528 	int rv;
   8529 
   8530 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8531 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8532 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8533 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8534 
   8535 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8536 	    device_xname(sc->sc_dev), phy, reg, rv));
   8537 
   8538 	return rv;
   8539 }
   8540 
   8541 /*
   8542  * wm_gmii_i82543_writereg:	[mii interface function]
   8543  *
   8544  *	Write a PHY register on the GMII (i82543 version).
   8545  */
   8546 static void
   8547 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8548 {
   8549 	struct wm_softc *sc = device_private(self);
   8550 
   8551 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8552 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8553 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8554 	    (MII_COMMAND_START << 30), 32);
   8555 }
   8556 
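/*
 * Illustrative sketch (disabled; not part of the driver): what a raw
 * MDIO read frame looks like when built from the bit-bang helpers
 * above, using PHY address 1 and the status register (MII_BMSR, from
 * <dev/mii/mii.h>) as example values.  wm_gmii_i82543_readreg() below
 * is the real, parameterized version of this sequence.
 */
#if 0
	/* 32-bit preamble of all ones ... */
	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
	/* ... then start, read opcode, PHY address and register ... */
	wm_i82543_mii_sendbits(sc, MII_BMSR | (1 << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	/* ... then the turnaround and 16 data bits clocked back in. */
	uint32_t bmsr = wm_i82543_mii_recvbits(sc) & 0xffff;
#endif
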
   8557 /*
   8558  * wm_gmii_mdic_readreg:	[mii interface function]
   8559  *
   8560  *	Read a PHY register on the GMII.
   8561  */
   8562 static int
   8563 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8564 {
   8565 	struct wm_softc *sc = device_private(self);
   8566 	uint32_t mdic = 0;
   8567 	int i, rv;
   8568 
   8569 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8570 	    MDIC_REGADD(reg));
   8571 
   8572 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8573 		mdic = CSR_READ(sc, WMREG_MDIC);
   8574 		if (mdic & MDIC_READY)
   8575 			break;
   8576 		delay(50);
   8577 	}
   8578 
   8579 	if ((mdic & MDIC_READY) == 0) {
   8580 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8581 		    device_xname(sc->sc_dev), phy, reg);
   8582 		rv = 0;
   8583 	} else if (mdic & MDIC_E) {
   8584 #if 0 /* This is normal if no PHY is present. */
   8585 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8586 		    device_xname(sc->sc_dev), phy, reg);
   8587 #endif
   8588 		rv = 0;
   8589 	} else {
   8590 		rv = MDIC_DATA(mdic);
   8591 		if (rv == 0xffff)
   8592 			rv = 0;
   8593 	}
   8594 
   8595 	return rv;
   8596 }
   8597 
   8598 /*
   8599  * wm_gmii_mdic_writereg:	[mii interface function]
   8600  *
   8601  *	Write a PHY register on the GMII.
   8602  */
   8603 static void
   8604 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8605 {
   8606 	struct wm_softc *sc = device_private(self);
   8607 	uint32_t mdic = 0;
   8608 	int i;
   8609 
   8610 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8611 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8612 
   8613 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8614 		mdic = CSR_READ(sc, WMREG_MDIC);
   8615 		if (mdic & MDIC_READY)
   8616 			break;
   8617 		delay(50);
   8618 	}
   8619 
   8620 	if ((mdic & MDIC_READY) == 0)
   8621 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8622 		    device_xname(sc->sc_dev), phy, reg);
   8623 	else if (mdic & MDIC_E)
   8624 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8625 		    device_xname(sc->sc_dev), phy, reg);
   8626 }
   8627 
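/*
 * Illustrative sketch (disabled; not part of the driver): with the
 * MDIC helpers above, probing a PHY is just a pair of register reads.
 * MII_PHYIDR1/MII_PHYIDR2 come from <dev/mii/mii.h>; PHY address 1 is
 * an arbitrary example.
 */
#if 0
	int idr1 = wm_gmii_mdic_readreg(self, 1, MII_PHYIDR1);
	int idr2 = wm_gmii_mdic_readreg(self, 1, MII_PHYIDR2);
#endif
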
   8628 /*
   8629  * wm_gmii_i82544_readreg:	[mii interface function]
   8630  *
   8631  *	Read a PHY register on the GMII.
   8632  */
   8633 static int
   8634 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8635 {
   8636 	struct wm_softc *sc = device_private(self);
   8637 	int rv;
   8638 
   8639 	if (sc->phy.acquire(sc)) {
   8640 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8641 		    __func__);
   8642 		return 0;
   8643 	}
   8644 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8645 	sc->phy.release(sc);
   8646 
   8647 	return rv;
   8648 }
   8649 
   8650 /*
   8651  * wm_gmii_i82544_writereg:	[mii interface function]
   8652  *
   8653  *	Write a PHY register on the GMII.
   8654  */
   8655 static void
   8656 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8657 {
   8658 	struct wm_softc *sc = device_private(self);
   8659 
    8660 	if (sc->phy.acquire(sc)) {
    8661 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8662 		    __func__);
         		return;
    8663 	}
   8664 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8665 	sc->phy.release(sc);
   8666 }
   8667 
   8668 /*
   8669  * wm_gmii_i80003_readreg:	[mii interface function]
   8670  *
    8671  *	Read a PHY register on the kumeran bus.
    8672  * This could be handled by the PHY layer if we didn't have to lock the
    8673  * resource ...
   8674  */
   8675 static int
   8676 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8677 {
   8678 	struct wm_softc *sc = device_private(self);
   8679 	int rv;
   8680 
   8681 	if (phy != 1) /* only one PHY on kumeran bus */
   8682 		return 0;
   8683 
   8684 	if (sc->phy.acquire(sc)) {
   8685 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8686 		    __func__);
   8687 		return 0;
   8688 	}
   8689 
   8690 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8691 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8692 		    reg >> GG82563_PAGE_SHIFT);
   8693 	} else {
   8694 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8695 		    reg >> GG82563_PAGE_SHIFT);
   8696 	}
    8697 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8698 	delay(200);
   8699 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8700 	delay(200);
   8701 	sc->phy.release(sc);
   8702 
   8703 	return rv;
   8704 }
   8705 
   8706 /*
   8707  * wm_gmii_i80003_writereg:	[mii interface function]
   8708  *
    8709  *	Write a PHY register on the kumeran bus.
    8710  * This could be handled by the PHY layer if we didn't have to lock the
    8711  * resource ...
   8712  */
   8713 static void
   8714 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8715 {
   8716 	struct wm_softc *sc = device_private(self);
   8717 
   8718 	if (phy != 1) /* only one PHY on kumeran bus */
   8719 		return;
   8720 
   8721 	if (sc->phy.acquire(sc)) {
   8722 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8723 		    __func__);
   8724 		return;
   8725 	}
   8726 
   8727 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8728 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8729 		    reg >> GG82563_PAGE_SHIFT);
   8730 	} else {
   8731 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8732 		    reg >> GG82563_PAGE_SHIFT);
   8733 	}
    8734 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8735 	delay(200);
   8736 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8737 	delay(200);
   8738 
   8739 	sc->phy.release(sc);
   8740 }
   8741 
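/*
 * Illustrative sketch (disabled; not part of the driver): a GG82563
 * register number packs the page above GG82563_PAGE_SHIFT and the
 * in-page offset in the low bits, which is why the 80003 helpers above
 * select reg >> GG82563_PAGE_SHIFT and then access reg & MII_ADDRMASK.
 * Page 5, offset 0x10 is a made-up example, not a real register.
 */
#if 0
	int reg = (5 << GG82563_PAGE_SHIFT) | 0x10;
	int val = wm_gmii_i80003_readreg(self, 1, reg);
#endif
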
   8742 /*
   8743  * wm_gmii_bm_readreg:	[mii interface function]
   8744  *
    8745  *	Read a PHY register on the BM PHY (BME1000).
    8746  * This could be handled by the PHY layer if we didn't have to lock the
    8747  * resource ...
   8748  */
   8749 static int
   8750 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8751 {
   8752 	struct wm_softc *sc = device_private(self);
   8753 	int rv;
   8754 
   8755 	if (sc->phy.acquire(sc)) {
   8756 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8757 		    __func__);
   8758 		return 0;
   8759 	}
   8760 
   8761 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8762 		if (phy == 1)
   8763 			wm_gmii_mdic_writereg(self, phy,
   8764 			    MII_IGPHY_PAGE_SELECT, reg);
   8765 		else
   8766 			wm_gmii_mdic_writereg(self, phy,
   8767 			    BME1000_PHY_PAGE_SELECT,
   8768 			    reg >> GG82563_PAGE_SHIFT);
   8769 	}
   8770 
   8771 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8772 	sc->phy.release(sc);
   8773 	return rv;
   8774 }
   8775 
   8776 /*
   8777  * wm_gmii_bm_writereg:	[mii interface function]
   8778  *
    8779  *	Write a PHY register on the BM PHY (BME1000).
    8780  * This could be handled by the PHY layer if we didn't have to lock the
    8781  * resource ...
   8782  */
   8783 static void
   8784 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8785 {
   8786 	struct wm_softc *sc = device_private(self);
   8787 
   8788 	if (sc->phy.acquire(sc)) {
   8789 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8790 		    __func__);
   8791 		return;
   8792 	}
   8793 
   8794 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8795 		if (phy == 1)
   8796 			wm_gmii_mdic_writereg(self, phy,
   8797 			    MII_IGPHY_PAGE_SELECT, reg);
   8798 		else
   8799 			wm_gmii_mdic_writereg(self, phy,
   8800 			    BME1000_PHY_PAGE_SELECT,
   8801 			    reg >> GG82563_PAGE_SHIFT);
   8802 	}
   8803 
   8804 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8805 	sc->phy.release(sc);
   8806 }
   8807 
   8808 static void
   8809 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8810 {
   8811 	struct wm_softc *sc = device_private(self);
   8812 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8813 	uint16_t wuce;
   8814 
   8815 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8816 		device_xname(sc->sc_dev), __func__));
   8817 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8818 	if (sc->sc_type == WM_T_PCH) {
    8819 		/* XXX the e1000 driver does nothing here... why? */
   8820 	}
   8821 
   8822 	/* Set page 769 */
   8823 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8824 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8825 
   8826 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   8827 
   8828 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8829 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG,
   8830 	    wuce | BM_WUC_ENABLE_BIT);
   8831 
   8832 	/* Select page 800 */
   8833 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8834 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8835 
   8836 	/* Write page 800 */
   8837 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8838 
   8839 	if (rd)
   8840 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8841 	else
   8842 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8843 
   8844 	/* Set page 769 */
   8845 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8846 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8847 
   8848 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8849 }
   8850 
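/*
 * Illustrative sketch (disabled; not part of the driver): the page
 * 769/800 dance above is what a single wakeup-register access expands
 * to.  The dummy read of BM_WUC in wm_gmii_reset() reaches this helper
 * via wm_gmii_hv_readreg().
 */
#if 0
	int16_t val;

	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &val, 1);	/* read */
	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &val, 0);	/* write */
#endif
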
   8851 /*
   8852  * wm_gmii_hv_readreg:	[mii interface function]
   8853  *
    8854  *	Read a PHY register on the HV (PCH) PHY.
    8855  * This could be handled by the PHY layer if we didn't have to lock the
    8856  * resource ...
   8857  */
   8858 static int
   8859 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8860 {
   8861 	struct wm_softc *sc = device_private(self);
   8862 	int rv;
   8863 
   8864 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8865 		device_xname(sc->sc_dev), __func__));
   8866 	if (sc->phy.acquire(sc)) {
   8867 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8868 		    __func__);
   8869 		return 0;
   8870 	}
   8871 
   8872 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   8873 	sc->phy.release(sc);
   8874 	return rv;
   8875 }
   8876 
   8877 static int
   8878 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   8879 {
   8880 	struct wm_softc *sc = device_private(self);
   8881 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8882 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8883 	uint16_t val;
   8884 	int rv;
   8885 
   8886 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8887 	if (sc->sc_phytype == WMPHY_82577) {
   8888 		/* XXX must write */
   8889 	}
   8890 
   8891 	/* Page 800 works differently than the rest so it has its own func */
   8892 	if (page == BM_WUC_PAGE) {
   8893 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8894 		return val;
   8895 	}
   8896 
   8897 	/*
    8898 	 * Pages lower than 768 work differently than the rest, so they
    8899 	 * would need their own function (unimplemented; log and bail)
   8900 	 */
   8901 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8902 		printf("gmii_hv_readreg!!!\n");
   8903 		return 0;
   8904 	}
   8905 
   8906 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8907 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8908 		    page << BME1000_PAGE_SHIFT);
   8909 	}
   8910 
   8911 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   8912 	return rv;
   8913 }
   8914 
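/*
 * Illustrative sketch (disabled; not part of the driver): HV/BM
 * register numbers carry the page in their upper bits, so the locked
 * reader above only needs BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() to route
 * an access: page BM_WUC_PAGE goes to the wakeup helper, and other
 * multi-page registers get a page select written first.
 */
#if 0
	uint16_t page = BM_PHY_REG_PAGE(BM_WUC);	/* the wakeup page */
	uint16_t regnum = BM_PHY_REG_NUM(BM_WUC);	/* offset in page */
#endif
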
   8915 /*
   8916  * wm_gmii_hv_writereg:	[mii interface function]
   8917  *
    8918  *	Write a PHY register on the HV (PCH) PHY.
    8919  * This could be handled by the PHY layer if we didn't have to lock the
    8920  * resource ...
   8921  */
   8922 static void
   8923 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8924 {
   8925 	struct wm_softc *sc = device_private(self);
   8926 
   8927 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8928 		device_xname(sc->sc_dev), __func__));
   8929 
   8930 	if (sc->phy.acquire(sc)) {
   8931 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8932 		    __func__);
   8933 		return;
   8934 	}
   8935 
   8936 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   8937 	sc->phy.release(sc);
   8938 }
   8939 
   8940 static void
   8941 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   8942 {
   8943 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8944 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8945 
   8946 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8947 
   8948 	/* Page 800 works differently than the rest so it has its own func */
   8949 	if (page == BM_WUC_PAGE) {
   8950 		uint16_t tmp;
   8951 
   8952 		tmp = val;
   8953 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8954 		return;
   8955 	}
   8956 
   8957 	/*
    8958 	 * Pages lower than 768 work differently than the rest, so they
    8959 	 * would need their own function (unimplemented; log and bail)
   8960 	 */
   8961 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8962 		printf("gmii_hv_writereg!!!\n");
   8963 		return;
   8964 	}
   8965 
   8966 	/*
   8967 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8968 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8969 	 */
   8970 
   8971 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8972 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8973 		    page << BME1000_PAGE_SHIFT);
   8974 	}
   8975 
   8976 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   8977 }
   8978 
   8979 /*
   8980  * wm_gmii_82580_readreg:	[mii interface function]
   8981  *
   8982  *	Read a PHY register on the 82580 and I350.
   8983  * This could be handled by the PHY layer if we didn't have to lock the
    8984  * resource ...
   8985  */
   8986 static int
   8987 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8988 {
   8989 	struct wm_softc *sc = device_private(self);
   8990 	int rv;
   8991 
   8992 	if (sc->phy.acquire(sc) != 0) {
   8993 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8994 		    __func__);
   8995 		return 0;
   8996 	}
   8997 
   8998 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8999 
   9000 	sc->phy.release(sc);
   9001 	return rv;
   9002 }
   9003 
   9004 /*
   9005  * wm_gmii_82580_writereg:	[mii interface function]
   9006  *
   9007  *	Write a PHY register on the 82580 and I350.
   9008  * This could be handled by the PHY layer if we didn't have to lock the
    9009  * resource ...
   9010  */
   9011 static void
   9012 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9013 {
   9014 	struct wm_softc *sc = device_private(self);
   9015 
   9016 	if (sc->phy.acquire(sc) != 0) {
   9017 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9018 		    __func__);
   9019 		return;
   9020 	}
   9021 
   9022 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9023 
   9024 	sc->phy.release(sc);
   9025 }
   9026 
   9027 /*
   9028  * wm_gmii_gs40g_readreg:	[mii interface function]
   9029  *
    9030  *	Read a PHY register on the I210 and I211.
    9031  * This could be handled by the PHY layer if we didn't have to lock the
    9032  * resource ...
   9033  */
   9034 static int
   9035 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9036 {
   9037 	struct wm_softc *sc = device_private(self);
   9038 	int page, offset;
   9039 	int rv;
   9040 
   9041 	/* Acquire semaphore */
   9042 	if (sc->phy.acquire(sc)) {
   9043 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9044 		    __func__);
   9045 		return 0;
   9046 	}
   9047 
   9048 	/* Page select */
   9049 	page = reg >> GS40G_PAGE_SHIFT;
   9050 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9051 
   9052 	/* Read reg */
   9053 	offset = reg & GS40G_OFFSET_MASK;
   9054 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9055 
   9056 	sc->phy.release(sc);
   9057 	return rv;
   9058 }
   9059 
   9060 /*
   9061  * wm_gmii_gs40g_writereg:	[mii interface function]
   9062  *
   9063  *	Write a PHY register on the I210 and I211.
   9064  * This could be handled by the PHY layer if we didn't have to lock the
    9065  * resource ...
   9066  */
   9067 static void
   9068 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9069 {
   9070 	struct wm_softc *sc = device_private(self);
   9071 	int page, offset;
   9072 
   9073 	/* Acquire semaphore */
   9074 	if (sc->phy.acquire(sc)) {
   9075 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9076 		    __func__);
   9077 		return;
   9078 	}
   9079 
   9080 	/* Page select */
   9081 	page = reg >> GS40G_PAGE_SHIFT;
   9082 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9083 
   9084 	/* Write reg */
   9085 	offset = reg & GS40G_OFFSET_MASK;
   9086 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9087 
   9088 	/* Release semaphore */
   9089 	sc->phy.release(sc);
   9090 }
   9091 
   9092 /*
   9093  * wm_gmii_statchg:	[mii interface function]
   9094  *
   9095  *	Callback from MII layer when media changes.
   9096  */
   9097 static void
   9098 wm_gmii_statchg(struct ifnet *ifp)
   9099 {
   9100 	struct wm_softc *sc = ifp->if_softc;
   9101 	struct mii_data *mii = &sc->sc_mii;
   9102 
   9103 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9104 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9105 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9106 
   9107 	/*
   9108 	 * Get flow control negotiation result.
   9109 	 */
   9110 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9111 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9112 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9113 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9114 	}
   9115 
   9116 	if (sc->sc_flowflags & IFM_FLOW) {
   9117 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9118 			sc->sc_ctrl |= CTRL_TFCE;
   9119 			sc->sc_fcrtl |= FCRTL_XONE;
   9120 		}
   9121 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9122 			sc->sc_ctrl |= CTRL_RFCE;
   9123 	}
   9124 
   9125 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9126 		DPRINTF(WM_DEBUG_LINK,
   9127 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9128 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9129 	} else {
   9130 		DPRINTF(WM_DEBUG_LINK,
   9131 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9132 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9133 	}
   9134 
   9135 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9136 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9137 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9138 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9139 	if (sc->sc_type == WM_T_80003) {
   9140 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9141 		case IFM_1000_T:
   9142 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9143 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    9144 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9145 			break;
   9146 		default:
   9147 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9148 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    9149 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   9150 			break;
   9151 		}
   9152 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9153 	}
   9154 }
   9155 
   9156 /*
   9157  * wm_kmrn_readreg:
   9158  *
   9159  *	Read a kumeran register
   9160  */
   9161 static int
   9162 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9163 {
   9164 	int rv;
   9165 
   9166 	if (sc->sc_type == WM_T_80003)
   9167 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9168 	else
   9169 		rv = sc->phy.acquire(sc);
   9170 	if (rv != 0) {
   9171 		aprint_error_dev(sc->sc_dev,
   9172 		    "%s: failed to get semaphore\n", __func__);
   9173 		return 0;
   9174 	}
   9175 
   9176 	rv = wm_kmrn_readreg_locked(sc, reg);
   9177 
   9178 	if (sc->sc_type == WM_T_80003)
   9179 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9180 	else
   9181 		sc->phy.release(sc);
   9182 
   9183 	return rv;
   9184 }
   9185 
   9186 static int
   9187 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9188 {
   9189 	int rv;
   9190 
   9191 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9192 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9193 	    KUMCTRLSTA_REN);
   9194 	CSR_WRITE_FLUSH(sc);
   9195 	delay(2);
   9196 
   9197 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9198 
   9199 	return rv;
   9200 }
   9201 
   9202 /*
   9203  * wm_kmrn_writereg:
   9204  *
   9205  *	Write a kumeran register
   9206  */
   9207 static void
   9208 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9209 {
   9210 	int rv;
   9211 
   9212 	if (sc->sc_type == WM_T_80003)
   9213 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9214 	else
   9215 		rv = sc->phy.acquire(sc);
   9216 	if (rv != 0) {
   9217 		aprint_error_dev(sc->sc_dev,
   9218 		    "%s: failed to get semaphore\n", __func__);
   9219 		return;
   9220 	}
   9221 
   9222 	wm_kmrn_writereg_locked(sc, reg, val);
   9223 
   9224 	if (sc->sc_type == WM_T_80003)
   9225 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9226 	else
   9227 		sc->phy.release(sc);
   9228 }
   9229 
   9230 static void
   9231 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9232 {
   9233 
   9234 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9235 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9236 	    (val & KUMCTRLSTA_MASK));
   9237 }
   9238 
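/*
 * Illustrative sketch (disabled; not part of the driver): a Kumeran
 * access is a single KUMCTRLSTA write with the offset in the
 * KUMCTRLSTA_OFFSET field and the data in the low 16 bits (plus
 * KUMCTRLSTA_REN for reads).  wm_gmii_statchg() uses this to load the
 * half-duplex control defaults, for example:
 */
#if 0
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif
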
   9239 /* SGMII related */
   9240 
   9241 /*
   9242  * wm_sgmii_uses_mdio
   9243  *
   9244  * Check whether the transaction is to the internal PHY or the external
   9245  * MDIO interface. Return true if it's MDIO.
   9246  */
   9247 static bool
   9248 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9249 {
   9250 	uint32_t reg;
   9251 	bool ismdio = false;
   9252 
   9253 	switch (sc->sc_type) {
   9254 	case WM_T_82575:
   9255 	case WM_T_82576:
   9256 		reg = CSR_READ(sc, WMREG_MDIC);
   9257 		ismdio = ((reg & MDIC_DEST) != 0);
   9258 		break;
   9259 	case WM_T_82580:
   9260 	case WM_T_I350:
   9261 	case WM_T_I354:
   9262 	case WM_T_I210:
   9263 	case WM_T_I211:
   9264 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9265 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9266 		break;
   9267 	default:
   9268 		break;
   9269 	}
   9270 
   9271 	return ismdio;
   9272 }
   9273 
   9274 /*
   9275  * wm_sgmii_readreg:	[mii interface function]
   9276  *
    9277  *	Read a PHY register on the SGMII.
    9278  * This could be handled by the PHY layer if we didn't have to lock the
    9279  * resource ...
   9280  */
   9281 static int
   9282 wm_sgmii_readreg(device_t self, int phy, int reg)
   9283 {
   9284 	struct wm_softc *sc = device_private(self);
   9285 	uint32_t i2ccmd;
   9286 	int i, rv;
   9287 
   9288 	if (sc->phy.acquire(sc)) {
   9289 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9290 		    __func__);
   9291 		return 0;
   9292 	}
   9293 
   9294 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9295 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9296 	    | I2CCMD_OPCODE_READ;
   9297 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9298 
   9299 	/* Poll the ready bit */
   9300 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9301 		delay(50);
   9302 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9303 		if (i2ccmd & I2CCMD_READY)
   9304 			break;
   9305 	}
   9306 	if ((i2ccmd & I2CCMD_READY) == 0)
   9307 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9308 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9309 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9310 
   9311 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9312 
   9313 	sc->phy.release(sc);
   9314 	return rv;
   9315 }
   9316 
   9317 /*
   9318  * wm_sgmii_writereg:	[mii interface function]
   9319  *
   9320  *	Write a PHY register on the SGMII.
   9321  * This could be handled by the PHY layer if we didn't have to lock the
    9322  * resource ...
   9323  */
   9324 static void
   9325 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9326 {
   9327 	struct wm_softc *sc = device_private(self);
   9328 	uint32_t i2ccmd;
   9329 	int i;
   9330 	int val_swapped;
   9331 
   9332 	if (sc->phy.acquire(sc) != 0) {
   9333 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9334 		    __func__);
   9335 		return;
   9336 	}
   9337 	/* Swap the data bytes for the I2C interface */
   9338 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9339 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9340 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9341 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9342 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9343 
   9344 	/* Poll the ready bit */
   9345 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9346 		delay(50);
   9347 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9348 		if (i2ccmd & I2CCMD_READY)
   9349 			break;
   9350 	}
   9351 	if ((i2ccmd & I2CCMD_READY) == 0)
   9352 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9353 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9354 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9355 
   9356 	sc->phy.release(sc);
   9357 }
   9358 
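/*
 * Illustrative sketch (disabled; not part of the driver): the I2CCMD
 * data field is byte-swapped relative to the 16-bit PHY register
 * value, so both SGMII helpers above swap around the transfer; a
 * register value of 0x1234 travels on the wire as 0x3412.
 */
#if 0
	int val = 0x1234;
	int val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
	/* val_swapped == 0x3412 */
#endif
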
   9359 /* TBI related */
   9360 
   9361 /*
   9362  * wm_tbi_mediainit:
   9363  *
   9364  *	Initialize media for use on 1000BASE-X devices.
   9365  */
   9366 static void
   9367 wm_tbi_mediainit(struct wm_softc *sc)
   9368 {
   9369 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9370 	const char *sep = "";
   9371 
   9372 	if (sc->sc_type < WM_T_82543)
   9373 		sc->sc_tipg = TIPG_WM_DFLT;
   9374 	else
   9375 		sc->sc_tipg = TIPG_LG_DFLT;
   9376 
   9377 	sc->sc_tbi_serdes_anegticks = 5;
   9378 
   9379 	/* Initialize our media structures */
   9380 	sc->sc_mii.mii_ifp = ifp;
   9381 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9382 
   9383 	if ((sc->sc_type >= WM_T_82575)
   9384 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9385 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9386 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9387 	else
   9388 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9389 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9390 
   9391 	/*
   9392 	 * SWD Pins:
   9393 	 *
   9394 	 *	0 = Link LED (output)
   9395 	 *	1 = Loss Of Signal (input)
   9396 	 */
   9397 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9398 
   9399 	/* XXX Perhaps this is only for TBI */
   9400 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9401 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9402 
   9403 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9404 		sc->sc_ctrl &= ~CTRL_LRST;
   9405 
   9406 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9407 
   9408 #define	ADD(ss, mm, dd)							\
   9409 do {									\
   9410 	aprint_normal("%s%s", sep, ss);					\
   9411 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9412 	sep = ", ";							\
   9413 } while (/*CONSTCOND*/0)
   9414 
   9415 	aprint_normal_dev(sc->sc_dev, "");
   9416 
   9417 	/* Only 82545 is LX */
   9418 	if (sc->sc_type == WM_T_82545) {
   9419 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9420 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9421 	} else {
   9422 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9423 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9424 	}
   9425 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9426 	aprint_normal("\n");
   9427 
   9428 #undef ADD
   9429 
   9430 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9431 }
   9432 
   9433 /*
   9434  * wm_tbi_mediachange:	[ifmedia interface function]
   9435  *
   9436  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9437  */
   9438 static int
   9439 wm_tbi_mediachange(struct ifnet *ifp)
   9440 {
   9441 	struct wm_softc *sc = ifp->if_softc;
   9442 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9443 	uint32_t status;
   9444 	int i;
   9445 
   9446 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9447 		/* XXX need some work for >= 82571 and < 82575 */
   9448 		if (sc->sc_type < WM_T_82575)
   9449 			return 0;
   9450 	}
   9451 
   9452 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9453 	    || (sc->sc_type >= WM_T_82575))
   9454 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9455 
   9456 	sc->sc_ctrl &= ~CTRL_LRST;
   9457 	sc->sc_txcw = TXCW_ANE;
   9458 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9459 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9460 	else if (ife->ifm_media & IFM_FDX)
   9461 		sc->sc_txcw |= TXCW_FD;
   9462 	else
   9463 		sc->sc_txcw |= TXCW_HD;
   9464 
   9465 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9466 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9467 
   9468 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9469 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9470 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9471 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9472 	CSR_WRITE_FLUSH(sc);
   9473 	delay(1000);
   9474 
   9475 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9476 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9477 
   9478 	/*
    9479 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
    9480 	 * set if the optics detect a signal; on older chips 0 means signal.
   9481 	 */
   9482 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9483 		/* Have signal; wait for the link to come up. */
   9484 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9485 			delay(10000);
   9486 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9487 				break;
   9488 		}
   9489 
   9490 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9491 			    device_xname(sc->sc_dev),i));
   9492 
   9493 		status = CSR_READ(sc, WMREG_STATUS);
   9494 		DPRINTF(WM_DEBUG_LINK,
   9495 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9496 			device_xname(sc->sc_dev),status, STATUS_LU));
   9497 		if (status & STATUS_LU) {
   9498 			/* Link is up. */
   9499 			DPRINTF(WM_DEBUG_LINK,
   9500 			    ("%s: LINK: set media -> link up %s\n",
   9501 			    device_xname(sc->sc_dev),
   9502 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9503 
   9504 			/*
    9505 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9506 			 * automatically, so refresh sc->sc_ctrl from the register.
   9507 			 */
   9508 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9509 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9510 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9511 			if (status & STATUS_FD)
   9512 				sc->sc_tctl |=
   9513 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9514 			else
   9515 				sc->sc_tctl |=
   9516 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9517 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9518 				sc->sc_fcrtl |= FCRTL_XONE;
   9519 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9520 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9521 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9522 				      sc->sc_fcrtl);
   9523 			sc->sc_tbi_linkup = 1;
   9524 		} else {
   9525 			if (i == WM_LINKUP_TIMEOUT)
   9526 				wm_check_for_link(sc);
   9527 			/* Link is down. */
   9528 			DPRINTF(WM_DEBUG_LINK,
   9529 			    ("%s: LINK: set media -> link down\n",
   9530 			    device_xname(sc->sc_dev)));
   9531 			sc->sc_tbi_linkup = 0;
   9532 		}
   9533 	} else {
   9534 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9535 		    device_xname(sc->sc_dev)));
   9536 		sc->sc_tbi_linkup = 0;
   9537 	}
   9538 
   9539 	wm_tbi_serdes_set_linkled(sc);
   9540 
   9541 	return 0;
   9542 }
   9543 
   9544 /*
   9545  * wm_tbi_mediastatus:	[ifmedia interface function]
   9546  *
   9547  *	Get the current interface media status on a 1000BASE-X device.
   9548  */
   9549 static void
   9550 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9551 {
   9552 	struct wm_softc *sc = ifp->if_softc;
   9553 	uint32_t ctrl, status;
   9554 
   9555 	ifmr->ifm_status = IFM_AVALID;
   9556 	ifmr->ifm_active = IFM_ETHER;
   9557 
   9558 	status = CSR_READ(sc, WMREG_STATUS);
   9559 	if ((status & STATUS_LU) == 0) {
   9560 		ifmr->ifm_active |= IFM_NONE;
   9561 		return;
   9562 	}
   9563 
   9564 	ifmr->ifm_status |= IFM_ACTIVE;
   9565 	/* Only 82545 is LX */
   9566 	if (sc->sc_type == WM_T_82545)
   9567 		ifmr->ifm_active |= IFM_1000_LX;
   9568 	else
   9569 		ifmr->ifm_active |= IFM_1000_SX;
   9570 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9571 		ifmr->ifm_active |= IFM_FDX;
   9572 	else
   9573 		ifmr->ifm_active |= IFM_HDX;
   9574 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9575 	if (ctrl & CTRL_RFCE)
   9576 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9577 	if (ctrl & CTRL_TFCE)
   9578 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9579 }
   9580 
   9581 /* XXX TBI only */
   9582 static int
   9583 wm_check_for_link(struct wm_softc *sc)
   9584 {
   9585 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9586 	uint32_t rxcw;
   9587 	uint32_t ctrl;
   9588 	uint32_t status;
   9589 	uint32_t sig;
   9590 
   9591 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9592 		/* XXX need some work for >= 82571 */
   9593 		if (sc->sc_type >= WM_T_82571) {
   9594 			sc->sc_tbi_linkup = 1;
   9595 			return 0;
   9596 		}
   9597 	}
   9598 
   9599 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9600 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9601 	status = CSR_READ(sc, WMREG_STATUS);
   9602 
   9603 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9604 
   9605 	DPRINTF(WM_DEBUG_LINK,
   9606 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9607 		device_xname(sc->sc_dev), __func__,
   9608 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9609 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9610 
   9611 	/*
   9612 	 * SWDPIN   LU RXCW
   9613 	 *      0    0    0
   9614 	 *      0    0    1	(should not happen)
   9615 	 *      0    1    0	(should not happen)
   9616 	 *      0    1    1	(should not happen)
   9617 	 *      1    0    0	Disable autonego and force linkup
   9618 	 *      1    0    1	got /C/ but not linkup yet
   9619 	 *      1    1    0	(linkup)
   9620 	 *      1    1    1	If IFM_AUTO, back to autonego
   9621 	 *
   9622 	 */
   9623 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9624 	    && ((status & STATUS_LU) == 0)
   9625 	    && ((rxcw & RXCW_C) == 0)) {
   9626 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9627 			__func__));
   9628 		sc->sc_tbi_linkup = 0;
   9629 		/* Disable auto-negotiation in the TXCW register */
   9630 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9631 
   9632 		/*
   9633 		 * Force link-up and also force full-duplex.
   9634 		 *
    9635 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    9636 		 * automatically, so refresh sc->sc_ctrl from the register.
   9637 		 */
   9638 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9639 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9640 	} else if (((status & STATUS_LU) != 0)
   9641 	    && ((rxcw & RXCW_C) != 0)
   9642 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9643 		sc->sc_tbi_linkup = 1;
   9644 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9645 			__func__));
   9646 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9647 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9648 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9649 	    && ((rxcw & RXCW_C) != 0)) {
   9650 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9651 	} else {
   9652 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9653 			status));
   9654 	}
   9655 
   9656 	return 0;
   9657 }
   9658 
   9659 /*
   9660  * wm_tbi_tick:
   9661  *
   9662  *	Check the link on TBI devices.
   9663  *	This function acts as mii_tick().
   9664  */
   9665 static void
   9666 wm_tbi_tick(struct wm_softc *sc)
   9667 {
   9668 	struct mii_data *mii = &sc->sc_mii;
   9669 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9670 	uint32_t status;
   9671 
   9672 	KASSERT(WM_CORE_LOCKED(sc));
   9673 
   9674 	status = CSR_READ(sc, WMREG_STATUS);
   9675 
   9676 	/* XXX is this needed? */
   9677 	(void)CSR_READ(sc, WMREG_RXCW);
   9678 	(void)CSR_READ(sc, WMREG_CTRL);
   9679 
   9680 	/* set link status */
   9681 	if ((status & STATUS_LU) == 0) {
   9682 		DPRINTF(WM_DEBUG_LINK,
   9683 		    ("%s: LINK: checklink -> down\n",
   9684 			device_xname(sc->sc_dev)));
   9685 		sc->sc_tbi_linkup = 0;
   9686 	} else if (sc->sc_tbi_linkup == 0) {
   9687 		DPRINTF(WM_DEBUG_LINK,
   9688 		    ("%s: LINK: checklink -> up %s\n",
   9689 			device_xname(sc->sc_dev),
   9690 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9691 		sc->sc_tbi_linkup = 1;
   9692 		sc->sc_tbi_serdes_ticks = 0;
   9693 	}
   9694 
   9695 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9696 		goto setled;
   9697 
   9698 	if ((status & STATUS_LU) == 0) {
   9699 		sc->sc_tbi_linkup = 0;
   9700 		/* If the timer expired, retry autonegotiation */
   9701 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9702 		    && (++sc->sc_tbi_serdes_ticks
   9703 			>= sc->sc_tbi_serdes_anegticks)) {
   9704 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9705 			sc->sc_tbi_serdes_ticks = 0;
   9706 			/*
   9707 			 * Reset the link, and let autonegotiation do
   9708 			 * its thing
   9709 			 */
   9710 			sc->sc_ctrl |= CTRL_LRST;
   9711 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9712 			CSR_WRITE_FLUSH(sc);
   9713 			delay(1000);
   9714 			sc->sc_ctrl &= ~CTRL_LRST;
   9715 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9716 			CSR_WRITE_FLUSH(sc);
   9717 			delay(1000);
   9718 			CSR_WRITE(sc, WMREG_TXCW,
   9719 			    sc->sc_txcw & ~TXCW_ANE);
   9720 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9721 		}
   9722 	}
   9723 
   9724 setled:
   9725 	wm_tbi_serdes_set_linkled(sc);
   9726 }
   9727 
   9728 /* SERDES related */
   9729 static void
   9730 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9731 {
   9732 	uint32_t reg;
   9733 
   9734 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9735 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9736 		return;
   9737 
   9738 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9739 	reg |= PCS_CFG_PCS_EN;
   9740 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9741 
   9742 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9743 	reg &= ~CTRL_EXT_SWDPIN(3);
   9744 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9745 	CSR_WRITE_FLUSH(sc);
   9746 }
   9747 
   9748 static int
   9749 wm_serdes_mediachange(struct ifnet *ifp)
   9750 {
   9751 	struct wm_softc *sc = ifp->if_softc;
   9752 	bool pcs_autoneg = true; /* XXX */
   9753 	uint32_t ctrl_ext, pcs_lctl, reg;
   9754 
   9755 	/* XXX Currently, this function is not called on 8257[12] */
   9756 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9757 	    || (sc->sc_type >= WM_T_82575))
   9758 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9759 
   9760 	wm_serdes_power_up_link_82575(sc);
   9761 
   9762 	sc->sc_ctrl |= CTRL_SLU;
   9763 
   9764 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9765 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9766 
   9767 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9768 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9769 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9770 	case CTRL_EXT_LINK_MODE_SGMII:
   9771 		pcs_autoneg = true;
   9772 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9773 		break;
   9774 	case CTRL_EXT_LINK_MODE_1000KX:
   9775 		pcs_autoneg = false;
   9776 		/* FALLTHROUGH */
   9777 	default:
   9778 		if ((sc->sc_type == WM_T_82575)
   9779 		    || (sc->sc_type == WM_T_82576)) {
   9780 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9781 				pcs_autoneg = false;
   9782 		}
   9783 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9784 		    | CTRL_FRCFDX;
   9785 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9786 	}
   9787 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9788 
   9789 	if (pcs_autoneg) {
   9790 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9791 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9792 
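		/* Advertise both symmetric and asymmetric PAUSE. */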
   9793 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9794 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9795 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9796 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9797 	} else
   9798 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9799 
   9800 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9801 
   9802 
   9803 	return 0;
   9804 }
   9805 
   9806 static void
   9807 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9808 {
   9809 	struct wm_softc *sc = ifp->if_softc;
   9810 	struct mii_data *mii = &sc->sc_mii;
   9811 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9812 	uint32_t pcs_adv, pcs_lpab, reg;
   9813 
   9814 	ifmr->ifm_status = IFM_AVALID;
   9815 	ifmr->ifm_active = IFM_ETHER;
   9816 
   9817 	/* Check PCS */
   9818 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9819 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9820 		ifmr->ifm_active |= IFM_NONE;
   9821 		sc->sc_tbi_linkup = 0;
   9822 		goto setled;
   9823 	}
   9824 
   9825 	sc->sc_tbi_linkup = 1;
   9826 	ifmr->ifm_status |= IFM_ACTIVE;
   9827 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9828 	if ((reg & PCS_LSTS_FDX) != 0)
   9829 		ifmr->ifm_active |= IFM_FDX;
   9830 	else
   9831 		ifmr->ifm_active |= IFM_HDX;
   9832 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9833 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9834 		/* Check flow */
   9835 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9836 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9837 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9838 			goto setled;
   9839 		}
   9840 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9841 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9842 		DPRINTF(WM_DEBUG_LINK,
   9843 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
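		/*
		 * Resolve flow control as in 802.3 annex 28B: both sides
		 * symmetric -> TX and RX pause; we asym-only, partner
		 * sym+asym -> TX pause only; we sym+asym, partner
		 * asym-only -> RX pause only.
		 */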
   9844 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9845 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9846 			mii->mii_media_active |= IFM_FLOW
   9847 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9848 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9849 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9850 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9851 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9852 			mii->mii_media_active |= IFM_FLOW
   9853 			    | IFM_ETH_TXPAUSE;
   9854 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9855 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9856 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9857 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9858 			mii->mii_media_active |= IFM_FLOW
   9859 			    | IFM_ETH_RXPAUSE;
   9860 		} else {
   9861 		}
   9862 	}
   9863 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9864 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9865 setled:
   9866 	wm_tbi_serdes_set_linkled(sc);
   9867 }
   9868 
   9869 /*
   9870  * wm_serdes_tick:
   9871  *
   9872  *	Check the link on serdes devices.
   9873  */
   9874 static void
   9875 wm_serdes_tick(struct wm_softc *sc)
   9876 {
   9877 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9878 	struct mii_data *mii = &sc->sc_mii;
   9879 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9880 	uint32_t reg;
   9881 
   9882 	KASSERT(WM_CORE_LOCKED(sc));
   9883 
   9884 	mii->mii_media_status = IFM_AVALID;
   9885 	mii->mii_media_active = IFM_ETHER;
   9886 
   9887 	/* Check PCS */
   9888 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9889 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9890 		mii->mii_media_status |= IFM_ACTIVE;
   9891 		sc->sc_tbi_linkup = 1;
   9892 		sc->sc_tbi_serdes_ticks = 0;
   9893 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9894 		if ((reg & PCS_LSTS_FDX) != 0)
   9895 			mii->mii_media_active |= IFM_FDX;
   9896 		else
   9897 			mii->mii_media_active |= IFM_HDX;
   9898 	} else {
   9899 		mii->mii_media_status |= IFM_NONE;
   9900 		sc->sc_tbi_linkup = 0;
    9901 		/* If the timer expired, retry autonegotiation */
   9902 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9903 		    && (++sc->sc_tbi_serdes_ticks
   9904 			>= sc->sc_tbi_serdes_anegticks)) {
   9905 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9906 			sc->sc_tbi_serdes_ticks = 0;
   9907 			/* XXX */
   9908 			wm_serdes_mediachange(ifp);
   9909 		}
   9910 	}
   9911 
   9912 	wm_tbi_serdes_set_linkled(sc);
   9913 }
   9914 
   9915 /* SFP related */
   9916 
   9917 static int
   9918 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9919 {
   9920 	uint32_t i2ccmd;
   9921 	int i;
   9922 
   9923 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9924 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9925 
   9926 	/* Poll the ready bit */
   9927 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9928 		delay(50);
   9929 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9930 		if (i2ccmd & I2CCMD_READY)
   9931 			break;
   9932 	}
   9933 	if ((i2ccmd & I2CCMD_READY) == 0)
   9934 		return -1;
   9935 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9936 		return -1;
   9937 
   9938 	*data = i2ccmd & 0x00ff;
   9939 
   9940 	return 0;
   9941 }
   9942 
   9943 static uint32_t
   9944 wm_sfp_get_media_type(struct wm_softc *sc)
   9945 {
   9946 	uint32_t ctrl_ext;
   9947 	uint8_t val = 0;
   9948 	int timeout = 3;
   9949 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9950 	int rv = -1;
   9951 
   9952 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9953 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9954 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9955 	CSR_WRITE_FLUSH(sc);
   9956 
   9957 	/* Read SFP module data */
   9958 	while (timeout) {
   9959 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9960 		if (rv == 0)
   9961 			break;
   9962 		delay(100*1000); /* XXX too big */
   9963 		timeout--;
   9964 	}
   9965 	if (rv != 0)
   9966 		goto out;
   9967 	switch (val) {
   9968 	case SFF_SFP_ID_SFF:
   9969 		aprint_normal_dev(sc->sc_dev,
   9970 		    "Module/Connector soldered to board\n");
   9971 		break;
   9972 	case SFF_SFP_ID_SFP:
   9973 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9974 		break;
   9975 	case SFF_SFP_ID_UNKNOWN:
   9976 		goto out;
   9977 	default:
   9978 		break;
   9979 	}
   9980 
   9981 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9982 	if (rv != 0) {
   9983 		goto out;
   9984 	}
   9985 
   9986 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9987 		mediatype = WM_MEDIATYPE_SERDES;
   9988 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9989 		sc->sc_flags |= WM_F_SGMII;
   9990 		mediatype = WM_MEDIATYPE_COPPER;
   9991 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9992 		sc->sc_flags |= WM_F_SGMII;
   9993 		mediatype = WM_MEDIATYPE_SERDES;
   9994 	}
   9995 
   9996 out:
   9997 	/* Restore I2C interface setting */
   9998 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9999 
   10000 	return mediatype;
   10001 }
   10002 /*
   10003  * NVM related.
    10004  * Microwire, SPI (with or without EERD) and Flash.
   10005  */
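
/*
 * A rough map of how wm_nvm_read() (at the end of this group)
 * dispatches to the access methods below, based on sc_type and
 * sc_flags:
 *
 *	ICH8..PCH_LPT		-> wm_nvm_read_ich8()
 *	PCH_SPT			-> wm_nvm_read_spt()
 *	WM_F_EEPROM_INVM	-> wm_nvm_read_invm()
 *	WM_F_EEPROM_EERDEEWR	-> wm_nvm_read_eerd()
 *	WM_F_EEPROM_SPI		-> wm_nvm_read_spi()
 *	otherwise		-> wm_nvm_read_uwire()
 */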
   10006 
   10007 /* Both spi and uwire */
   10008 
   10009 /*
   10010  * wm_eeprom_sendbits:
   10011  *
   10012  *	Send a series of bits to the EEPROM.
   10013  */
   10014 static void
   10015 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10016 {
   10017 	uint32_t reg;
   10018 	int x;
   10019 
   10020 	reg = CSR_READ(sc, WMREG_EECD);
   10021 
   10022 	for (x = nbits; x > 0; x--) {
   10023 		if (bits & (1U << (x - 1)))
   10024 			reg |= EECD_DI;
   10025 		else
   10026 			reg &= ~EECD_DI;
   10027 		CSR_WRITE(sc, WMREG_EECD, reg);
   10028 		CSR_WRITE_FLUSH(sc);
   10029 		delay(2);
   10030 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10031 		CSR_WRITE_FLUSH(sc);
   10032 		delay(2);
   10033 		CSR_WRITE(sc, WMREG_EECD, reg);
   10034 		CSR_WRITE_FLUSH(sc);
   10035 		delay(2);
   10036 	}
   10037 }
   10038 
   10039 /*
   10040  * wm_eeprom_recvbits:
   10041  *
   10042  *	Receive a series of bits from the EEPROM.
   10043  */
   10044 static void
   10045 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10046 {
   10047 	uint32_t reg, val;
   10048 	int x;
   10049 
   10050 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10051 
   10052 	val = 0;
   10053 	for (x = nbits; x > 0; x--) {
   10054 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10055 		CSR_WRITE_FLUSH(sc);
   10056 		delay(2);
   10057 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10058 			val |= (1U << (x - 1));
   10059 		CSR_WRITE(sc, WMREG_EECD, reg);
   10060 		CSR_WRITE_FLUSH(sc);
   10061 		delay(2);
   10062 	}
   10063 	*valp = val;
   10064 }
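
/*
 * Illustrative only: with the helpers above, a single Microwire READ
 * of word 3 on a part with 6 address bits would be clocked out as
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *	wm_eeprom_sendbits(sc, 3, 6);
 *	wm_eeprom_recvbits(sc, &val, 16);
 *
 * with CHIP SELECT held high around the whole transaction, which is
 * what wm_nvm_read_uwire() below does.
 */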
   10065 
   10066 /* Microwire */
   10067 
   10068 /*
   10069  * wm_nvm_read_uwire:
   10070  *
   10071  *	Read a word from the EEPROM using the MicroWire protocol.
   10072  */
   10073 static int
   10074 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10075 {
   10076 	uint32_t reg, val;
   10077 	int i;
   10078 
   10079 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10080 		device_xname(sc->sc_dev), __func__));
   10081 
   10082 	for (i = 0; i < wordcnt; i++) {
   10083 		/* Clear SK and DI. */
   10084 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10085 		CSR_WRITE(sc, WMREG_EECD, reg);
   10086 
   10087 		/*
   10088 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10089 		 * and Xen.
   10090 		 *
    10091 		 * We use this workaround only for the 82540, because
    10092 		 * qemu's e1000 acts as an 82540.
   10093 		 */
   10094 		if (sc->sc_type == WM_T_82540) {
   10095 			reg |= EECD_SK;
   10096 			CSR_WRITE(sc, WMREG_EECD, reg);
   10097 			reg &= ~EECD_SK;
   10098 			CSR_WRITE(sc, WMREG_EECD, reg);
   10099 			CSR_WRITE_FLUSH(sc);
   10100 			delay(2);
   10101 		}
   10102 		/* XXX: end of workaround */
   10103 
   10104 		/* Set CHIP SELECT. */
   10105 		reg |= EECD_CS;
   10106 		CSR_WRITE(sc, WMREG_EECD, reg);
   10107 		CSR_WRITE_FLUSH(sc);
   10108 		delay(2);
   10109 
   10110 		/* Shift in the READ command. */
   10111 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10112 
   10113 		/* Shift in address. */
   10114 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10115 
   10116 		/* Shift out the data. */
   10117 		wm_eeprom_recvbits(sc, &val, 16);
   10118 		data[i] = val & 0xffff;
   10119 
   10120 		/* Clear CHIP SELECT. */
   10121 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10122 		CSR_WRITE(sc, WMREG_EECD, reg);
   10123 		CSR_WRITE_FLUSH(sc);
   10124 		delay(2);
   10125 	}
   10126 
   10127 	return 0;
   10128 }
   10129 
   10130 /* SPI */
   10131 
   10132 /*
   10133  * Set SPI and FLASH related information from the EECD register.
   10134  * For 82541 and 82547, the word size is taken from EEPROM.
   10135  */
   10136 static int
   10137 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10138 {
   10139 	int size;
   10140 	uint32_t reg;
   10141 	uint16_t data;
   10142 
   10143 	reg = CSR_READ(sc, WMREG_EECD);
   10144 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10145 
   10146 	/* Read the size of NVM from EECD by default */
   10147 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10148 	switch (sc->sc_type) {
   10149 	case WM_T_82541:
   10150 	case WM_T_82541_2:
   10151 	case WM_T_82547:
   10152 	case WM_T_82547_2:
   10153 		/* Set dummy value to access EEPROM */
   10154 		sc->sc_nvm_wordsize = 64;
   10155 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10156 		reg = data;
   10157 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10158 		if (size == 0)
    10159 			size = 6; /* 64 words */
   10160 		else
   10161 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10162 		break;
   10163 	case WM_T_80003:
   10164 	case WM_T_82571:
   10165 	case WM_T_82572:
   10166 	case WM_T_82573: /* SPI case */
   10167 	case WM_T_82574: /* SPI case */
   10168 	case WM_T_82583: /* SPI case */
   10169 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10170 		if (size > 14)
   10171 			size = 14;
   10172 		break;
   10173 	case WM_T_82575:
   10174 	case WM_T_82576:
   10175 	case WM_T_82580:
   10176 	case WM_T_I350:
   10177 	case WM_T_I354:
   10178 	case WM_T_I210:
   10179 	case WM_T_I211:
   10180 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10181 		if (size > 15)
   10182 			size = 15;
   10183 		break;
   10184 	default:
   10185 		aprint_error_dev(sc->sc_dev,
   10186 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10187 		return -1;
    10188 		/* NOTREACHED */
   10189 	}
   10190 
   10191 	sc->sc_nvm_wordsize = 1 << size;
   10192 
   10193 	return 0;
   10194 }
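
/*
 * A worked example of the sizing above (assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6, as in if_wmreg.h): if the EECD size
 * field of an 82571 reads 2, then size = 2 + 6 = 8 and
 * sc_nvm_wordsize = 1 << 8 = 256 words.
 */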
   10195 
   10196 /*
   10197  * wm_nvm_ready_spi:
   10198  *
   10199  *	Wait for a SPI EEPROM to be ready for commands.
   10200  */
   10201 static int
   10202 wm_nvm_ready_spi(struct wm_softc *sc)
   10203 {
   10204 	uint32_t val;
   10205 	int usec;
   10206 
   10207 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10208 		device_xname(sc->sc_dev), __func__));
   10209 
   10210 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10211 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10212 		wm_eeprom_recvbits(sc, &val, 8);
   10213 		if ((val & SPI_SR_RDY) == 0)
   10214 			break;
   10215 	}
   10216 	if (usec >= SPI_MAX_RETRIES) {
   10217 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10218 		return 1;
   10219 	}
   10220 	return 0;
   10221 }
   10222 
   10223 /*
   10224  * wm_nvm_read_spi:
   10225  *
    10226  *	Read a word from the EEPROM using the SPI protocol.
   10227  */
   10228 static int
   10229 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10230 {
   10231 	uint32_t reg, val;
   10232 	int i;
   10233 	uint8_t opc;
   10234 
   10235 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10236 		device_xname(sc->sc_dev), __func__));
   10237 
   10238 	/* Clear SK and CS. */
   10239 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10240 	CSR_WRITE(sc, WMREG_EECD, reg);
   10241 	CSR_WRITE_FLUSH(sc);
   10242 	delay(2);
   10243 
   10244 	if (wm_nvm_ready_spi(sc))
   10245 		return 1;
   10246 
   10247 	/* Toggle CS to flush commands. */
   10248 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10249 	CSR_WRITE_FLUSH(sc);
   10250 	delay(2);
   10251 	CSR_WRITE(sc, WMREG_EECD, reg);
   10252 	CSR_WRITE_FLUSH(sc);
   10253 	delay(2);
   10254 
   10255 	opc = SPI_OPC_READ;
   10256 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10257 		opc |= SPI_OPC_A8;
   10258 
   10259 	wm_eeprom_sendbits(sc, opc, 8);
   10260 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10261 
   10262 	for (i = 0; i < wordcnt; i++) {
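		/*
		 * The two bytes arrive swapped relative to host word
		 * order, so swap them back.
		 */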
   10263 		wm_eeprom_recvbits(sc, &val, 16);
   10264 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10265 	}
   10266 
   10267 	/* Raise CS and clear SK. */
   10268 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10269 	CSR_WRITE(sc, WMREG_EECD, reg);
   10270 	CSR_WRITE_FLUSH(sc);
   10271 	delay(2);
   10272 
   10273 	return 0;
   10274 }
   10275 
    10276 /* Reading via the EERD register */
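
/*
 * The EERD protocol, roughly: write the word address and the START
 * bit into EERD, poll until the DONE bit is set, then pull the
 * 16-bit result out of the DATA field.
 */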
   10277 
   10278 static int
   10279 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10280 {
   10281 	uint32_t attempts = 100000;
   10282 	uint32_t i, reg = 0;
   10283 	int32_t done = -1;
   10284 
   10285 	for (i = 0; i < attempts; i++) {
   10286 		reg = CSR_READ(sc, rw);
   10287 
   10288 		if (reg & EERD_DONE) {
   10289 			done = 0;
   10290 			break;
   10291 		}
   10292 		delay(5);
   10293 	}
   10294 
   10295 	return done;
   10296 }
   10297 
   10298 static int
   10299 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10300     uint16_t *data)
   10301 {
   10302 	int i, eerd = 0;
   10303 	int error = 0;
   10304 
   10305 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10306 		device_xname(sc->sc_dev), __func__));
   10307 
   10308 	for (i = 0; i < wordcnt; i++) {
   10309 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10310 
   10311 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10312 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10313 		if (error != 0)
   10314 			break;
   10315 
   10316 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10317 	}
   10318 
   10319 	return error;
   10320 }
   10321 
   10322 /* Flash */
   10323 
   10324 static int
   10325 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10326 {
   10327 	uint32_t eecd;
   10328 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10329 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10330 	uint8_t sig_byte = 0;
   10331 
   10332 	switch (sc->sc_type) {
   10333 	case WM_T_PCH_SPT:
   10334 		/*
   10335 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10336 		 * sector valid bits from the NVM.
   10337 		 */
   10338 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10339 		if ((*bank == 0) || (*bank == 1)) {
   10340 			aprint_error_dev(sc->sc_dev,
   10341 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10342 				*bank);
   10343 			return -1;
   10344 		} else {
   10345 			*bank = *bank - 2;
   10346 			return 0;
   10347 		}
   10348 	case WM_T_ICH8:
   10349 	case WM_T_ICH9:
   10350 		eecd = CSR_READ(sc, WMREG_EECD);
   10351 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10352 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10353 			return 0;
   10354 		}
   10355 		/* FALLTHROUGH */
   10356 	default:
   10357 		/* Default to 0 */
   10358 		*bank = 0;
   10359 
   10360 		/* Check bank 0 */
   10361 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10362 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10363 			*bank = 0;
   10364 			return 0;
   10365 		}
   10366 
   10367 		/* Check bank 1 */
   10368 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10369 		    &sig_byte);
   10370 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10371 			*bank = 1;
   10372 			return 0;
   10373 		}
   10374 	}
   10375 
   10376 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10377 		device_xname(sc->sc_dev)));
   10378 	return -1;
   10379 }
   10380 
   10381 /******************************************************************************
   10382  * This function does initial flash setup so that a new read/write/erase cycle
   10383  * can be started.
   10384  *
   10385  * sc - The pointer to the hw structure
   10386  ****************************************************************************/
   10387 static int32_t
   10388 wm_ich8_cycle_init(struct wm_softc *sc)
   10389 {
   10390 	uint16_t hsfsts;
   10391 	int32_t error = 1;
   10392 	int32_t i     = 0;
   10393 
   10394 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10395 
    10396 	/* Check the Flash Descriptor Valid bit in HW status */
   10397 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10398 		return error;
   10399 	}
   10400 
   10401 	/* Clear FCERR in Hw status by writing 1 */
   10402 	/* Clear DAEL in Hw status by writing a 1 */
   10403 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10404 
   10405 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10406 
   10407 	/*
    10408 	 * Ideally the hardware would provide an SPI cycle-in-progress
    10409 	 * bit to check against before starting a new cycle, or the FDONE
    10410 	 * bit would be set to 1 after a hardware reset, which could then
    10411 	 * be used to tell whether a cycle is in progress or has
    10412 	 * completed.  We should also have a software semaphore mechanism
    10413 	 * to guard FDONE or the cycle-in-progress bit, so that accesses
    10414 	 * by two threads are serialized, i.e. so that two threads cannot
    10415 	 * start a cycle at the same time.
   10416 	 */
   10417 
   10418 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10419 		/*
   10420 		 * There is no cycle running at present, so we can start a
   10421 		 * cycle
   10422 		 */
   10423 
   10424 		/* Begin by setting Flash Cycle Done. */
   10425 		hsfsts |= HSFSTS_DONE;
   10426 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10427 		error = 0;
   10428 	} else {
   10429 		/*
   10430 		 * otherwise poll for sometime so the current cycle has a
    10431 		 * Otherwise poll for a while so that the current cycle
    10432 		 * has a chance to end before giving up.
   10433 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10434 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10435 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10436 				error = 0;
   10437 				break;
   10438 			}
   10439 			delay(1);
   10440 		}
   10441 		if (error == 0) {
   10442 			/*
    10443 			 * The previous cycle ended while we were polling;
    10444 			 * now set the Flash Cycle Done bit.
   10445 			 */
   10446 			hsfsts |= HSFSTS_DONE;
   10447 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10448 		}
   10449 	}
   10450 	return error;
   10451 }
   10452 
   10453 /******************************************************************************
   10454  * This function starts a flash cycle and waits for its completion
   10455  *
   10456  * sc - The pointer to the hw structure
   10457  ****************************************************************************/
   10458 static int32_t
   10459 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10460 {
   10461 	uint16_t hsflctl;
   10462 	uint16_t hsfsts;
   10463 	int32_t error = 1;
   10464 	uint32_t i = 0;
   10465 
   10466 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10467 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10468 	hsflctl |= HSFCTL_GO;
   10469 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10470 
   10471 	/* Wait till FDONE bit is set to 1 */
   10472 	do {
   10473 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10474 		if (hsfsts & HSFSTS_DONE)
   10475 			break;
   10476 		delay(1);
   10477 		i++;
   10478 	} while (i < timeout);
   10479 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   10480 		error = 0;
   10481 
   10482 	return error;
   10483 }
   10484 
   10485 /******************************************************************************
   10486  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10487  *
   10488  * sc - The pointer to the hw structure
   10489  * index - The index of the byte or word to read.
   10490  * size - Size of data to read, 1=byte 2=word, 4=dword
   10491  * data - Pointer to the word to store the value read.
   10492  *****************************************************************************/
   10493 static int32_t
   10494 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10495     uint32_t size, uint32_t *data)
   10496 {
   10497 	uint16_t hsfsts;
   10498 	uint16_t hsflctl;
   10499 	uint32_t flash_linear_address;
   10500 	uint32_t flash_data = 0;
   10501 	int32_t error = 1;
   10502 	int32_t count = 0;
   10503 
    10504 	if (size < 1 || size > 4 || data == NULL ||
   10505 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10506 		return error;
   10507 
   10508 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10509 	    sc->sc_ich8_flash_base;
   10510 
   10511 	do {
   10512 		delay(1);
    10513 		/* Steps: init the cycle, program HSFCTL and FADDR, run it */
   10514 		error = wm_ich8_cycle_init(sc);
   10515 		if (error)
   10516 			break;
   10517 
   10518 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10519 		/* The byte count field holds size - 1 (0 = 1 byte, etc.) */
   10520 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10521 		    & HSFCTL_BCOUNT_MASK;
   10522 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10523 		if (sc->sc_type == WM_T_PCH_SPT) {
   10524 			/*
    10525 			 * In SPT, this register is in LAN memory space, not
    10526 			 * flash, so only 32-bit access is supported.
   10527 			 */
   10528 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10529 			    (uint32_t)hsflctl);
   10530 		} else
   10531 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10532 
   10533 		/*
   10534 		 * Write the last 24 bits of index into Flash Linear address
   10535 		 * field in Flash Address
   10536 		 */
   10537 		/* TODO: TBD maybe check the index against the size of flash */
   10538 
   10539 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10540 
   10541 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10542 
   10543 		/*
    10544 		 * If FCERR is set, clear it and retry the whole sequence
    10545 		 * a few more times; otherwise read (shift in) the data
    10546 		 * from Flash Data0.  The data is returned least
    10547 		 * significant byte first.
   10548 		 */
   10549 		if (error == 0) {
   10550 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10551 			if (size == 1)
   10552 				*data = (uint8_t)(flash_data & 0x000000FF);
   10553 			else if (size == 2)
   10554 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10555 			else if (size == 4)
   10556 				*data = (uint32_t)flash_data;
   10557 			break;
   10558 		} else {
   10559 			/*
   10560 			 * If we've gotten here, then things are probably
   10561 			 * completely hosed, but if the error condition is
   10562 			 * detected, it won't hurt to give it another try...
   10563 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10564 			 */
   10565 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10566 			if (hsfsts & HSFSTS_ERR) {
   10567 				/* Repeat for some time before giving up. */
   10568 				continue;
   10569 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10570 				break;
   10571 		}
   10572 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10573 
   10574 	return error;
   10575 }
   10576 
   10577 /******************************************************************************
   10578  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10579  *
   10580  * sc - pointer to wm_hw structure
   10581  * index - The index of the byte to read.
   10582  * data - Pointer to a byte to store the value read.
   10583  *****************************************************************************/
   10584 static int32_t
   10585 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10586 {
   10587 	int32_t status;
   10588 	uint32_t word = 0;
   10589 
   10590 	status = wm_read_ich8_data(sc, index, 1, &word);
   10591 	if (status == 0)
   10592 		*data = (uint8_t)word;
   10593 	else
   10594 		*data = 0;
   10595 
   10596 	return status;
   10597 }
   10598 
   10599 /******************************************************************************
   10600  * Reads a word from the NVM using the ICH8 flash access registers.
   10601  *
   10602  * sc - pointer to wm_hw structure
   10603  * index - The starting byte index of the word to read.
   10604  * data - Pointer to a word to store the value read.
   10605  *****************************************************************************/
   10606 static int32_t
   10607 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10608 {
   10609 	int32_t status;
   10610 	uint32_t word = 0;
   10611 
   10612 	status = wm_read_ich8_data(sc, index, 2, &word);
   10613 	if (status == 0)
   10614 		*data = (uint16_t)word;
   10615 	else
   10616 		*data = 0;
   10617 
   10618 	return status;
   10619 }
   10620 
   10621 /******************************************************************************
   10622  * Reads a dword from the NVM using the ICH8 flash access registers.
   10623  *
   10624  * sc - pointer to wm_hw structure
    10625  * index - The starting byte index of the dword to read.
    10626  * data - Pointer to a dword to store the value read.
   10627  *****************************************************************************/
   10628 static int32_t
   10629 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10630 {
   10631 	int32_t status;
   10632 
   10633 	status = wm_read_ich8_data(sc, index, 4, data);
   10634 	return status;
   10635 }
   10636 
   10637 /******************************************************************************
   10638  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10639  * register.
   10640  *
   10641  * sc - Struct containing variables accessed by shared code
   10642  * offset - offset of word in the EEPROM to read
   10643  * data - word read from the EEPROM
   10644  * words - number of words to read
   10645  *****************************************************************************/
   10646 static int
   10647 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10648 {
   10649 	int32_t  error = 0;
   10650 	uint32_t flash_bank = 0;
   10651 	uint32_t act_offset = 0;
   10652 	uint32_t bank_offset = 0;
   10653 	uint16_t word = 0;
   10654 	uint16_t i = 0;
   10655 
   10656 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10657 		device_xname(sc->sc_dev), __func__));
   10658 
   10659 	/*
   10660 	 * We need to know which is the valid flash bank.  In the event
   10661 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10662 	 * managing flash_bank.  So it cannot be trusted and needs
   10663 	 * to be updated with each read.
   10664 	 */
   10665 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10666 	if (error) {
   10667 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10668 			device_xname(sc->sc_dev)));
   10669 		flash_bank = 0;
   10670 	}
   10671 
   10672 	/*
   10673 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10674 	 * size
   10675 	 */
   10676 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10677 
   10678 	error = wm_get_swfwhw_semaphore(sc);
   10679 	if (error) {
   10680 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10681 		    __func__);
   10682 		return error;
   10683 	}
   10684 
   10685 	for (i = 0; i < words; i++) {
   10686 		/* The NVM part needs a byte offset, hence * 2 */
   10687 		act_offset = bank_offset + ((offset + i) * 2);
   10688 		error = wm_read_ich8_word(sc, act_offset, &word);
   10689 		if (error) {
   10690 			aprint_error_dev(sc->sc_dev,
   10691 			    "%s: failed to read NVM\n", __func__);
   10692 			break;
   10693 		}
   10694 		data[i] = word;
   10695 	}
   10696 
   10697 	wm_put_swfwhw_semaphore(sc);
   10698 	return error;
   10699 }
   10700 
   10701 /******************************************************************************
   10702  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10703  * register.
   10704  *
   10705  * sc - Struct containing variables accessed by shared code
   10706  * offset - offset of word in the EEPROM to read
   10707  * data - word read from the EEPROM
   10708  * words - number of words to read
   10709  *****************************************************************************/
   10710 static int
   10711 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10712 {
   10713 	int32_t  error = 0;
   10714 	uint32_t flash_bank = 0;
   10715 	uint32_t act_offset = 0;
   10716 	uint32_t bank_offset = 0;
   10717 	uint32_t dword = 0;
   10718 	uint16_t i = 0;
   10719 
   10720 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10721 		device_xname(sc->sc_dev), __func__));
   10722 
   10723 	/*
   10724 	 * We need to know which is the valid flash bank.  In the event
   10725 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10726 	 * managing flash_bank.  So it cannot be trusted and needs
   10727 	 * to be updated with each read.
   10728 	 */
   10729 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10730 	if (error) {
   10731 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10732 			device_xname(sc->sc_dev)));
   10733 		flash_bank = 0;
   10734 	}
   10735 
   10736 	/*
   10737 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10738 	 * size
   10739 	 */
   10740 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10741 
   10742 	error = wm_get_swfwhw_semaphore(sc);
   10743 	if (error) {
   10744 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10745 		    __func__);
   10746 		return error;
   10747 	}
   10748 
   10749 	for (i = 0; i < words; i++) {
   10750 		/* The NVM part needs a byte offset, hence * 2 */
   10751 		act_offset = bank_offset + ((offset + i) * 2);
   10752 		/* but we must read dword aligned, so mask ... */
   10753 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10754 		if (error) {
   10755 			aprint_error_dev(sc->sc_dev,
   10756 			    "%s: failed to read NVM\n", __func__);
   10757 			break;
   10758 		}
   10759 		/* ... and pick out low or high word */
   10760 		if ((act_offset & 0x2) == 0)
   10761 			data[i] = (uint16_t)(dword & 0xFFFF);
   10762 		else
   10763 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10764 	}
   10765 
   10766 	wm_put_swfwhw_semaphore(sc);
   10767 	return error;
   10768 }
   10769 
   10770 /* iNVM */
   10771 
   10772 static int
   10773 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10774 {
   10775 	int32_t  rv = 0;
   10776 	uint32_t invm_dword;
   10777 	uint16_t i;
   10778 	uint8_t record_type, word_address;
   10779 
   10780 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10781 		device_xname(sc->sc_dev), __func__));
   10782 
   10783 	for (i = 0; i < INVM_SIZE; i++) {
   10784 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10785 		/* Get record type */
   10786 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10787 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10788 			break;
   10789 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10790 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10791 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10792 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10793 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10794 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10795 			if (word_address == address) {
   10796 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10797 				rv = 0;
   10798 				break;
   10799 			}
   10800 		}
   10801 	}
   10802 
   10803 	return rv;
   10804 }
   10805 
   10806 static int
   10807 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10808 {
   10809 	int rv = 0;
   10810 	int i;
   10811 
   10812 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10813 		device_xname(sc->sc_dev), __func__));
   10814 
   10815 	for (i = 0; i < words; i++) {
   10816 		switch (offset + i) {
   10817 		case NVM_OFF_MACADDR:
   10818 		case NVM_OFF_MACADDR1:
   10819 		case NVM_OFF_MACADDR2:
   10820 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10821 			if (rv != 0) {
   10822 				data[i] = 0xffff;
   10823 				rv = -1;
   10824 			}
   10825 			break;
   10826 		case NVM_OFF_CFG2:
   10827 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10828 			if (rv != 0) {
   10829 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10830 				rv = 0;
   10831 			}
   10832 			break;
   10833 		case NVM_OFF_CFG4:
   10834 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10835 			if (rv != 0) {
   10836 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10837 				rv = 0;
   10838 			}
   10839 			break;
   10840 		case NVM_OFF_LED_1_CFG:
   10841 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10842 			if (rv != 0) {
   10843 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10844 				rv = 0;
   10845 			}
   10846 			break;
   10847 		case NVM_OFF_LED_0_2_CFG:
   10848 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10849 			if (rv != 0) {
   10850 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10851 				rv = 0;
   10852 			}
   10853 			break;
   10854 		case NVM_OFF_ID_LED_SETTINGS:
   10855 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10856 			if (rv != 0) {
   10857 				*data = ID_LED_RESERVED_FFFF;
   10858 				rv = 0;
   10859 			}
   10860 			break;
   10861 		default:
   10862 			DPRINTF(WM_DEBUG_NVM,
   10863 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10864 			*data = NVM_RESERVED_WORD;
   10865 			break;
   10866 		}
   10867 	}
   10868 
   10869 	return rv;
   10870 }
   10871 
    10872 /* Locking, NVM type detection, checksum validation, version and read */
   10873 
   10874 /*
   10875  * wm_nvm_acquire:
   10876  *
   10877  *	Perform the EEPROM handshake required on some chips.
   10878  */
   10879 static int
   10880 wm_nvm_acquire(struct wm_softc *sc)
   10881 {
   10882 	uint32_t reg;
   10883 	int x;
   10884 	int ret = 0;
   10885 
   10886 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10887 		device_xname(sc->sc_dev), __func__));
   10888 
   10889 	if (sc->sc_type >= WM_T_ICH8) {
   10890 		ret = wm_get_nvm_ich8lan(sc);
   10891 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10892 		ret = wm_get_swfwhw_semaphore(sc);
   10893 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10894 		/* This will also do wm_get_swsm_semaphore() if needed */
   10895 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10896 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10897 		ret = wm_get_swsm_semaphore(sc);
   10898 	}
   10899 
   10900 	if (ret) {
   10901 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10902 			__func__);
   10903 		return 1;
   10904 	}
   10905 
   10906 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10907 		reg = CSR_READ(sc, WMREG_EECD);
   10908 
   10909 		/* Request EEPROM access. */
   10910 		reg |= EECD_EE_REQ;
   10911 		CSR_WRITE(sc, WMREG_EECD, reg);
   10912 
   10913 		/* ..and wait for it to be granted. */
   10914 		for (x = 0; x < 1000; x++) {
   10915 			reg = CSR_READ(sc, WMREG_EECD);
   10916 			if (reg & EECD_EE_GNT)
   10917 				break;
   10918 			delay(5);
   10919 		}
   10920 		if ((reg & EECD_EE_GNT) == 0) {
   10921 			aprint_error_dev(sc->sc_dev,
   10922 			    "could not acquire EEPROM GNT\n");
   10923 			reg &= ~EECD_EE_REQ;
   10924 			CSR_WRITE(sc, WMREG_EECD, reg);
   10925 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10926 				wm_put_swfwhw_semaphore(sc);
   10927 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10928 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10929 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10930 				wm_put_swsm_semaphore(sc);
   10931 			return 1;
   10932 		}
   10933 	}
   10934 
   10935 	return 0;
   10936 }
   10937 
   10938 /*
   10939  * wm_nvm_release:
   10940  *
   10941  *	Release the EEPROM mutex.
   10942  */
   10943 static void
   10944 wm_nvm_release(struct wm_softc *sc)
   10945 {
   10946 	uint32_t reg;
   10947 
   10948 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10949 		device_xname(sc->sc_dev), __func__));
   10950 
   10951 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10952 		reg = CSR_READ(sc, WMREG_EECD);
   10953 		reg &= ~EECD_EE_REQ;
   10954 		CSR_WRITE(sc, WMREG_EECD, reg);
   10955 	}
   10956 
   10957 	if (sc->sc_type >= WM_T_ICH8) {
   10958 		wm_put_nvm_ich8lan(sc);
   10959 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10960 		wm_put_swfwhw_semaphore(sc);
   10961 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10962 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10963 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10964 		wm_put_swsm_semaphore(sc);
   10965 }
   10966 
   10967 static int
   10968 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10969 {
   10970 	uint32_t eecd = 0;
   10971 
   10972 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10973 	    || sc->sc_type == WM_T_82583) {
   10974 		eecd = CSR_READ(sc, WMREG_EECD);
   10975 
   10976 		/* Isolate bits 15 & 16 */
   10977 		eecd = ((eecd >> 15) & 0x03);
   10978 
   10979 		/* If both bits are set, device is Flash type */
   10980 		if (eecd == 0x03)
   10981 			return 0;
   10982 	}
   10983 	return 1;
   10984 }
   10985 
   10986 static int
   10987 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10988 {
   10989 	uint32_t eec;
   10990 
   10991 	eec = CSR_READ(sc, WMREG_EEC);
   10992 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10993 		return 1;
   10994 
   10995 	return 0;
   10996 }
   10997 
   10998 /*
   10999  * wm_nvm_validate_checksum
   11000  *
   11001  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11002  */
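
/*
 * Illustrative only: a valid image chooses its checksum word so that
 * the 16-bit sum of words 0x00..0x3f equals NVM_CHECKSUM, roughly:
 *
 *	sum = 0;
 *	for (i = 0; i < NVM_SIZE - 1; i++)
 *		sum += word[i];
 *	word[NVM_SIZE - 1] = NVM_CHECKSUM - sum;
 */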
   11003 static int
   11004 wm_nvm_validate_checksum(struct wm_softc *sc)
   11005 {
   11006 	uint16_t checksum;
   11007 	uint16_t eeprom_data;
   11008 #ifdef WM_DEBUG
   11009 	uint16_t csum_wordaddr, valid_checksum;
   11010 #endif
   11011 	int i;
   11012 
   11013 	checksum = 0;
   11014 
   11015 	/* Don't check for I211 */
   11016 	if (sc->sc_type == WM_T_I211)
   11017 		return 0;
   11018 
   11019 #ifdef WM_DEBUG
   11020 	if (sc->sc_type == WM_T_PCH_LPT) {
   11021 		csum_wordaddr = NVM_OFF_COMPAT;
   11022 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11023 	} else {
   11024 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11025 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11026 	}
   11027 
   11028 	/* Dump EEPROM image for debug */
   11029 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11030 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11031 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11032 		/* XXX PCH_SPT? */
   11033 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11034 		if ((eeprom_data & valid_checksum) == 0) {
   11035 			DPRINTF(WM_DEBUG_NVM,
   11036 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11037 				device_xname(sc->sc_dev), eeprom_data,
   11038 				    valid_checksum));
   11039 		}
   11040 	}
   11041 
   11042 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11043 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11044 		for (i = 0; i < NVM_SIZE; i++) {
   11045 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11046 				printf("XXXX ");
   11047 			else
   11048 				printf("%04hx ", eeprom_data);
   11049 			if (i % 8 == 7)
   11050 				printf("\n");
   11051 		}
   11052 	}
   11053 
   11054 #endif /* WM_DEBUG */
   11055 
   11056 	for (i = 0; i < NVM_SIZE; i++) {
   11057 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11058 			return 1;
   11059 		checksum += eeprom_data;
   11060 	}
   11061 
   11062 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11063 #ifdef WM_DEBUG
   11064 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11065 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11066 #endif
   11067 	}
   11068 
   11069 	return 0;
   11070 }
   11071 
   11072 static void
   11073 wm_nvm_version_invm(struct wm_softc *sc)
   11074 {
   11075 	uint32_t dword;
   11076 
   11077 	/*
    11078 	 * Linux's code to decode the version is very strange, so we don't
    11079 	 * follow that algorithm and just use word 61 as the documentation
    11080 	 * describes.  Perhaps it's not perfect though...
   11081 	 *
   11082 	 * Example:
   11083 	 *
   11084 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11085 	 */
   11086 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11087 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11088 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11089 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11090 }
   11091 
   11092 static void
   11093 wm_nvm_version(struct wm_softc *sc)
   11094 {
   11095 	uint16_t major, minor, build, patch;
   11096 	uint16_t uid0, uid1;
   11097 	uint16_t nvm_data;
   11098 	uint16_t off;
   11099 	bool check_version = false;
   11100 	bool check_optionrom = false;
   11101 	bool have_build = false;
   11102 
   11103 	/*
   11104 	 * Version format:
   11105 	 *
   11106 	 * XYYZ
   11107 	 * X0YZ
   11108 	 * X0YY
   11109 	 *
   11110 	 * Example:
   11111 	 *
   11112 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11113 	 *	82571	0x50a6	5.10.6?
   11114 	 *	82572	0x506a	5.6.10?
   11115 	 *	82572EI	0x5069	5.6.9?
   11116 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11117 	 *		0x2013	2.1.3?
    11118 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11119 	 */
   11120 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11121 	switch (sc->sc_type) {
   11122 	case WM_T_82571:
   11123 	case WM_T_82572:
   11124 	case WM_T_82574:
   11125 	case WM_T_82583:
   11126 		check_version = true;
   11127 		check_optionrom = true;
   11128 		have_build = true;
   11129 		break;
   11130 	case WM_T_82575:
   11131 	case WM_T_82576:
   11132 	case WM_T_82580:
   11133 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11134 			check_version = true;
   11135 		break;
   11136 	case WM_T_I211:
   11137 		wm_nvm_version_invm(sc);
   11138 		goto printver;
   11139 	case WM_T_I210:
   11140 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11141 			wm_nvm_version_invm(sc);
   11142 			goto printver;
   11143 		}
   11144 		/* FALLTHROUGH */
   11145 	case WM_T_I350:
   11146 	case WM_T_I354:
   11147 		check_version = true;
   11148 		check_optionrom = true;
   11149 		break;
   11150 	default:
   11151 		return;
   11152 	}
   11153 	if (check_version) {
   11154 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11155 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11156 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11157 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11158 			build = nvm_data & NVM_BUILD_MASK;
   11159 			have_build = true;
   11160 		} else
   11161 			minor = nvm_data & 0x00ff;
   11162 
    11163 		/* Convert minor's hex digits to decimal (e.g. 0x10 -> 10) */
   11164 		minor = (minor / 16) * 10 + (minor % 16);
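		/* e.g. 82571's 0x50a2 decodes to 5.10.2 (see table above) */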
   11165 		sc->sc_nvm_ver_major = major;
   11166 		sc->sc_nvm_ver_minor = minor;
   11167 
   11168 printver:
   11169 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11170 		    sc->sc_nvm_ver_minor);
   11171 		if (have_build) {
   11172 			sc->sc_nvm_ver_build = build;
   11173 			aprint_verbose(".%d", build);
   11174 		}
   11175 	}
   11176 	if (check_optionrom) {
   11177 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11178 		/* Option ROM Version */
   11179 		if ((off != 0x0000) && (off != 0xffff)) {
   11180 			off += NVM_COMBO_VER_OFF;
   11181 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11182 			wm_nvm_read(sc, off, 1, &uid0);
   11183 			if ((uid0 != 0) && (uid0 != 0xffff)
   11184 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11185 				/* 16bits */
   11186 				major = uid0 >> 8;
   11187 				build = (uid0 << 8) | (uid1 >> 8);
   11188 				patch = uid1 & 0x00ff;
   11189 				aprint_verbose(", option ROM Version %d.%d.%d",
   11190 				    major, build, patch);
   11191 			}
   11192 		}
   11193 	}
   11194 
   11195 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11196 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11197 }
   11198 
   11199 /*
   11200  * wm_nvm_read:
   11201  *
   11202  *	Read data from the serial EEPROM.
   11203  */
   11204 static int
   11205 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11206 {
   11207 	int rv;
   11208 
   11209 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11210 		device_xname(sc->sc_dev), __func__));
   11211 
   11212 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11213 		return 1;
   11214 
   11215 	if (wm_nvm_acquire(sc))
   11216 		return 1;
   11217 
   11218 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11219 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11220 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11221 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11222 	else if (sc->sc_type == WM_T_PCH_SPT)
   11223 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11224 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11225 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11226 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11227 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11228 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11229 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11230 	else
   11231 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11232 
   11233 	wm_nvm_release(sc);
   11234 	return rv;
   11235 }
   11236 
   11237 /*
   11238  * Hardware semaphores.
    11239  * Very complex...
   11240  */
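
/*
 * Roughly, the layering on 8257[56]-class parts is:
 *
 *	SWSM.SMBI	- software/software semaphore
 *	SWSM.SWESMBI	- software/firmware semaphore
 *	SW_FW_SYNC	- per-resource lock bits (EEPROM, each PHY, ...)
 *
 * wm_get_swfw_semaphore() below holds SMBI and SWESMBI just long
 * enough to flip the per-resource bit, then drops them again.
 */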
   11241 
   11242 static int
   11243 wm_get_null(struct wm_softc *sc)
   11244 {
   11245 
   11246 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11247 		device_xname(sc->sc_dev), __func__));
   11248 	return 0;
   11249 }
   11250 
   11251 static void
   11252 wm_put_null(struct wm_softc *sc)
   11253 {
   11254 
   11255 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11256 		device_xname(sc->sc_dev), __func__));
   11257 	return;
   11258 }
   11259 
   11260 /*
   11261  * Get hardware semaphore.
   11262  * Same as e1000_get_hw_semaphore_generic()
   11263  */
   11264 static int
   11265 wm_get_swsm_semaphore(struct wm_softc *sc)
   11266 {
   11267 	int32_t timeout;
   11268 	uint32_t swsm;
   11269 
   11270 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11271 		device_xname(sc->sc_dev), __func__));
   11272 	KASSERT(sc->sc_nvm_wordsize > 0);
   11273 
   11274 	/* Get the SW semaphore. */
   11275 	timeout = sc->sc_nvm_wordsize + 1;
   11276 	while (timeout) {
   11277 		swsm = CSR_READ(sc, WMREG_SWSM);
   11278 
   11279 		if ((swsm & SWSM_SMBI) == 0)
   11280 			break;
   11281 
   11282 		delay(50);
   11283 		timeout--;
   11284 	}
   11285 
   11286 	if (timeout == 0) {
   11287 		aprint_error_dev(sc->sc_dev,
   11288 		    "could not acquire SWSM SMBI\n");
   11289 		return 1;
   11290 	}
   11291 
   11292 	/* Get the FW semaphore. */
   11293 	timeout = sc->sc_nvm_wordsize + 1;
   11294 	while (timeout) {
   11295 		swsm = CSR_READ(sc, WMREG_SWSM);
   11296 		swsm |= SWSM_SWESMBI;
   11297 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11298 		/* If we managed to set the bit we got the semaphore. */
   11299 		swsm = CSR_READ(sc, WMREG_SWSM);
   11300 		if (swsm & SWSM_SWESMBI)
   11301 			break;
   11302 
   11303 		delay(50);
   11304 		timeout--;
   11305 	}
   11306 
   11307 	if (timeout == 0) {
   11308 		aprint_error_dev(sc->sc_dev,
   11309 		    "could not acquire SWSM SWESMBI\n");
   11310 		/* Release semaphores */
   11311 		wm_put_swsm_semaphore(sc);
   11312 		return 1;
   11313 	}
   11314 	return 0;
   11315 }
   11316 
   11317 /*
   11318  * Put hardware semaphore.
   11319  * Same as e1000_put_hw_semaphore_generic()
   11320  */
   11321 static void
   11322 wm_put_swsm_semaphore(struct wm_softc *sc)
   11323 {
   11324 	uint32_t swsm;
   11325 
   11326 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11327 		device_xname(sc->sc_dev), __func__));
   11328 
   11329 	swsm = CSR_READ(sc, WMREG_SWSM);
   11330 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11331 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11332 }
   11333 
   11334 /*
   11335  * Get SW/FW semaphore.
   11336  * Same as e1000_acquire_swfw_sync_82575().
   11337  */
   11338 static int
   11339 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11340 {
   11341 	uint32_t swfw_sync;
   11342 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11343 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11344 	int timeout = 200;
   11345 
   11346 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11347 		device_xname(sc->sc_dev), __func__));
   11348 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11349 
   11350 	for (timeout = 0; timeout < 200; timeout++) {
   11351 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11352 			if (wm_get_swsm_semaphore(sc)) {
   11353 				aprint_error_dev(sc->sc_dev,
   11354 				    "%s: failed to get semaphore\n",
   11355 				    __func__);
   11356 				return 1;
   11357 			}
   11358 		}
   11359 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11360 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11361 			swfw_sync |= swmask;
   11362 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11363 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11364 				wm_put_swsm_semaphore(sc);
   11365 			return 0;
   11366 		}
   11367 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11368 			wm_put_swsm_semaphore(sc);
   11369 		delay(5000);
   11370 	}
   11371 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11372 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11373 	return 1;
   11374 }
   11375 
   11376 static void
   11377 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11378 {
   11379 	uint32_t swfw_sync;
   11380 
   11381 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11382 		device_xname(sc->sc_dev), __func__));
   11383 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11384 
   11385 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11386 		while (wm_get_swsm_semaphore(sc) != 0)
   11387 			continue;
   11388 	}
   11389 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11390 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11391 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11392 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11393 		wm_put_swsm_semaphore(sc);
   11394 }
   11395 
   11396 static int
   11397 wm_get_phy_82575(struct wm_softc *sc)
   11398 {
   11399 
   11400 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11401 		device_xname(sc->sc_dev), __func__));
   11402 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11403 }
   11404 
   11405 static void
   11406 wm_put_phy_82575(struct wm_softc *sc)
   11407 {
   11408 
   11409 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11410 		device_xname(sc->sc_dev), __func__));
   11411 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11412 }
   11413 
   11414 static int
   11415 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11416 {
   11417 	uint32_t ext_ctrl;
   11418 	int timeout = 200;
   11419 
   11420 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11421 		device_xname(sc->sc_dev), __func__));
   11422 
   11423 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11424 	for (timeout = 0; timeout < 200; timeout++) {
   11425 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11426 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11427 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11428 
   11429 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11430 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11431 			return 0;
   11432 		delay(5000);
   11433 	}
   11434 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11435 	    device_xname(sc->sc_dev), ext_ctrl);
   11436 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11437 	return 1;
   11438 }
   11439 
   11440 static void
   11441 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11442 {
   11443 	uint32_t ext_ctrl;
   11444 
   11445 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11446 		device_xname(sc->sc_dev), __func__));
   11447 
   11448 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11449 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11450 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11451 
   11452 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11453 }
   11454 
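/*
 * Get the software flag used by ICH8/PCH devices: wait for any other
 * owner to drop EXTCNFCTR_MDIO_SW_OWNERSHIP, then set it and verify
 * that the write stuck.
 */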
   11455 static int
   11456 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11457 {
   11458 	uint32_t ext_ctrl;
   11459 	int timeout;
   11460 
   11461 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11462 		device_xname(sc->sc_dev), __func__));
   11463 	mutex_enter(sc->sc_ich_phymtx);
   11464 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11465 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11466 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11467 			break;
   11468 		delay(1000);
   11469 	}
   11470 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11471 		printf("%s: SW has already locked the resource\n",
   11472 		    device_xname(sc->sc_dev));
   11473 		goto out;
   11474 	}
   11475 
   11476 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11477 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11478 	for (timeout = 0; timeout < 1000; timeout++) {
   11479 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11480 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11481 			break;
   11482 		delay(1000);
   11483 	}
   11484 	if (timeout >= 1000) {
   11485 		printf("%s: failed to acquire semaphore\n",
   11486 		    device_xname(sc->sc_dev));
   11487 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11488 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11489 		goto out;
   11490 	}
   11491 	return 0;
   11492 
   11493 out:
   11494 	mutex_exit(sc->sc_ich_phymtx);
   11495 	return 1;
   11496 }
   11497 
   11498 static void
   11499 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11500 {
   11501 	uint32_t ext_ctrl;
   11502 
   11503 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11504 		device_xname(sc->sc_dev), __func__));
   11505 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11506 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11507 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11508 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11509 	} else {
   11510 		printf("%s: Semaphore unexpectedly released\n",
   11511 		    device_xname(sc->sc_dev));
   11512 	}
   11513 
   11514 	mutex_exit(sc->sc_ich_phymtx);
   11515 }
   11516 
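/*
 * On ICH8 and newer, NVM access only has to be serialized between
 * kernel threads, so a mutex is sufficient and no hardware semaphore
 * is taken here.
 */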
   11517 static int
   11518 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11519 {
   11520 
   11521 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11522 		device_xname(sc->sc_dev), __func__));
   11523 	mutex_enter(sc->sc_ich_nvmmtx);
   11524 
   11525 	return 0;
   11526 }
   11527 
   11528 static void
   11529 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11530 {
   11531 
   11532 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11533 		device_xname(sc->sc_dev), __func__));
   11534 	mutex_exit(sc->sc_ich_nvmmtx);
   11535 }
   11536 
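/*
 * Get the hardware semaphore for the 82573 family by setting MDIO
 * software ownership in EXTCNFCTR, retrying for up to
 * WM_MDIO_OWNERSHIP_TIMEOUT iterations.
 */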
   11537 static int
   11538 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11539 {
   11540 	int i = 0;
   11541 	uint32_t reg;
   11542 
   11543 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11544 		device_xname(sc->sc_dev), __func__));
   11545 
   11546 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11547 	do {
   11548 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11549 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11550 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11551 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11552 			break;
   11553 		delay(2*1000);
   11554 		i++;
   11555 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11556 
   11557 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11558 		wm_put_hw_semaphore_82573(sc);
   11559 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11560 		    device_xname(sc->sc_dev));
   11561 		return -1;
   11562 	}
   11563 
   11564 	return 0;
   11565 }
   11566 
   11567 static void
   11568 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11569 {
   11570 	uint32_t reg;
   11571 
   11572 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11573 		device_xname(sc->sc_dev), __func__));
   11574 
   11575 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11576 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11577 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11578 }
   11579 
   11580 /*
   11581  * Management mode and power management related subroutines.
   11582  * BMC, AMT, suspend/resume and EEE.
   11583  */
   11584 
   11585 #ifdef WM_WOL
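/* Check whether the management firmware is running, by chip generation. */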
   11586 static int
   11587 wm_check_mng_mode(struct wm_softc *sc)
   11588 {
   11589 	int rv;
   11590 
   11591 	switch (sc->sc_type) {
   11592 	case WM_T_ICH8:
   11593 	case WM_T_ICH9:
   11594 	case WM_T_ICH10:
   11595 	case WM_T_PCH:
   11596 	case WM_T_PCH2:
   11597 	case WM_T_PCH_LPT:
   11598 	case WM_T_PCH_SPT:
   11599 		rv = wm_check_mng_mode_ich8lan(sc);
   11600 		break;
   11601 	case WM_T_82574:
   11602 	case WM_T_82583:
   11603 		rv = wm_check_mng_mode_82574(sc);
   11604 		break;
   11605 	case WM_T_82571:
   11606 	case WM_T_82572:
   11607 	case WM_T_82573:
   11608 	case WM_T_80003:
   11609 		rv = wm_check_mng_mode_generic(sc);
   11610 		break;
   11611 	default:
    11612 		/* Nothing to do */
   11613 		rv = 0;
   11614 		break;
   11615 	}
   11616 
   11617 	return rv;
   11618 }
   11619 
   11620 static int
   11621 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11622 {
   11623 	uint32_t fwsm;
   11624 
   11625 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11626 
   11627 	if (((fwsm & FWSM_FW_VALID) != 0)
   11628 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11629 		return 1;
   11630 
   11631 	return 0;
   11632 }
   11633 
   11634 static int
   11635 wm_check_mng_mode_82574(struct wm_softc *sc)
   11636 {
   11637 	uint16_t data;
   11638 
   11639 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11640 
   11641 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11642 		return 1;
   11643 
   11644 	return 0;
   11645 }
   11646 
   11647 static int
   11648 wm_check_mng_mode_generic(struct wm_softc *sc)
   11649 {
   11650 	uint32_t fwsm;
   11651 
   11652 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11653 
   11654 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11655 		return 1;
   11656 
   11657 	return 0;
   11658 }
   11659 #endif /* WM_WOL */
   11660 
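/*
 * Return 1 if management pass-through to the host should be enabled:
 * firmware must be present, TCO receive enabled, and the firmware
 * running in pass-through (not ASF) mode.
 */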
   11661 static int
   11662 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11663 {
   11664 	uint32_t manc, fwsm, factps;
   11665 
   11666 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11667 		return 0;
   11668 
   11669 	manc = CSR_READ(sc, WMREG_MANC);
   11670 
   11671 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11672 		device_xname(sc->sc_dev), manc));
   11673 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11674 		return 0;
   11675 
   11676 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11677 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11678 		factps = CSR_READ(sc, WMREG_FACTPS);
   11679 		if (((factps & FACTPS_MNGCG) == 0)
   11680 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11681 			return 1;
    11682 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11683 		uint16_t data;
   11684 
   11685 		factps = CSR_READ(sc, WMREG_FACTPS);
   11686 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11687 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11688 			device_xname(sc->sc_dev), factps, data));
   11689 		if (((factps & FACTPS_MNGCG) == 0)
   11690 		    && ((data & NVM_CFG2_MNGM_MASK)
   11691 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11692 			return 1;
   11693 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11694 	    && ((manc & MANC_ASF_EN) == 0))
   11695 		return 1;
   11696 
   11697 	return 0;
   11698 }
   11699 
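/*
 * Return true if the firmware currently blocks PHY resets: via
 * FWSM_RSPCIPHY on ICH/PCH, via MANC_BLK_PHY_RST_ON_IDE on the
 * 8257x/80003 families.
 */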
   11700 static bool
   11701 wm_phy_resetisblocked(struct wm_softc *sc)
   11702 {
   11703 	bool blocked = false;
   11704 	uint32_t reg;
   11705 	int i = 0;
   11706 
   11707 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11708 		device_xname(sc->sc_dev), __func__));
   11709 
   11710 	switch (sc->sc_type) {
   11711 	case WM_T_ICH8:
   11712 	case WM_T_ICH9:
   11713 	case WM_T_ICH10:
   11714 	case WM_T_PCH:
   11715 	case WM_T_PCH2:
   11716 	case WM_T_PCH_LPT:
   11717 	case WM_T_PCH_SPT:
   11718 		do {
   11719 			reg = CSR_READ(sc, WMREG_FWSM);
   11720 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11721 				blocked = true;
   11722 				delay(10*1000);
   11723 				continue;
   11724 			}
   11725 			blocked = false;
   11726 		} while (blocked && (i++ < 30));
   11727 		return blocked;
   11729 	case WM_T_82571:
   11730 	case WM_T_82572:
   11731 	case WM_T_82573:
   11732 	case WM_T_82574:
   11733 	case WM_T_82583:
   11734 	case WM_T_80003:
   11735 		reg = CSR_READ(sc, WMREG_MANC);
    11736 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   11741 	default:
   11742 		/* no problem */
   11743 		break;
   11744 	}
   11745 
   11746 	return false;
   11747 }
   11748 
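/*
 * Tell the firmware that the driver has taken over the device,
 * using SWSM_DRV_LOAD on 82573 and CTRL_EXT_DRV_LOAD elsewhere.
 */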
   11749 static void
   11750 wm_get_hw_control(struct wm_softc *sc)
   11751 {
   11752 	uint32_t reg;
   11753 
   11754 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11755 		device_xname(sc->sc_dev), __func__));
   11756 
   11757 	switch (sc->sc_type) {
   11758 	case WM_T_82573:
   11759 		reg = CSR_READ(sc, WMREG_SWSM);
   11760 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11761 		break;
   11762 	case WM_T_82571:
   11763 	case WM_T_82572:
   11764 	case WM_T_82574:
   11765 	case WM_T_82583:
   11766 	case WM_T_80003:
   11767 	case WM_T_ICH8:
   11768 	case WM_T_ICH9:
   11769 	case WM_T_ICH10:
   11770 	case WM_T_PCH:
   11771 	case WM_T_PCH2:
   11772 	case WM_T_PCH_LPT:
   11773 	case WM_T_PCH_SPT:
   11774 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11776 		break;
   11777 	default:
   11778 		break;
   11779 	}
   11780 }
   11781 
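/* Tell the firmware that the driver has released the device. */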
   11782 static void
   11783 wm_release_hw_control(struct wm_softc *sc)
   11784 {
   11785 	uint32_t reg;
   11786 
   11787 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11788 		device_xname(sc->sc_dev), __func__));
   11789 
   11790 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11791 		return;
   11792 
   11793 	if (sc->sc_type == WM_T_82573) {
   11794 		reg = CSR_READ(sc, WMREG_SWSM);
    11795 		reg &= ~SWSM_DRV_LOAD;
    11796 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11797 	} else {
   11798 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11799 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11800 	}
   11801 }
   11802 
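/*
 * Gate or ungate the automatic PHY configuration that the hardware
 * runs after reset. Only meaningful on PCH2 (82579) and newer.
 */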
   11803 static void
   11804 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11805 {
   11806 	uint32_t reg;
   11807 
   11808 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11809 		device_xname(sc->sc_dev), __func__));
   11810 
   11811 	if (sc->sc_type < WM_T_PCH2)
   11812 		return;
   11813 
   11814 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11815 
   11816 	if (gate)
   11817 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11818 	else
   11819 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11820 
   11821 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11822 }
   11823 
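/*
 * Switch the PHY interface from SMBus to PCIe/MDIO by toggling the
 * LANPHYPC pin, forcing SMBus mode around the toggle on PCH_LPT and
 * newer. Skipped when firmware is valid or a PHY reset is blocked.
 */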
   11824 static void
   11825 wm_smbustopci(struct wm_softc *sc)
   11826 {
   11827 	uint32_t fwsm, reg;
   11828 
   11829 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11830 		device_xname(sc->sc_dev), __func__));
   11831 
   11832 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11833 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11834 
   11835 	/* Acquire PHY semaphore */
   11836 	sc->phy.acquire(sc);
   11837 
   11838 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11839 	if (((fwsm & FWSM_FW_VALID) == 0)
    11840 	    && (wm_phy_resetisblocked(sc) == false)) {
   11841 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11842 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11843 			reg |= CTRL_EXT_FORCE_SMBUS;
   11844 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11845 			CSR_WRITE_FLUSH(sc);
   11846 			delay(50*1000);
   11847 		}
   11848 
   11849 		/* Toggle LANPHYPC */
   11850 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11851 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11852 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11853 		CSR_WRITE_FLUSH(sc);
   11854 		delay(1000);
   11855 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11856 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11857 		CSR_WRITE_FLUSH(sc);
   11858 		delay(50*1000);
   11859 
   11860 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11861 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11862 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11863 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11864 		}
   11865 	}
   11866 
   11867 	/* Release semaphore */
   11868 	sc->phy.release(sc);
   11869 
   11870 	/*
   11871 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11872 	 */
   11873 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11874 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11875 }
   11876 
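/*
 * While the driver owns the device, handle ARP in the host rather
 * than in hardware, and (on 82571 and newer) pass management packets
 * on the MANC2H_PORT_623/624 ports up to the host.
 */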
   11877 static void
   11878 wm_init_manageability(struct wm_softc *sc)
   11879 {
   11880 
   11881 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11882 		device_xname(sc->sc_dev), __func__));
   11883 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11884 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11885 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11886 
   11887 		/* Disable hardware interception of ARP */
   11888 		manc &= ~MANC_ARP_EN;
   11889 
   11890 		/* Enable receiving management packets to the host */
   11891 		if (sc->sc_type >= WM_T_82571) {
   11892 			manc |= MANC_EN_MNG2HOST;
    11893 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11894 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11895 		}
   11896 
   11897 		CSR_WRITE(sc, WMREG_MANC, manc);
   11898 	}
   11899 }
   11900 
   11901 static void
   11902 wm_release_manageability(struct wm_softc *sc)
   11903 {
   11904 
   11905 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11906 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11907 
   11908 		manc |= MANC_ARP_EN;
   11909 		if (sc->sc_type >= WM_T_82571)
   11910 			manc &= ~MANC_EN_MNG2HOST;
   11911 
   11912 		CSR_WRITE(sc, WMREG_MANC, manc);
   11913 	}
   11914 }
   11915 
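/*
 * Record the management/wakeup capabilities of this chip in sc_flags:
 * AMT, ARC subsystem validity, ASF firmware presence and
 * manageability.
 */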
   11916 static void
   11917 wm_get_wakeup(struct wm_softc *sc)
   11918 {
   11919 
   11920 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11921 	switch (sc->sc_type) {
   11922 	case WM_T_82573:
   11923 	case WM_T_82583:
   11924 		sc->sc_flags |= WM_F_HAS_AMT;
   11925 		/* FALLTHROUGH */
   11926 	case WM_T_80003:
   11927 	case WM_T_82541:
   11928 	case WM_T_82547:
   11929 	case WM_T_82571:
   11930 	case WM_T_82572:
   11931 	case WM_T_82574:
   11932 	case WM_T_82575:
   11933 	case WM_T_82576:
   11934 	case WM_T_82580:
   11935 	case WM_T_I350:
   11936 	case WM_T_I354:
   11937 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11938 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11939 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11940 		break;
   11941 	case WM_T_ICH8:
   11942 	case WM_T_ICH9:
   11943 	case WM_T_ICH10:
   11944 	case WM_T_PCH:
   11945 	case WM_T_PCH2:
   11946 	case WM_T_PCH_LPT:
   11947 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11948 		sc->sc_flags |= WM_F_HAS_AMT;
   11949 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11950 		break;
   11951 	default:
   11952 		break;
   11953 	}
   11954 
   11955 	/* 1: HAS_MANAGE */
   11956 	if (wm_enable_mng_pass_thru(sc) != 0)
   11957 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11958 
   11959 #ifdef WM_DEBUG
   11960 	printf("\n");
   11961 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11962 		printf("HAS_AMT,");
   11963 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11964 		printf("ARC_SUBSYS_VALID,");
   11965 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11966 		printf("ASF_FIRMWARE_PRES,");
   11967 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11968 		printf("HAS_MANAGE,");
   11969 	printf("\n");
   11970 #endif
    11971 	/*
    11972 	 * Note that the WOL flag is set only after the EEPROM settings
    11973 	 * have been reset.
    11974 	 */
   11975 }
   11976 
   11977 #ifdef WM_WOL
   11978 /* WOL in the newer chipset interfaces (pchlan) */
   11979 static void
   11980 wm_enable_phy_wakeup(struct wm_softc *sc)
   11981 {
   11982 #if 0
   11983 	uint16_t preg;
   11984 
   11985 	/* Copy MAC RARs to PHY RARs */
   11986 
   11987 	/* Copy MAC MTA to PHY MTA */
   11988 
   11989 	/* Configure PHY Rx Control register */
   11990 
   11991 	/* Enable PHY wakeup in MAC register */
   11992 
   11993 	/* Configure and enable PHY wakeup in PHY registers */
   11994 
   11995 	/* Activate PHY wakeup */
   11996 
   11997 	/* XXX */
   11998 #endif
   11999 }
   12000 
   12001 /* Power down workaround on D3 */
   12002 static void
   12003 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12004 {
   12005 	uint32_t reg;
   12006 	int i;
   12007 
   12008 	for (i = 0; i < 2; i++) {
   12009 		/* Disable link */
   12010 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12011 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12012 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12013 
   12014 		/*
   12015 		 * Call gig speed drop workaround on Gig disable before
   12016 		 * accessing any PHY registers
   12017 		 */
   12018 		if (sc->sc_type == WM_T_ICH8)
   12019 			wm_gig_downshift_workaround_ich8lan(sc);
   12020 
   12021 		/* Write VR power-down enable */
   12022 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12023 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12024 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12025 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12026 
   12027 		/* Read it back and test */
   12028 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12029 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12030 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12031 			break;
   12032 
   12033 		/* Issue PHY reset and repeat at most one more time */
   12034 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12035 	}
   12036 }
   12037 
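/*
 * Arm the chip for wake-on-LAN: advertise APME, apply the ICH/PCH
 * gig-disable and PHY workarounds, enable the magic packet filter,
 * and set PME_EN in the PCI power management registers.
 */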
   12038 static void
   12039 wm_enable_wakeup(struct wm_softc *sc)
   12040 {
   12041 	uint32_t reg, pmreg;
   12042 	pcireg_t pmode;
   12043 
   12044 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12045 		device_xname(sc->sc_dev), __func__));
   12046 
   12047 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12048 		&pmreg, NULL) == 0)
   12049 		return;
   12050 
   12051 	/* Advertise the wakeup capability */
   12052 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12053 	    | CTRL_SWDPIN(3));
   12054 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12055 
   12056 	/* ICH workaround */
   12057 	switch (sc->sc_type) {
   12058 	case WM_T_ICH8:
   12059 	case WM_T_ICH9:
   12060 	case WM_T_ICH10:
   12061 	case WM_T_PCH:
   12062 	case WM_T_PCH2:
   12063 	case WM_T_PCH_LPT:
   12064 	case WM_T_PCH_SPT:
   12065 		/* Disable gig during WOL */
   12066 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12067 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12068 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12069 		if (sc->sc_type == WM_T_PCH)
   12070 			wm_gmii_reset(sc);
   12071 
   12072 		/* Power down workaround */
   12073 		if (sc->sc_phytype == WMPHY_82577) {
   12074 			struct mii_softc *child;
   12075 
   12076 			/* Assume that the PHY is copper */
   12077 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12078 			if (child->mii_mpd_rev <= 2)
   12079 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12080 				    (768 << 5) | 25, 0x0444); /* magic num */
   12081 		}
   12082 		break;
   12083 	default:
   12084 		break;
   12085 	}
   12086 
   12087 	/* Keep the laser running on fiber adapters */
   12088 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12089 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12090 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12091 		reg |= CTRL_EXT_SWDPIN(3);
   12092 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12093 	}
   12094 
   12095 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12096 #if 0	/* for the multicast packet */
   12097 	reg |= WUFC_MC;
   12098 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12099 #endif
   12100 
   12101 	if (sc->sc_type == WM_T_PCH) {
   12102 		wm_enable_phy_wakeup(sc);
   12103 	} else {
   12104 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   12105 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12106 	}
   12107 
   12108 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12109 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12110 		|| (sc->sc_type == WM_T_PCH2))
   12111 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12112 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12113 
   12114 	/* Request PME */
   12115 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12116 #if 0
   12117 	/* Disable WOL */
   12118 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12119 #else
   12120 	/* For WOL */
   12121 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12122 #endif
   12123 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12124 }
   12125 #endif /* WM_WOL */
   12126 
   12127 /* LPLU */
   12128 
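/* Disable D0 low-power link-up and re-enable gigabit via PHY_CTRL. */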
   12129 static void
   12130 wm_lplu_d0_disable(struct wm_softc *sc)
   12131 {
   12132 	uint32_t reg;
   12133 
   12134 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12135 		device_xname(sc->sc_dev), __func__));
   12136 
   12137 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12138 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12139 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12140 }
   12141 
   12142 static void
   12143 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12144 {
   12145 	uint32_t reg;
   12146 
   12147 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12148 		device_xname(sc->sc_dev), __func__));
   12149 
   12150 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12151 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12152 	reg |= HV_OEM_BITS_ANEGNOW;
   12153 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12154 }
   12155 
   12156 /* EEE */
   12157 
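/*
 * Enable or disable Energy Efficient Ethernet (including LPI) on
 * I350-class devices according to the WM_F_EEE flag.
 */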
   12158 static void
   12159 wm_set_eee_i350(struct wm_softc *sc)
   12160 {
   12161 	uint32_t ipcnfg, eeer;
   12162 
   12163 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12164 	eeer = CSR_READ(sc, WMREG_EEER);
   12165 
   12166 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12167 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12168 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12169 		    | EEER_LPI_FC);
   12170 	} else {
   12171 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12172 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12173 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12174 		    | EEER_LPI_FC);
   12175 	}
   12176 
   12177 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12178 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12179 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12180 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12181 }
   12182 
   12183 /*
   12184  * Workarounds (mainly PHY related).
   12185  * Basically, PHY's workarounds are in the PHY drivers.
   12186  */
   12187 
   12188 /* Work-around for 82566 Kumeran PCS lock loss */
   12189 static void
   12190 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12191 {
   12192 #if 0
   12193 	int miistatus, active, i;
   12194 	int reg;
   12195 
   12196 	miistatus = sc->sc_mii.mii_media_status;
   12197 
   12198 	/* If the link is not up, do nothing */
   12199 	if ((miistatus & IFM_ACTIVE) == 0)
   12200 		return;
   12201 
   12202 	active = sc->sc_mii.mii_media_active;
   12203 
   12204 	/* Nothing to do if the link is other than 1Gbps */
   12205 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12206 		return;
   12207 
   12208 	for (i = 0; i < 10; i++) {
   12209 		/* read twice */
   12210 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12211 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12212 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12213 			goto out;	/* GOOD! */
   12214 
   12215 		/* Reset the PHY */
   12216 		wm_gmii_reset(sc);
   12217 		delay(5*1000);
   12218 	}
   12219 
   12220 	/* Disable GigE link negotiation */
   12221 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12222 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12223 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12224 
   12225 	/*
   12226 	 * Call gig speed drop workaround on Gig disable before accessing
   12227 	 * any PHY registers.
   12228 	 */
   12229 	wm_gig_downshift_workaround_ich8lan(sc);
   12230 
   12231 out:
   12232 	return;
   12233 #endif
   12234 }
   12235 
   12236 /* WOL from S5 stops working */
   12237 static void
   12238 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12239 {
   12240 	uint16_t kmrn_reg;
   12241 
   12242 	/* Only for igp3 */
   12243 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12244 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12245 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12246 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12247 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12248 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12249 	}
   12250 }
   12251 
   12252 /*
   12253  * Workaround for pch's PHYs
   12254  * XXX should be moved to new PHY driver?
   12255  */
   12256 static void
   12257 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12258 {
   12259 
   12260 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12261 		device_xname(sc->sc_dev), __func__));
   12262 	KASSERT(sc->sc_type == WM_T_PCH);
   12263 
   12264 	if (sc->sc_phytype == WMPHY_82577)
   12265 		wm_set_mdio_slow_mode_hv(sc);
   12266 
   12267 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12268 
   12269 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12270 
   12271 	/* 82578 */
   12272 	if (sc->sc_phytype == WMPHY_82578) {
   12273 		struct mii_softc *child;
   12274 
   12275 		/*
   12276 		 * Return registers to default by doing a soft reset then
   12277 		 * writing 0x3140 to the control register
   12278 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12279 		 */
   12280 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12281 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12282 			PHY_RESET(child);
   12283 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12284 			    0x3140);
   12285 		}
   12286 	}
   12287 
   12288 	/* Select page 0 */
   12289 	sc->phy.acquire(sc);
   12290 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12291 	sc->phy.release(sc);
   12292 
   12293 	/*
   12294 	 * Configure the K1 Si workaround during phy reset assuming there is
   12295 	 * link so that it disables K1 if link is in 1Gbps.
   12296 	 */
   12297 	wm_k1_gig_workaround_hv(sc, 1);
   12298 }
   12299 
   12300 static void
   12301 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12302 {
   12303 
   12304 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12305 		device_xname(sc->sc_dev), __func__));
   12306 	KASSERT(sc->sc_type == WM_T_PCH2);
   12307 
   12308 	wm_set_mdio_slow_mode_hv(sc);
   12309 }
   12310 
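/*
 * K1 is a PHY power-saving state that must not be used while the link
 * is up at 1Gbps; reconfigure it (and the related link stall fix)
 * whenever the link state changes.
 */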
   12311 static int
   12312 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12313 {
   12314 	int k1_enable = sc->sc_nvm_k1_enabled;
   12315 
   12316 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12317 		device_xname(sc->sc_dev), __func__));
   12318 
   12319 	if (sc->phy.acquire(sc) != 0)
   12320 		return -1;
   12321 
   12322 	if (link) {
   12323 		k1_enable = 0;
   12324 
   12325 		/* Link stall fix for link up */
   12326 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12327 	} else {
   12328 		/* Link stall fix for link down */
   12329 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12330 	}
   12331 
   12332 	wm_configure_k1_ich8lan(sc, k1_enable);
   12333 	sc->phy.release(sc);
   12334 
   12335 	return 0;
   12336 }
   12337 
   12338 static void
   12339 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12340 {
   12341 	uint32_t reg;
   12342 
   12343 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12344 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12345 	    reg | HV_KMRN_MDIO_SLOW);
   12346 }
   12347 
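/*
 * Write the K1 enable bit over the Kumeran interface, briefly forcing
 * the MAC speed (CTRL_FRCSPD/CTRL_EXT_SPD_BYPS) while the setting
 * takes effect.
 */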
   12348 static void
   12349 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12350 {
   12351 	uint32_t ctrl, ctrl_ext, tmp;
   12352 	uint16_t kmrn_reg;
   12353 
   12354 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12355 
   12356 	if (k1_enable)
   12357 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12358 	else
   12359 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12360 
   12361 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12362 
   12363 	delay(20);
   12364 
   12365 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12366 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12367 
   12368 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12369 	tmp |= CTRL_FRCSPD;
   12370 
   12371 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12372 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12373 	CSR_WRITE_FLUSH(sc);
   12374 	delay(20);
   12375 
   12376 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12377 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12378 	CSR_WRITE_FLUSH(sc);
   12379 	delay(20);
   12380 }
   12381 
   12382 /* special case - for 82575 - need to do manual init ... */
   12383 static void
   12384 wm_reset_init_script_82575(struct wm_softc *sc)
   12385 {
    12386 	/*
    12387 	 * Remark: this is untested code - we have no board without EEPROM.
    12388 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    12389 	 */
   12390 
   12391 	/* SerDes configuration via SERDESCTRL */
   12392 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12393 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12394 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12395 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12396 
   12397 	/* CCM configuration via CCMCTL register */
   12398 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12399 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12400 
   12401 	/* PCIe lanes configuration */
   12402 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12403 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12404 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12405 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12406 
   12407 	/* PCIe PLL Configuration */
   12408 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12409 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12410 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12411 }
   12412 
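/*
 * Re-derive the MDICNFG external/shared-MDIO bits from the NVM after
 * a reset. Only needed when the port runs in SGMII mode on 82580.
 */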
   12413 static void
   12414 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12415 {
   12416 	uint32_t reg;
   12417 	uint16_t nvmword;
   12418 	int rv;
   12419 
   12420 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12421 		return;
   12422 
   12423 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12424 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12425 	if (rv != 0) {
   12426 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12427 		    __func__);
   12428 		return;
   12429 	}
   12430 
   12431 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12432 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12433 		reg |= MDICNFG_DEST;
   12434 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12435 		reg |= MDICNFG_COM_MDIO;
   12436 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12437 }
   12438 
   12439 /*
   12440  * I210 Errata 25 and I211 Errata 10
   12441  * Slow System Clock.
   12442  */
   12443 static void
   12444 wm_pll_workaround_i210(struct wm_softc *sc)
   12445 {
   12446 	uint32_t mdicnfg, wuc;
   12447 	uint32_t reg;
   12448 	pcireg_t pcireg;
   12449 	uint32_t pmreg;
   12450 	uint16_t nvmword, tmp_nvmword;
   12451 	int phyval;
   12452 	bool wa_done = false;
   12453 	int i;
   12454 
   12455 	/* Save WUC and MDICNFG registers */
   12456 	wuc = CSR_READ(sc, WMREG_WUC);
   12457 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12458 
   12459 	reg = mdicnfg & ~MDICNFG_DEST;
   12460 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12461 
   12462 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12463 		nvmword = INVM_DEFAULT_AL;
   12464 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12465 
   12466 	/* Get Power Management cap offset */
   12467 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12468 		&pmreg, NULL) == 0)
   12469 		return;
   12470 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12471 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12472 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12473 
   12474 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12475 			break; /* OK */
   12476 		}
   12477 
   12478 		wa_done = true;
   12479 		/* Directly reset the internal PHY */
   12480 		reg = CSR_READ(sc, WMREG_CTRL);
   12481 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12482 
   12483 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12484 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12485 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12486 
   12487 		CSR_WRITE(sc, WMREG_WUC, 0);
   12488 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12489 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12490 
   12491 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12492 		    pmreg + PCI_PMCSR);
   12493 		pcireg |= PCI_PMCSR_STATE_D3;
   12494 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12495 		    pmreg + PCI_PMCSR, pcireg);
   12496 		delay(1000);
   12497 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12498 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12499 		    pmreg + PCI_PMCSR, pcireg);
   12500 
   12501 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12502 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12503 
   12504 		/* Restore WUC register */
   12505 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12506 	}
   12507 
   12508 	/* Restore MDICNFG setting */
   12509 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12510 	if (wa_done)
   12511 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12512 }
   12513