      1 /*	$NetBSD: if_wm.c,v 1.424 2016/10/21 04:41:09 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- Tx multiqueue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.424 2016/10/21 04:41:09 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
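        /*
         * DPRINTF usage sketch (illustrative): the second argument carries
         * its own parentheses, so the macro can hand an arbitrary argument
         * list to a plain printf expansion, e.g.:
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
         *	    device_xname(sc->sc_dev)));
         */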
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this driver uses (per-queue + link).
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    197 
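        /*
         * Illustrative arithmetic: the ring and job counts are powers of
         * two, so the "next index" macros wrap with a cheap AND instead of
         * a modulo; e.g. with WM_NTXDESC(txq) == 256:
         *
         *	WM_NEXTTX(txq, 255) == (255 + 1) & 255 == 0
         */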
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  A normal-sized packet fits in a
    204  * single Rx buffer, but a full-sized jumbo frame (~9018 bytes) needs
    205  * ceil(9018 / 2048) = 5 of them.  We allocate 256 receive descriptors,
    206  * each with a 2k buffer (MCLBYTES), which gives us room for ~50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
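        /* SW/FW semaphore bits for each PHY, indexed by the MAC function number. */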
    244 static const uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
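        /*
         * Rx packet buffer size table for the 82580 (values apparently in KB),
         * indexed by the RXPBS register field; consumed by
         * wm_rxpbs_adjust_82580() below.
         */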
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
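        /*
         * Note: "##" does not paste inside a string literal; the literal below
         * is used only for its size, which is large enough for the longest
         * name that WM_Q_EVCNT_ATTACH() builds ("qname" + 2 digits + "evname").
         */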
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
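        /*
         * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw)
         * declares
         *
         *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
         *	struct evcnt txq_ev_txdw;
         *
         * and WM_Q_MISC_EVCNT_ATTACH(txq, txdw, q, 0, xname) then formats the
         * counter name "txq00txdw" and registers it via evcnt_attach_dynamic().
         */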
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* size of a Tx descriptor */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segments */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs.  This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 #ifdef WM_EVENT_COUNTERS
    328 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    329 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    330 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    331 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    332 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    333 						/* XXX not used? */
    334 
    335 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    336 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    337 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    339 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    340 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    341 
    342 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    345 
    346 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    347 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    348 #endif /* WM_EVENT_COUNTERS */
    349 };
    350 
    351 struct wm_rxqueue {
    352 	kmutex_t *rxq_lock;		/* lock for rx operations */
    353 
    354 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    355 
    356 	/* Software state for the receive descriptors. */
    357 	wiseman_rxdesc_t *rxq_descs;
    358 
    359 	/* RX control data structures. */
    360 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    361 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    362 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    363 	int rxq_desc_rseg;		/* real number of control segments */
    364 	size_t rxq_desc_size;		/* control data size */
    365 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    366 
    367 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    368 
    369 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    370 	int rxq_discard;
    371 	int rxq_len;
    372 	struct mbuf *rxq_head;
    373 	struct mbuf *rxq_tail;
    374 	struct mbuf **rxq_tailp;
    375 
    376 #ifdef WM_EVENT_COUNTERS
    377 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    378 
    379 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)	/* IP checksums checked in-bound */
    380 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)	/* TCP/UDP cksums checked in-bound */
    381 #endif
    382 };
    383 
    384 struct wm_queue {
    385 	int wmq_id;			/* index of this Tx/Rx queue pair */
    386 	int wmq_intr_idx;		/* index into the MSI-X vector table */
    387 
    388 	struct wm_txqueue wmq_txq;
    389 	struct wm_rxqueue wmq_rxq;
    390 };
    391 
    392 struct wm_phyop {
    393 	int (*acquire)(struct wm_softc *);
    394 	void (*release)(struct wm_softc *);
    395 };
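        /*
         * Usage sketch for struct wm_phyop (illustrative; assumes the acquire
         * hook returns 0 on success): callers bracket PHY register access
         * with the chip-specific hooks, e.g.
         *
         *	if (sc->phy.acquire(sc) != 0)
         *		return;
         *	... access PHY registers ...
         *	sc->phy.release(sc);
         */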
    396 
    397 /*
    398  * Software state per device.
    399  */
    400 struct wm_softc {
    401 	device_t sc_dev;		/* generic device information */
    402 	bus_space_tag_t sc_st;		/* bus space tag */
    403 	bus_space_handle_t sc_sh;	/* bus space handle */
    404 	bus_size_t sc_ss;		/* bus space size */
    405 	bus_space_tag_t sc_iot;		/* I/O space tag */
    406 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    407 	bus_size_t sc_ios;		/* I/O space size */
    408 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    409 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    410 	bus_size_t sc_flashs;		/* flash registers space size */
    411 	off_t sc_flashreg_offset;	/*
    412 					 * offset to flash registers from
    413 					 * start of BAR
    414 					 */
    415 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    416 
    417 	struct ethercom sc_ethercom;	/* ethernet common data */
    418 	struct mii_data sc_mii;		/* MII/media information */
    419 
    420 	pci_chipset_tag_t sc_pc;
    421 	pcitag_t sc_pcitag;
    422 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    423 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    424 
    425 	uint16_t sc_pcidevid;		/* PCI device ID */
    426 	wm_chip_type sc_type;		/* MAC type */
    427 	int sc_rev;			/* MAC revision */
    428 	wm_phy_type sc_phytype;		/* PHY type */
    429 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    430 #define	WM_MEDIATYPE_UNKNOWN		0x00
    431 #define	WM_MEDIATYPE_FIBER		0x01
    432 #define	WM_MEDIATYPE_COPPER		0x02
    433 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    434 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    435 	int sc_flags;			/* flags; see below */
    436 	int sc_if_flags;		/* last if_flags */
    437 	int sc_flowflags;		/* 802.3x flow control flags */
    438 	int sc_align_tweak;
    439 
    440 	void *sc_ihs[WM_MAX_NINTR];	/*
    441 					 * Interrupt cookies.
    442 					 * Legacy and MSI use sc_ihs[0].
    443 					 */
    444 	pci_intr_handle_t *sc_intrs;	/* Legacy and MSI use sc_intrs[0] */
    445 	int sc_nintrs;			/* number of interrupts */
    446 
    447 	int sc_link_intr_idx;		/* index into the MSI-X vector table */
    448 
    449 	callout_t sc_tick_ch;		/* tick callout */
    450 	bool sc_stopping;
    451 
    452 	int sc_nvm_ver_major;
    453 	int sc_nvm_ver_minor;
    454 	int sc_nvm_ver_build;
    455 	int sc_nvm_addrbits;		/* NVM address bits */
    456 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    457 	int sc_ich8_flash_base;
    458 	int sc_ich8_flash_bank_size;
    459 	int sc_nvm_k1_enabled;
    460 
    461 	int sc_nqueues;
    462 	struct wm_queue *sc_queue;
    463 
    464 	int sc_affinity_offset;
    465 
    466 #ifdef WM_EVENT_COUNTERS
    467 	/* Event counters. */
    468 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    469 
    470 	/* WM_T_82542_2_1 only */
    471 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    472 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    473 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    474 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    475 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    476 #endif /* WM_EVENT_COUNTERS */
    477 
    478 	/* This variable is used only on the 82547. */
    479 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    480 
    481 	uint32_t sc_ctrl;		/* prototype CTRL register */
    482 #if 0
    483 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    484 #endif
    485 	uint32_t sc_icr;		/* prototype interrupt bits */
    486 	uint32_t sc_itr;		/* prototype intr throttling reg */
    487 	uint32_t sc_tctl;		/* prototype TCTL register */
    488 	uint32_t sc_rctl;		/* prototype RCTL register */
    489 	uint32_t sc_txcw;		/* prototype TXCW register */
    490 	uint32_t sc_tipg;		/* prototype TIPG register */
    491 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    492 	uint32_t sc_pba;		/* prototype PBA register */
    493 
    494 	int sc_tbi_linkup;		/* TBI link status */
    495 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    496 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    497 
    498 	int sc_mchash_type;		/* multicast filter offset */
    499 
    500 	krndsource_t rnd_source;	/* random source */
    501 
    502 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    503 
    504 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    505 	kmutex_t *sc_ich_phymtx;	/*
    506 					 * 82574/82583/ICH/PCH specific PHY
    507 					 * mutex. For 82574/82583, the mutex
    508 					 * is used for both PHY and NVM.
    509 					 */
    510 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    511 
    512 	struct wm_phyop phy;
    513 };
    514 
    515 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    516 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    517 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
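        /*
         * Usage sketch (illustrative): sc_core_lock may be NULL, in which
         * case these macros degrade to no-ops, e.g.
         *
         *	WM_CORE_LOCK(sc);
         *	... update softc state ...
         *	WM_CORE_UNLOCK(sc);
         *
         * with KASSERT(WM_CORE_LOCKED(sc)) guarding core-locked helpers.
         */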
    518 
    519 #ifdef WM_MPSAFE
    520 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    521 #else
    522 #define CALLOUT_FLAGS	0
    523 #endif
    524 
    525 #define	WM_RXCHAIN_RESET(rxq)						\
    526 do {									\
    527 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    528 	*(rxq)->rxq_tailp = NULL;					\
    529 	(rxq)->rxq_len = 0;						\
    530 } while (/*CONSTCOND*/0)
    531 
    532 #define	WM_RXCHAIN_LINK(rxq, m)						\
    533 do {									\
    534 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    535 	(rxq)->rxq_tailp = &(m)->m_next;				\
    536 } while (/*CONSTCOND*/0)
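        /*
         * Sketch of how the Rx chain macros cooperate (illustrative):
         * rxq_tailp always points at the m_next slot to fill, so appending
         * an mbuf is O(1):
         *
         *	WM_RXCHAIN_RESET(rxq);		head = NULL, tailp = &head
         *	WM_RXCHAIN_LINK(rxq, m1);	head = m1, tailp = &m1->m_next
         *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
         */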
    537 
    538 #ifdef WM_EVENT_COUNTERS
    539 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    540 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    541 
    542 #define WM_Q_EVCNT_INCR(qname, evname)			\
    543 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    544 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    545 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    546 #else /* !WM_EVENT_COUNTERS */
    547 #define	WM_EVCNT_INCR(ev)	/* nothing */
    548 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    549 
    550 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    551 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    552 #endif /* !WM_EVENT_COUNTERS */
    553 
    554 #define	CSR_READ(sc, reg)						\
    555 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    556 #define	CSR_WRITE(sc, reg, val)						\
    557 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    558 #define	CSR_WRITE_FLUSH(sc)						\
    559 	(void) CSR_READ((sc), WMREG_STATUS)
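        /*
         * Posted-write flush sketch (illustrative; register/bit names
         * assumed from if_wmreg.h): reading STATUS forces posted register
         * writes out to the chip before a subsequent delay, e.g.
         *
         *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
         *	CSR_WRITE_FLUSH(sc);
         *	delay(10000);
         */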
    560 
    561 #define ICH8_FLASH_READ32(sc, reg)					\
    562 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    563 	    (reg) + (sc)->sc_flashreg_offset)
    564 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    565 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    566 	    (reg) + (sc)->sc_flashreg_offset, (data))
    567 
    568 #define ICH8_FLASH_READ16(sc, reg)					\
    569 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    570 	    (reg) + (sc)->sc_flashreg_offset)
    571 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    572 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    573 	    (reg) + (sc)->sc_flashreg_offset, (data))
    574 
    575 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    576 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    577 
    578 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    579 #define	WM_CDTXADDR_HI(txq, x)						\
    580 	(sizeof(bus_addr_t) == 8 ?					\
    581 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    582 
    583 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    584 #define	WM_CDRXADDR_HI(rxq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
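        /*
         * Illustrative use of the LO/HI helpers (register names assumed from
         * if_wmreg.h): a 64-bit descriptor ring base address is programmed
         * as two 32-bit halves, e.g.
         *
         *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
         *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
         */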
    587 
    588 /*
    589  * Register read/write functions,
    590  * other than CSR_{READ|WRITE}().
    591  */
    592 #if 0
    593 static inline uint32_t wm_io_read(struct wm_softc *, int);
    594 #endif
    595 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    596 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    597 	uint32_t, uint32_t);
    598 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    599 
    600 /*
    601  * Descriptor sync/init functions.
    602  */
    603 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    604 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    605 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    606 
    607 /*
    608  * Device driver interface functions and commonly used functions.
    609  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    610  */
    611 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    612 static int	wm_match(device_t, cfdata_t, void *);
    613 static void	wm_attach(device_t, device_t, void *);
    614 static int	wm_detach(device_t, int);
    615 static bool	wm_suspend(device_t, const pmf_qual_t *);
    616 static bool	wm_resume(device_t, const pmf_qual_t *);
    617 static void	wm_watchdog(struct ifnet *);
    618 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    619 static void	wm_tick(void *);
    620 static int	wm_ifflags_cb(struct ethercom *);
    621 static int	wm_ioctl(struct ifnet *, u_long, void *);
    622 /* MAC address related */
    623 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    624 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    625 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    626 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    627 static void	wm_set_filter(struct wm_softc *);
    628 /* Reset and init related */
    629 static void	wm_set_vlan(struct wm_softc *);
    630 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    631 static void	wm_get_auto_rd_done(struct wm_softc *);
    632 static void	wm_lan_init_done(struct wm_softc *);
    633 static void	wm_get_cfg_done(struct wm_softc *);
    634 static void	wm_initialize_hardware_bits(struct wm_softc *);
    635 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    636 static void	wm_reset(struct wm_softc *);
    637 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    638 static void	wm_rxdrain(struct wm_rxqueue *);
    639 static void	wm_rss_getkey(uint8_t *);
    640 static void	wm_init_rss(struct wm_softc *);
    641 static void	wm_adjust_qnum(struct wm_softc *, int);
    642 static int	wm_setup_legacy(struct wm_softc *);
    643 static int	wm_setup_msix(struct wm_softc *);
    644 static int	wm_init(struct ifnet *);
    645 static int	wm_init_locked(struct ifnet *);
    646 static void	wm_stop(struct ifnet *, int);
    647 static void	wm_stop_locked(struct ifnet *, int);
    648 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    649 static void	wm_82547_txfifo_stall(void *);
    650 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    651 /* DMA related */
    652 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    653 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    654 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    655 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    656     struct wm_txqueue *);
    657 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    658 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    659 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    660     struct wm_rxqueue *);
    661 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    664 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    665 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    666 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    668     struct wm_txqueue *);
    669 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    670     struct wm_rxqueue *);
    671 static int	wm_alloc_txrx_queues(struct wm_softc *);
    672 static void	wm_free_txrx_queues(struct wm_softc *);
    673 static int	wm_init_txrx_queues(struct wm_softc *);
    674 /* Start */
    675 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    676     uint32_t *, uint8_t *);
    677 static void	wm_start(struct ifnet *);
    678 static void	wm_start_locked(struct ifnet *);
    679 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    680     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    681 static void	wm_nq_start(struct ifnet *);
    682 static void	wm_nq_start_locked(struct ifnet *);
    683 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    684 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    685 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    686 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    687 /* Interrupt */
    688 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    689 static void	wm_rxeof(struct wm_rxqueue *);
    690 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    691 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    692 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    693 static void	wm_linkintr(struct wm_softc *, uint32_t);
    694 static int	wm_intr_legacy(void *);
    695 static int	wm_txrxintr_msix(void *);
    696 static int	wm_linkintr_msix(void *);
    697 
    698 /*
    699  * Media related.
    700  * GMII, SGMII, TBI, SERDES and SFP.
    701  */
    702 /* Common */
    703 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    704 /* GMII related */
    705 static void	wm_gmii_reset(struct wm_softc *);
    706 static int	wm_get_phy_id_82575(struct wm_softc *);
    707 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    708 static int	wm_gmii_mediachange(struct ifnet *);
    709 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    710 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    711 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    712 static int	wm_gmii_i82543_readreg(device_t, int, int);
    713 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    714 static int	wm_gmii_mdic_readreg(device_t, int, int);
    715 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    716 static int	wm_gmii_i82544_readreg(device_t, int, int);
    717 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    718 static int	wm_gmii_i80003_readreg(device_t, int, int);
    719 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    720 static int	wm_gmii_bm_readreg(device_t, int, int);
    721 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    722 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    723 static int	wm_gmii_hv_readreg(device_t, int, int);
    724 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    725 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    726 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    727 static int	wm_gmii_82580_readreg(device_t, int, int);
    728 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    729 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    730 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    731 static void	wm_gmii_statchg(struct ifnet *);
    732 static int	wm_kmrn_readreg(struct wm_softc *, int);
    733 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    734 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    735 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    736 /* SGMII */
    737 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    738 static int	wm_sgmii_readreg(device_t, int, int);
    739 static void	wm_sgmii_writereg(device_t, int, int, int);
    740 /* TBI related */
    741 static void	wm_tbi_mediainit(struct wm_softc *);
    742 static int	wm_tbi_mediachange(struct ifnet *);
    743 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    744 static int	wm_check_for_link(struct wm_softc *);
    745 static void	wm_tbi_tick(struct wm_softc *);
    746 /* SERDES related */
    747 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    748 static int	wm_serdes_mediachange(struct ifnet *);
    749 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    750 static void	wm_serdes_tick(struct wm_softc *);
    751 /* SFP related */
    752 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    753 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    754 
    755 /*
    756  * NVM related.
    757  * Microwire, SPI (with/without EERD) and Flash.
    758  */
    759 /* Misc functions */
    760 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    761 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    762 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    763 /* Microwire */
    764 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    765 /* SPI */
    766 static int	wm_nvm_ready_spi(struct wm_softc *);
    767 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    768 /* Used with EERD */
    769 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    770 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    771 /* Flash */
    772 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    773     unsigned int *);
    774 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    775 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    776 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    777 	uint32_t *);
    778 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    779 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    780 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    781 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    782 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    783 /* iNVM */
    784 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    785 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    786 /* Locking, NVM type detection, checksum validation and read */
    787 static int	wm_nvm_acquire(struct wm_softc *);
    788 static void	wm_nvm_release(struct wm_softc *);
    789 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    790 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    791 static int	wm_nvm_validate_checksum(struct wm_softc *);
    792 static void	wm_nvm_version_invm(struct wm_softc *);
    793 static void	wm_nvm_version(struct wm_softc *);
    794 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    795 
    796 /*
    797  * Hardware semaphores.
    798  * Very complex...
    799  */
    800 static int	wm_get_null(struct wm_softc *);
    801 static void	wm_put_null(struct wm_softc *);
    802 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    803 static void	wm_put_swsm_semaphore(struct wm_softc *);
    804 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    805 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    806 static int	wm_get_phy_82575(struct wm_softc *);
    807 static void	wm_put_phy_82575(struct wm_softc *);
    808 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    809 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    810 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    811 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    812 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    813 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    814 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    815 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    816 
    817 /*
    818  * Management mode and power management related subroutines.
    819  * BMC, AMT, suspend/resume and EEE.
    820  */
    821 #ifdef WM_WOL
    822 static int	wm_check_mng_mode(struct wm_softc *);
    823 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    824 static int	wm_check_mng_mode_82574(struct wm_softc *);
    825 static int	wm_check_mng_mode_generic(struct wm_softc *);
    826 #endif
    827 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    828 static bool	wm_phy_resetisblocked(struct wm_softc *);
    829 static void	wm_get_hw_control(struct wm_softc *);
    830 static void	wm_release_hw_control(struct wm_softc *);
    831 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    832 static void	wm_smbustopci(struct wm_softc *);
    833 static void	wm_init_manageability(struct wm_softc *);
    834 static void	wm_release_manageability(struct wm_softc *);
    835 static void	wm_get_wakeup(struct wm_softc *);
    836 #ifdef WM_WOL
    837 static void	wm_enable_phy_wakeup(struct wm_softc *);
    838 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    839 static void	wm_enable_wakeup(struct wm_softc *);
    840 #endif
    841 /* LPLU (Low Power Link Up) */
    842 static void	wm_lplu_d0_disable(struct wm_softc *);
    843 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    844 /* EEE */
    845 static void	wm_set_eee_i350(struct wm_softc *);
    846 
    847 /*
    848  * Workarounds (mainly PHY related).
    849  * Basically, PHY workarounds are implemented in the PHY drivers.
    850  */
    851 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    852 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    853 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    854 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    855 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    856 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    857 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    858 static void	wm_reset_init_script_82575(struct wm_softc *);
    859 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    860 static void	wm_pll_workaround_i210(struct wm_softc *);
    861 
    862 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    863     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    864 
    865 /*
    866  * Devices supported by this driver.
    867  */
    868 static const struct wm_product {
    869 	pci_vendor_id_t		wmp_vendor;
    870 	pci_product_id_t	wmp_product;
    871 	const char		*wmp_name;
    872 	wm_chip_type		wmp_type;
    873 	uint32_t		wmp_flags;
    874 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    875 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    876 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    877 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    878 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    879 } wm_products[] = {
    880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    881 	  "Intel i82542 1000BASE-X Ethernet",
    882 	  WM_T_82542_2_1,	WMP_F_FIBER },
    883 
    884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    885 	  "Intel i82543GC 1000BASE-X Ethernet",
    886 	  WM_T_82543,		WMP_F_FIBER },
    887 
    888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    889 	  "Intel i82543GC 1000BASE-T Ethernet",
    890 	  WM_T_82543,		WMP_F_COPPER },
    891 
    892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    893 	  "Intel i82544EI 1000BASE-T Ethernet",
    894 	  WM_T_82544,		WMP_F_COPPER },
    895 
    896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    897 	  "Intel i82544EI 1000BASE-X Ethernet",
    898 	  WM_T_82544,		WMP_F_FIBER },
    899 
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    901 	  "Intel i82544GC 1000BASE-T Ethernet",
    902 	  WM_T_82544,		WMP_F_COPPER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    905 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    906 	  WM_T_82544,		WMP_F_COPPER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    909 	  "Intel i82540EM 1000BASE-T Ethernet",
    910 	  WM_T_82540,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    913 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    914 	  WM_T_82540,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    917 	  "Intel i82540EP 1000BASE-T Ethernet",
    918 	  WM_T_82540,		WMP_F_COPPER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    921 	  "Intel i82540EP 1000BASE-T Ethernet",
    922 	  WM_T_82540,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    925 	  "Intel i82540EP 1000BASE-T Ethernet",
    926 	  WM_T_82540,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    929 	  "Intel i82545EM 1000BASE-T Ethernet",
    930 	  WM_T_82545,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    933 	  "Intel i82545GM 1000BASE-T Ethernet",
    934 	  WM_T_82545_3,		WMP_F_COPPER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    937 	  "Intel i82545GM 1000BASE-X Ethernet",
    938 	  WM_T_82545_3,		WMP_F_FIBER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    941 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    942 	  WM_T_82545_3,		WMP_F_SERDES },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    945 	  "Intel i82546EB 1000BASE-T Ethernet",
    946 	  WM_T_82546,		WMP_F_COPPER },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    949 	  "Intel i82546EB 1000BASE-T Ethernet",
    950 	  WM_T_82546,		WMP_F_COPPER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    953 	  "Intel i82545EM 1000BASE-X Ethernet",
    954 	  WM_T_82545,		WMP_F_FIBER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    957 	  "Intel i82546EB 1000BASE-X Ethernet",
    958 	  WM_T_82546,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    961 	  "Intel i82546GB 1000BASE-T Ethernet",
    962 	  WM_T_82546_3,		WMP_F_COPPER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    965 	  "Intel i82546GB 1000BASE-X Ethernet",
    966 	  WM_T_82546_3,		WMP_F_FIBER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    969 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    970 	  WM_T_82546_3,		WMP_F_SERDES },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    973 	  "i82546GB quad-port Gigabit Ethernet",
    974 	  WM_T_82546_3,		WMP_F_COPPER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    977 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    978 	  WM_T_82546_3,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    981 	  "Intel PRO/1000MT (82546GB)",
    982 	  WM_T_82546_3,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    985 	  "Intel i82541EI 1000BASE-T Ethernet",
    986 	  WM_T_82541,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    989 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    990 	  WM_T_82541,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    993 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    994 	  WM_T_82541,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    997 	  "Intel i82541ER 1000BASE-T Ethernet",
    998 	  WM_T_82541_2,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1001 	  "Intel i82541GI 1000BASE-T Ethernet",
   1002 	  WM_T_82541_2,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1005 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1006 	  WM_T_82541_2,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1009 	  "Intel i82541PI 1000BASE-T Ethernet",
   1010 	  WM_T_82541_2,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1013 	  "Intel i82547EI 1000BASE-T Ethernet",
   1014 	  WM_T_82547,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1017 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1018 	  WM_T_82547,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1021 	  "Intel i82547GI 1000BASE-T Ethernet",
   1022 	  WM_T_82547_2,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1025 	  "Intel PRO/1000 PT (82571EB)",
   1026 	  WM_T_82571,		WMP_F_COPPER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1029 	  "Intel PRO/1000 PF (82571EB)",
   1030 	  WM_T_82571,		WMP_F_FIBER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1033 	  "Intel PRO/1000 PB (82571EB)",
   1034 	  WM_T_82571,		WMP_F_SERDES },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1037 	  "Intel PRO/1000 QT (82571EB)",
   1038 	  WM_T_82571,		WMP_F_COPPER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1041 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1042 	  WM_T_82571,		WMP_F_COPPER, },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1045 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1046 	  WM_T_82571,		WMP_F_COPPER, },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1049 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1050 	  WM_T_82571,		WMP_F_SERDES, },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1053 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1054 	  WM_T_82571,		WMP_F_SERDES, },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1057 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1058 	  WM_T_82571,		WMP_F_FIBER, },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1061 	  "Intel i82572EI 1000baseT Ethernet",
   1062 	  WM_T_82572,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1065 	  "Intel i82572EI 1000baseX Ethernet",
   1066 	  WM_T_82572,		WMP_F_FIBER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1069 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1070 	  WM_T_82572,		WMP_F_SERDES },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1073 	  "Intel i82572EI 1000baseT Ethernet",
   1074 	  WM_T_82572,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1077 	  "Intel i82573E",
   1078 	  WM_T_82573,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1081 	  "Intel i82573E IAMT",
   1082 	  WM_T_82573,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1085 	  "Intel i82573L Gigabit Ethernet",
   1086 	  WM_T_82573,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1089 	  "Intel i82574L",
   1090 	  WM_T_82574,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1093 	  "Intel i82574L",
   1094 	  WM_T_82574,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1097 	  "Intel i82583V",
   1098 	  WM_T_82583,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1101 	  "i80003 dual 1000baseT Ethernet",
   1102 	  WM_T_80003,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1105 	  "i80003 dual 1000baseX Ethernet",
   1106 	  WM_T_80003,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1109 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1110 	  WM_T_80003,		WMP_F_SERDES },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1113 	  "Intel i80003 1000baseT Ethernet",
   1114 	  WM_T_80003,		WMP_F_COPPER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1117 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1118 	  WM_T_80003,		WMP_F_SERDES },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1121 	  "Intel i82801H (M_AMT) LAN Controller",
   1122 	  WM_T_ICH8,		WMP_F_COPPER },
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1124 	  "Intel i82801H (AMT) LAN Controller",
   1125 	  WM_T_ICH8,		WMP_F_COPPER },
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1127 	  "Intel i82801H LAN Controller",
   1128 	  WM_T_ICH8,		WMP_F_COPPER },
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1130 	  "Intel i82801H (IFE) LAN Controller",
   1131 	  WM_T_ICH8,		WMP_F_COPPER },
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1133 	  "Intel i82801H (M) LAN Controller",
   1134 	  WM_T_ICH8,		WMP_F_COPPER },
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1136 	  "Intel i82801H IFE (GT) LAN Controller",
   1137 	  WM_T_ICH8,		WMP_F_COPPER },
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1139 	  "Intel i82801H IFE (G) LAN Controller",
   1140 	  WM_T_ICH8,		WMP_F_COPPER },
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1142 	  "82801I (AMT) LAN Controller",
   1143 	  WM_T_ICH9,		WMP_F_COPPER },
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1145 	  "82801I LAN Controller",
   1146 	  WM_T_ICH9,		WMP_F_COPPER },
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1148 	  "82801I (G) LAN Controller",
   1149 	  WM_T_ICH9,		WMP_F_COPPER },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1151 	  "82801I (GT) LAN Controller",
   1152 	  WM_T_ICH9,		WMP_F_COPPER },
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1154 	  "82801I (C) LAN Controller",
   1155 	  WM_T_ICH9,		WMP_F_COPPER },
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1157 	  "82801I mobile LAN Controller",
   1158 	  WM_T_ICH9,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1160 	  "82801I mobile (V) LAN Controller",
   1161 	  WM_T_ICH9,		WMP_F_COPPER },
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1163 	  "82801I mobile (AMT) LAN Controller",
   1164 	  WM_T_ICH9,		WMP_F_COPPER },
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1166 	  "82567LM-4 LAN Controller",
   1167 	  WM_T_ICH9,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1169 	  "82567V-3 LAN Controller",
   1170 	  WM_T_ICH9,		WMP_F_COPPER },
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1172 	  "82567LM-2 LAN Controller",
   1173 	  WM_T_ICH10,		WMP_F_COPPER },
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1175 	  "82567LF-2 LAN Controller",
   1176 	  WM_T_ICH10,		WMP_F_COPPER },
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1178 	  "82567LM-3 LAN Controller",
   1179 	  WM_T_ICH10,		WMP_F_COPPER },
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1181 	  "82567LF-3 LAN Controller",
   1182 	  WM_T_ICH10,		WMP_F_COPPER },
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1184 	  "82567V-2 LAN Controller",
   1185 	  WM_T_ICH10,		WMP_F_COPPER },
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1187 	  "82567V-3? LAN Controller",
   1188 	  WM_T_ICH10,		WMP_F_COPPER },
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1190 	  "HANKSVILLE LAN Controller",
   1191 	  WM_T_ICH10,		WMP_F_COPPER },
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1193 	  "PCH LAN (82577LM) Controller",
   1194 	  WM_T_PCH,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1196 	  "PCH LAN (82577LC) Controller",
   1197 	  WM_T_PCH,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1199 	  "PCH LAN (82578DM) Controller",
   1200 	  WM_T_PCH,		WMP_F_COPPER },
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1202 	  "PCH LAN (82578DC) Controller",
   1203 	  WM_T_PCH,		WMP_F_COPPER },
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1205 	  "PCH2 LAN (82579LM) Controller",
   1206 	  WM_T_PCH2,		WMP_F_COPPER },
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1208 	  "PCH2 LAN (82579V) Controller",
   1209 	  WM_T_PCH2,		WMP_F_COPPER },
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1211 	  "82575EB dual-1000baseT Ethernet",
   1212 	  WM_T_82575,		WMP_F_COPPER },
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1214 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1215 	  WM_T_82575,		WMP_F_SERDES },
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1217 	  "82575GB quad-1000baseT Ethernet",
   1218 	  WM_T_82575,		WMP_F_COPPER },
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1220 	  "82575GB quad-1000baseT Ethernet (PM)",
   1221 	  WM_T_82575,		WMP_F_COPPER },
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1223 	  "82576 1000BaseT Ethernet",
   1224 	  WM_T_82576,		WMP_F_COPPER },
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1226 	  "82576 1000BaseX Ethernet",
   1227 	  WM_T_82576,		WMP_F_FIBER },
   1228 
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1230 	  "82576 gigabit Ethernet (SERDES)",
   1231 	  WM_T_82576,		WMP_F_SERDES },
   1232 
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1234 	  "82576 quad-1000BaseT Ethernet",
   1235 	  WM_T_82576,		WMP_F_COPPER },
   1236 
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1238 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1239 	  WM_T_82576,		WMP_F_COPPER },
   1240 
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1242 	  "82576 gigabit Ethernet",
   1243 	  WM_T_82576,		WMP_F_COPPER },
   1244 
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1246 	  "82576 gigabit Ethernet (SERDES)",
   1247 	  WM_T_82576,		WMP_F_SERDES },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1249 	  "82576 quad-gigabit Ethernet (SERDES)",
   1250 	  WM_T_82576,		WMP_F_SERDES },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1253 	  "82580 1000BaseT Ethernet",
   1254 	  WM_T_82580,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1256 	  "82580 1000BaseX Ethernet",
   1257 	  WM_T_82580,		WMP_F_FIBER },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1260 	  "82580 1000BaseT Ethernet (SERDES)",
   1261 	  WM_T_82580,		WMP_F_SERDES },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1264 	  "82580 gigabit Ethernet (SGMII)",
   1265 	  WM_T_82580,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1267 	  "82580 dual-1000BaseT Ethernet",
   1268 	  WM_T_82580,		WMP_F_COPPER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1271 	  "82580 quad-1000BaseX Ethernet",
   1272 	  WM_T_82580,		WMP_F_FIBER },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1275 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1276 	  WM_T_82580,		WMP_F_COPPER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1279 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1280 	  WM_T_82580,		WMP_F_SERDES },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1283 	  "DH89XXCC 1000BASE-KX Ethernet",
   1284 	  WM_T_82580,		WMP_F_SERDES },
   1285 
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1287 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1288 	  WM_T_82580,		WMP_F_SERDES },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1291 	  "I350 Gigabit Network Connection",
   1292 	  WM_T_I350,		WMP_F_COPPER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1295 	  "I350 Gigabit Fiber Network Connection",
   1296 	  WM_T_I350,		WMP_F_FIBER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1299 	  "I350 Gigabit Backplane Connection",
   1300 	  WM_T_I350,		WMP_F_SERDES },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1303 	  "I350 Quad Port Gigabit Ethernet",
   1304 	  WM_T_I350,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1307 	  "I350 Gigabit Connection",
   1308 	  WM_T_I350,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1311 	  "I354 Gigabit Ethernet (KX)",
   1312 	  WM_T_I354,		WMP_F_SERDES },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1315 	  "I354 Gigabit Ethernet (SGMII)",
   1316 	  WM_T_I354,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1319 	  "I354 Gigabit Ethernet (2.5G)",
   1320 	  WM_T_I354,		WMP_F_COPPER },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1323 	  "I210-T1 Ethernet Server Adapter",
   1324 	  WM_T_I210,		WMP_F_COPPER },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1327 	  "I210 Ethernet (Copper OEM)",
   1328 	  WM_T_I210,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1331 	  "I210 Ethernet (Copper IT)",
   1332 	  WM_T_I210,		WMP_F_COPPER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1335 	  "I210 Ethernet (FLASH less)",
   1336 	  WM_T_I210,		WMP_F_COPPER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1339 	  "I210 Gigabit Ethernet (Fiber)",
   1340 	  WM_T_I210,		WMP_F_FIBER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1343 	  "I210 Gigabit Ethernet (SERDES)",
   1344 	  WM_T_I210,		WMP_F_SERDES },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1347 	  "I210 Gigabit Ethernet (FLASH less)",
   1348 	  WM_T_I210,		WMP_F_SERDES },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1351 	  "I210 Gigabit Ethernet (SGMII)",
   1352 	  WM_T_I210,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1355 	  "I211 Ethernet (COPPER)",
   1356 	  WM_T_I211,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1358 	  "I217 V Ethernet Connection",
   1359 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1361 	  "I217 LM Ethernet Connection",
   1362 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1364 	  "I218 V Ethernet Connection",
   1365 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1367 	  "I218 V Ethernet Connection",
   1368 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1370 	  "I218 V Ethernet Connection",
   1371 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1373 	  "I218 LM Ethernet Connection",
   1374 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1376 	  "I218 LM Ethernet Connection",
   1377 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1379 	  "I218 LM Ethernet Connection",
   1380 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1381 #if 0
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1383 	  "I219 V Ethernet Connection",
   1384 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1386 	  "I219 V Ethernet Connection",
   1387 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1389 	  "I219 V Ethernet Connection",
   1390 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1392 	  "I219 V Ethernet Connection",
   1393 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1395 	  "I219 LM Ethernet Connection",
   1396 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1398 	  "I219 LM Ethernet Connection",
   1399 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1401 	  "I219 LM Ethernet Connection",
   1402 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1404 	  "I219 LM Ethernet Connection",
   1405 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1407 	  "I219 LM Ethernet Connection",
   1408 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1409 #endif
   1410 	{ 0,			0,
   1411 	  NULL,
   1412 	  0,			0 },
   1413 };
   1414 
   1415 /*
   1416  * Register read/write functions,
   1417  * other than CSR_{READ|WRITE}().
   1418  */
   1419 
   1420 #if 0 /* Not currently used */
   1421 static inline uint32_t
   1422 wm_io_read(struct wm_softc *sc, int reg)
   1423 {
   1424 
   1425 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1426 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1427 }
   1428 #endif
   1429 
   1430 static inline void
   1431 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1432 {
   1433 
   1434 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1435 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1436 }
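
/*
 * Illustration of the indirect access pair above: the I/O BAR is a
 * two-register window, an address latch at offset 0 and a data port at
 * offset 4, so every access is "write the register offset, then move
 * the data".  A hypothetical use under #if 0 (the choice of WMREG_CTRL
 * as the target is only an example for this sketch):
 */
#if 0
	/* Force a CTRL update through I/O space instead of MMIO. */
	if (sc->sc_flags & WM_F_IOH_VALID)
		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
#endif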
   1437 
   1438 static inline void
   1439 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1440     uint32_t data)
   1441 {
   1442 	uint32_t regval;
   1443 	int i;
   1444 
   1445 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1446 
   1447 	CSR_WRITE(sc, reg, regval);
   1448 
   1449 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1450 		delay(5);
   1451 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1452 			break;
   1453 	}
   1454 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1455 		aprint_error("%s: WARNING:"
   1456 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1457 		    device_xname(sc->sc_dev), reg);
   1458 	}
   1459 }
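
/*
 * Example of the helper above: the 8-bit payload and the register
 * offset are packed into one SCTL-style control word, then the READY
 * bit is polled at 5us intervals (delay() counts microseconds), with
 * SCTL_CTL_POLL_TIMEOUT bounding the wait so a wedged SERDES/PHY
 * cannot hang attach; on timeout only a warning is printed.
 */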
   1460 
   1461 static inline void
   1462 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1463 {
   1464 	wa->wa_low = htole32(v & 0xffffffffU);
   1465 	if (sizeof(bus_addr_t) == 8)
   1466 		wa->wa_high = htole32((uint64_t) v >> 32);
   1467 	else
   1468 		wa->wa_high = 0;
   1469 }
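
/*
 * Worked example: with a 64-bit bus_addr_t v == 0x123456789, the
 * descriptor is filled with wa_low == htole32(0x23456789) and
 * wa_high == htole32(0x1); on a 32-bit bus_addr_t build, wa_high is
 * simply forced to 0.  The htole32() conversions matter because the
 * chip always reads descriptors as little-endian, whatever the host
 * byte order is.
 */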
   1470 
   1471 /*
   1472  * Descriptor sync/init functions.
   1473  */
   1474 static inline void
   1475 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1476 {
   1477 	struct wm_softc *sc = txq->txq_sc;
   1478 
   1479 	/* If it will wrap around, sync to the end of the ring. */
   1480 	if ((start + num) > WM_NTXDESC(txq)) {
   1481 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1482 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1483 		    (WM_NTXDESC(txq) - start), ops);
   1484 		num -= (WM_NTXDESC(txq) - start);
   1485 		start = 0;
   1486 	}
   1487 
   1488 	/* Now sync whatever is left. */
   1489 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1490 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1491 }
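
/*
 * Example of the wrap handling above: with WM_NTXDESC(txq) == 256, a
 * call with start == 250 and num == 10 first syncs descriptors
 * 250..255, then wraps and syncs 0..3; the ring is one contiguous
 * DMA region that is used circularly.
 */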
   1492 
   1493 static inline void
   1494 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1495 {
   1496 	struct wm_softc *sc = rxq->rxq_sc;
   1497 
   1498 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1499 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1500 }
   1501 
   1502 static inline void
   1503 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1504 {
   1505 	struct wm_softc *sc = rxq->rxq_sc;
   1506 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1507 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1508 	struct mbuf *m = rxs->rxs_mbuf;
   1509 
   1510 	/*
   1511 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1512 	 * so that the payload after the Ethernet header is aligned
   1513 	 * to a 4-byte boundary.
   1514 	 *
   1515 	 * XXX BRAINDAMAGE ALERT!
   1516 	 * The stupid chip uses the same size for every buffer, which
   1517 	 * is set in the Receive Control register.  We are using the 2K
   1518 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1519 	 * reason, we can't "scoot" packets longer than the standard
   1520 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1521 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1522 	 * the upper layer copy the headers.
   1523 	 */
   1524 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1525 
   1526 	wm_set_dma_addr(&rxd->wrx_addr,
   1527 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1528 	rxd->wrx_len = 0;
   1529 	rxd->wrx_cksum = 0;
   1530 	rxd->wrx_status = 0;
   1531 	rxd->wrx_errors = 0;
   1532 	rxd->wrx_special = 0;
   1533 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1534 
   1535 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1536 }
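
/*
 * Example of the align_tweak above: with sc_align_tweak == 2, the
 * 14-byte Ethernet header occupies buffer offsets 2..15, so the IP
 * header that follows begins at offset 16, 4-byte aligned.  With a
 * tweak of 0 (large frames on strict-alignment machines), alignment
 * is instead fixed up by header copies in the upper layers.
 */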
   1537 
   1538 /*
   1539  * Device driver interface functions and commonly used functions.
   1540  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1541  */
   1542 
   1543 /* Lookup supported device table */
   1544 static const struct wm_product *
   1545 wm_lookup(const struct pci_attach_args *pa)
   1546 {
   1547 	const struct wm_product *wmp;
   1548 
   1549 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1550 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1551 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1552 			return wmp;
   1553 	}
   1554 	return NULL;
   1555 }
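
/*
 * Note: the wm_products[] table ends with an all-zero entry, and
 * wm_lookup() uses wmp_name == NULL as the sentinel, so an ID that is
 * not in the table simply fails to match instead of walking off the
 * end of the array.
 */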
   1556 
   1557 /* The match function (ca_match) */
   1558 static int
   1559 wm_match(device_t parent, cfdata_t cf, void *aux)
   1560 {
   1561 	struct pci_attach_args *pa = aux;
   1562 
   1563 	if (wm_lookup(pa) != NULL)
   1564 		return 1;
   1565 
   1566 	return 0;
   1567 }
   1568 
   1569 /* The attach function (ca_attach) */
   1570 static void
   1571 wm_attach(device_t parent, device_t self, void *aux)
   1572 {
   1573 	struct wm_softc *sc = device_private(self);
   1574 	struct pci_attach_args *pa = aux;
   1575 	prop_dictionary_t dict;
   1576 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1577 	pci_chipset_tag_t pc = pa->pa_pc;
   1578 	int counts[PCI_INTR_TYPE_SIZE];
   1579 	pci_intr_type_t max_type;
   1580 	const char *eetype, *xname;
   1581 	bus_space_tag_t memt;
   1582 	bus_space_handle_t memh;
   1583 	bus_size_t memsize;
   1584 	int memh_valid;
   1585 	int i, error;
   1586 	const struct wm_product *wmp;
   1587 	prop_data_t ea;
   1588 	prop_number_t pn;
   1589 	uint8_t enaddr[ETHER_ADDR_LEN];
   1590 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1591 	pcireg_t preg, memtype;
   1592 	uint16_t eeprom_data, apme_mask;
   1593 	bool force_clear_smbi;
   1594 	uint32_t link_mode;
   1595 	uint32_t reg;
   1596 
   1597 	sc->sc_dev = self;
   1598 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1599 	sc->sc_stopping = false;
   1600 
   1601 	wmp = wm_lookup(pa);
   1602 #ifdef DIAGNOSTIC
   1603 	if (wmp == NULL) {
   1604 		printf("\n");
   1605 		panic("wm_attach: impossible");
   1606 	}
   1607 #endif
   1608 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1609 
   1610 	sc->sc_pc = pa->pa_pc;
   1611 	sc->sc_pcitag = pa->pa_tag;
   1612 
   1613 	if (pci_dma64_available(pa))
   1614 		sc->sc_dmat = pa->pa_dmat64;
   1615 	else
   1616 		sc->sc_dmat = pa->pa_dmat;
   1617 
   1618 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1619 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1620 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1621 
   1622 	sc->sc_type = wmp->wmp_type;
   1623 
   1624 	/* Set default function pointers */
   1625 	sc->phy.acquire = wm_get_null;
   1626 	sc->phy.release = wm_put_null;
   1627 
   1628 	if (sc->sc_type < WM_T_82543) {
   1629 		if (sc->sc_rev < 2) {
   1630 			aprint_error_dev(sc->sc_dev,
   1631 			    "i82542 must be at least rev. 2\n");
   1632 			return;
   1633 		}
   1634 		if (sc->sc_rev < 3)
   1635 			sc->sc_type = WM_T_82542_2_0;
   1636 	}
   1637 
   1638 	/*
   1639 	 * Disable MSI for Errata:
   1640 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1641 	 *
   1642 	 *  82544: Errata 25
   1643 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1644 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1645 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1646 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1647 	 *
   1648 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1649 	 *
   1650 	 *  82571 & 82572: Errata 63
   1651 	 */
   1652 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1653 	    || (sc->sc_type == WM_T_82572))
   1654 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1655 
   1656 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1657 	    || (sc->sc_type == WM_T_82580)
   1658 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1659 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1660 		sc->sc_flags |= WM_F_NEWQUEUE;
   1661 
   1662 	/* Set device properties (mactype) */
   1663 	dict = device_properties(sc->sc_dev);
   1664 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1665 
   1666 	/*
   1667 	 * Map the device.  All devices support memory-mapped access,
   1668 	 * and it is really required for normal operation.
   1669 	 */
   1670 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1671 	switch (memtype) {
   1672 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1673 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1674 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1675 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1676 		break;
   1677 	default:
   1678 		memh_valid = 0;
   1679 		break;
   1680 	}
   1681 
   1682 	if (memh_valid) {
   1683 		sc->sc_st = memt;
   1684 		sc->sc_sh = memh;
   1685 		sc->sc_ss = memsize;
   1686 	} else {
   1687 		aprint_error_dev(sc->sc_dev,
   1688 		    "unable to map device registers\n");
   1689 		return;
   1690 	}
   1691 
   1692 	/*
   1693 	 * In addition, i82544 and later support I/O mapped indirect
   1694 	 * register access.  It is not desirable (nor supported in
   1695 	 * this driver) to use it for normal operation, though it is
   1696 	 * required to work around bugs in some chip versions.
   1697 	 */
   1698 	if (sc->sc_type >= WM_T_82544) {
   1699 		/* First we have to find the I/O BAR. */
   1700 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1701 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1702 			if (memtype == PCI_MAPREG_TYPE_IO)
   1703 				break;
   1704 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1705 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1706 				i += 4;	/* skip high bits, too */
   1707 		}
   1708 		if (i < PCI_MAPREG_END) {
   1709 			/*
   1710 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
   1711 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
   1712 			 * That's no problem, because those newer chips don't
   1713 			 * have this bug.
   1714 			 *
   1715 			 * The i8254x apparently doesn't respond when the
   1716 			 * I/O BAR is 0, which looks as if it has not been
   1717 			 * configured.
   1718 			 */
   1719 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1720 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1721 				aprint_error_dev(sc->sc_dev,
   1722 				    "WARNING: I/O BAR at zero.\n");
   1723 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1724 					0, &sc->sc_iot, &sc->sc_ioh,
   1725 					NULL, &sc->sc_ios) == 0) {
   1726 				sc->sc_flags |= WM_F_IOH_VALID;
   1727 			} else {
   1728 				aprint_error_dev(sc->sc_dev,
   1729 				    "WARNING: unable to map I/O space\n");
   1730 			}
   1731 		}
   1732 
   1733 	}
   1734 
   1735 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1736 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1737 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1738 	if (sc->sc_type < WM_T_82542_2_1)
   1739 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1740 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1741 
   1742 	/* power up chip */
   1743 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1744 	    NULL)) && error != EOPNOTSUPP) {
   1745 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1746 		return;
   1747 	}
   1748 
   1749 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1750 
   1751 	/* Allocation settings */
   1752 	max_type = PCI_INTR_TYPE_MSIX;
   1753 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1754 	counts[PCI_INTR_TYPE_MSI] = 1;
   1755 	counts[PCI_INTR_TYPE_INTX] = 1;
   1756 
   1757 alloc_retry:
   1758 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1759 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1760 		return;
   1761 	}
   1762 
   1763 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1764 		error = wm_setup_msix(sc);
   1765 		if (error) {
   1766 			pci_intr_release(pc, sc->sc_intrs,
   1767 			    counts[PCI_INTR_TYPE_MSIX]);
   1768 
   1769 			/* Setup for MSI: Disable MSI-X */
   1770 			max_type = PCI_INTR_TYPE_MSI;
   1771 			counts[PCI_INTR_TYPE_MSI] = 1;
   1772 			counts[PCI_INTR_TYPE_INTX] = 1;
   1773 			goto alloc_retry;
   1774 		}
   1775 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1776 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1777 		error = wm_setup_legacy(sc);
   1778 		if (error) {
   1779 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1780 			    counts[PCI_INTR_TYPE_MSI]);
   1781 
   1782 			/* The next try is for INTx: Disable MSI */
   1783 			max_type = PCI_INTR_TYPE_INTX;
   1784 			counts[PCI_INTR_TYPE_INTX] = 1;
   1785 			goto alloc_retry;
   1786 		}
   1787 	} else {
   1788 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1789 		error = wm_setup_legacy(sc);
   1790 		if (error) {
   1791 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1792 			    counts[PCI_INTR_TYPE_INTX]);
   1793 			return;
   1794 		}
   1795 	}
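
	/*
	 * At this point interrupt setup has settled on one of MSI-X
	 * (possibly multiqueue), MSI, or INTx, in that order of
	 * preference: each failed wm_setup_*() releases its vectors
	 * and jumps back to alloc_retry with the next, less capable
	 * interrupt type.
	 */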
   1796 
   1797 	/*
   1798 	 * Check the function ID (unit number of the chip).
   1799 	 */
   1800 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1801 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1802 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1803 	    || (sc->sc_type == WM_T_82580)
   1804 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1805 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1806 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1807 	else
   1808 		sc->sc_funcid = 0;
   1809 
   1810 	/*
   1811 	 * Determine a few things about the bus we're connected to.
   1812 	 */
   1813 	if (sc->sc_type < WM_T_82543) {
   1814 		/* We don't really know the bus characteristics here. */
   1815 		sc->sc_bus_speed = 33;
   1816 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1817 		/*
   1818 		 * CSA (Communication Streaming Architecture) is about as fast
   1819 		 * as a 32-bit 66MHz PCI bus.
   1820 		 */
   1821 		sc->sc_flags |= WM_F_CSA;
   1822 		sc->sc_bus_speed = 66;
   1823 		aprint_verbose_dev(sc->sc_dev,
   1824 		    "Communication Streaming Architecture\n");
   1825 		if (sc->sc_type == WM_T_82547) {
   1826 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1827 			callout_setfunc(&sc->sc_txfifo_ch,
   1828 					wm_82547_txfifo_stall, sc);
   1829 			aprint_verbose_dev(sc->sc_dev,
   1830 			    "using 82547 Tx FIFO stall work-around\n");
   1831 		}
   1832 	} else if (sc->sc_type >= WM_T_82571) {
   1833 		sc->sc_flags |= WM_F_PCIE;
   1834 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1835 		    && (sc->sc_type != WM_T_ICH10)
   1836 		    && (sc->sc_type != WM_T_PCH)
   1837 		    && (sc->sc_type != WM_T_PCH2)
   1838 		    && (sc->sc_type != WM_T_PCH_LPT)
   1839 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1840 			/* ICH* and PCH* have no PCIe capability registers */
   1841 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1842 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1843 				NULL) == 0)
   1844 				aprint_error_dev(sc->sc_dev,
   1845 				    "unable to find PCIe capability\n");
   1846 		}
   1847 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1848 	} else {
   1849 		reg = CSR_READ(sc, WMREG_STATUS);
   1850 		if (reg & STATUS_BUS64)
   1851 			sc->sc_flags |= WM_F_BUS64;
   1852 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1853 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1854 
   1855 			sc->sc_flags |= WM_F_PCIX;
   1856 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1857 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1858 				aprint_error_dev(sc->sc_dev,
   1859 				    "unable to find PCIX capability\n");
   1860 			else if (sc->sc_type != WM_T_82545_3 &&
   1861 				 sc->sc_type != WM_T_82546_3) {
   1862 				/*
   1863 				 * Work around a problem caused by the BIOS
   1864 				 * setting the max memory read byte count
   1865 				 * incorrectly.
   1866 				 */
   1867 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1868 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1869 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1870 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1871 
   1872 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1873 				    PCIX_CMD_BYTECNT_SHIFT;
   1874 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1875 				    PCIX_STATUS_MAXB_SHIFT;
   1876 				if (bytecnt > maxb) {
   1877 					aprint_verbose_dev(sc->sc_dev,
   1878 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1879 					    512 << bytecnt, 512 << maxb);
   1880 					pcix_cmd = (pcix_cmd &
   1881 					    ~PCIX_CMD_BYTECNT_MASK) |
   1882 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1883 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1884 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1885 					    pcix_cmd);
   1886 				}
   1887 			}
   1888 		}
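
		/*
		 * Worked example of the MMRBC fixup above: the fields
		 * encode 512 << n bytes, so bytecnt == 3 (4096 bytes)
		 * against maxb == 2 (2048 bytes) gets rewritten down
		 * to 2048 before the chip issues long burst reads.
		 */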
   1889 		/*
   1890 		 * The quad port adapter is special; it has a PCIX-PCIX
   1891 		 * bridge on the board, and can run the secondary bus at
   1892 		 * a higher speed.
   1893 		 */
   1894 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1895 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1896 								      : 66;
   1897 		} else if (sc->sc_flags & WM_F_PCIX) {
   1898 			switch (reg & STATUS_PCIXSPD_MASK) {
   1899 			case STATUS_PCIXSPD_50_66:
   1900 				sc->sc_bus_speed = 66;
   1901 				break;
   1902 			case STATUS_PCIXSPD_66_100:
   1903 				sc->sc_bus_speed = 100;
   1904 				break;
   1905 			case STATUS_PCIXSPD_100_133:
   1906 				sc->sc_bus_speed = 133;
   1907 				break;
   1908 			default:
   1909 				aprint_error_dev(sc->sc_dev,
   1910 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1911 				    reg & STATUS_PCIXSPD_MASK);
   1912 				sc->sc_bus_speed = 66;
   1913 				break;
   1914 			}
   1915 		} else
   1916 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1917 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1918 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1919 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1920 	}
   1921 
   1922 	/* clear interesting stat counters */
   1923 	CSR_READ(sc, WMREG_COLC);
   1924 	CSR_READ(sc, WMREG_RXERRC);
   1925 
   1926 	/* get PHY control from SMBus to PCIe */
   1927 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1928 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1929 		wm_smbustopci(sc);
   1930 
   1931 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1932 	    || (sc->sc_type >= WM_T_ICH8))
   1933 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1934 	if (sc->sc_type >= WM_T_ICH8)
   1935 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1936 
   1937 	/* Set PHY, NVM mutex related stuff */
   1938 	switch (sc->sc_type) {
   1939 	case WM_T_82542_2_0:
   1940 	case WM_T_82542_2_1:
   1941 	case WM_T_82543:
   1942 	case WM_T_82544:
   1943 		/* Microwire */
   1944 		sc->sc_nvm_wordsize = 64;
   1945 		sc->sc_nvm_addrbits = 6;
   1946 		break;
   1947 	case WM_T_82540:
   1948 	case WM_T_82545:
   1949 	case WM_T_82545_3:
   1950 	case WM_T_82546:
   1951 	case WM_T_82546_3:
   1952 		/* Microwire */
   1953 		reg = CSR_READ(sc, WMREG_EECD);
   1954 		if (reg & EECD_EE_SIZE) {
   1955 			sc->sc_nvm_wordsize = 256;
   1956 			sc->sc_nvm_addrbits = 8;
   1957 		} else {
   1958 			sc->sc_nvm_wordsize = 64;
   1959 			sc->sc_nvm_addrbits = 6;
   1960 		}
   1961 		sc->sc_flags |= WM_F_LOCK_EECD;
   1962 		break;
   1963 	case WM_T_82541:
   1964 	case WM_T_82541_2:
   1965 	case WM_T_82547:
   1966 	case WM_T_82547_2:
   1967 		sc->sc_flags |= WM_F_LOCK_EECD;
   1968 		reg = CSR_READ(sc, WMREG_EECD);
   1969 		if (reg & EECD_EE_TYPE) {
   1970 			/* SPI */
   1971 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1972 			wm_nvm_set_addrbits_size_eecd(sc);
   1973 		} else {
   1974 			/* Microwire */
   1975 			if ((reg & EECD_EE_ABITS) != 0) {
   1976 				sc->sc_nvm_wordsize = 256;
   1977 				sc->sc_nvm_addrbits = 8;
   1978 			} else {
   1979 				sc->sc_nvm_wordsize = 64;
   1980 				sc->sc_nvm_addrbits = 6;
   1981 			}
   1982 		}
   1983 		break;
   1984 	case WM_T_82571:
   1985 	case WM_T_82572:
   1986 		/* SPI */
   1987 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1988 		wm_nvm_set_addrbits_size_eecd(sc);
   1989 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1990 		sc->phy.acquire = wm_get_swsm_semaphore;
   1991 		sc->phy.release = wm_put_swsm_semaphore;
   1992 		break;
   1993 	case WM_T_82573:
   1994 	case WM_T_82574:
   1995 	case WM_T_82583:
   1996 		if (sc->sc_type == WM_T_82573) {
   1997 			sc->sc_flags |= WM_F_LOCK_SWSM;
   1998 			sc->phy.acquire = wm_get_swsm_semaphore;
   1999 			sc->phy.release = wm_put_swsm_semaphore;
   2000 		} else {
   2001 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2002 			/* Both PHY and NVM use the same semaphore. */
   2003 			sc->phy.acquire
   2004 			    = wm_get_swfwhw_semaphore;
   2005 			sc->phy.release
   2006 			    = wm_put_swfwhw_semaphore;
   2007 		}
   2008 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2009 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2010 			sc->sc_nvm_wordsize = 2048;
   2011 		} else {
   2012 			/* SPI */
   2013 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2014 			wm_nvm_set_addrbits_size_eecd(sc);
   2015 		}
   2016 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2017 		break;
   2018 	case WM_T_82575:
   2019 	case WM_T_82576:
   2020 	case WM_T_82580:
   2021 	case WM_T_I350:
   2022 	case WM_T_I354:
   2023 	case WM_T_80003:
   2024 		/* SPI */
   2025 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2026 		wm_nvm_set_addrbits_size_eecd(sc);
   2027 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2028 		    | WM_F_LOCK_SWSM;
   2029 		sc->phy.acquire = wm_get_phy_82575;
   2030 		sc->phy.release = wm_put_phy_82575;
   2031 		break;
   2032 	case WM_T_ICH8:
   2033 	case WM_T_ICH9:
   2034 	case WM_T_ICH10:
   2035 	case WM_T_PCH:
   2036 	case WM_T_PCH2:
   2037 	case WM_T_PCH_LPT:
   2038 		/* FLASH */
   2039 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2040 		sc->sc_nvm_wordsize = 2048;
   2041 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2042 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2043 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2044 			aprint_error_dev(sc->sc_dev,
   2045 			    "can't map FLASH registers\n");
   2046 			goto out;
   2047 		}
   2048 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2049 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2050 		    ICH_FLASH_SECTOR_SIZE;
   2051 		sc->sc_ich8_flash_bank_size =
   2052 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2053 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2054 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2055 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
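		/*
		 * Worked example, assuming 4 KB flash sectors: a
		 * GFPREG with base field 1 and limit field 16 puts the
		 * NVM region at byte offset 4096 and spans 16 sectors
		 * (64 KB); split across the 2 banks at 2 bytes per
		 * word, that is 16384 words per bank.
		 */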
   2056 		sc->sc_flashreg_offset = 0;
   2057 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2058 		sc->phy.release = wm_put_swflag_ich8lan;
   2059 		break;
   2060 	case WM_T_PCH_SPT:
   2061 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2062 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2063 		sc->sc_flasht = sc->sc_st;
   2064 		sc->sc_flashh = sc->sc_sh;
   2065 		sc->sc_ich8_flash_base = 0;
   2066 		sc->sc_nvm_wordsize =
   2067 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2068 			* NVM_SIZE_MULTIPLIER;
   2069 		/* It is a size in bytes; we want words */
   2070 		sc->sc_nvm_wordsize /= 2;
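		/*
		 * Worked example, assuming NVM_SIZE_MULTIPLIER is 4096:
		 * a STRAP size field of 7 yields (7 + 1) * 4096 bytes,
		 * i.e. 16384 16-bit words, half of which belongs to
		 * each of the two banks assumed below.
		 */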
   2071 		/* assume 2 banks */
   2072 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2073 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2075 		sc->phy.release = wm_put_swflag_ich8lan;
   2076 		break;
   2077 	case WM_T_I210:
   2078 	case WM_T_I211:
   2079 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2080 			wm_nvm_set_addrbits_size_eecd(sc);
   2081 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2082 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2083 		} else {
   2084 			sc->sc_nvm_wordsize = INVM_SIZE;
   2085 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2086 		}
   2087 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2088 		sc->phy.acquire = wm_get_phy_82575;
   2089 		sc->phy.release = wm_put_phy_82575;
   2090 		break;
   2091 	default:
   2092 		break;
   2093 	}
   2094 
   2095 	/* Reset the chip to a known state. */
   2096 	wm_reset(sc);
   2097 
   2098 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2099 	switch (sc->sc_type) {
   2100 	case WM_T_82571:
   2101 	case WM_T_82572:
   2102 		reg = CSR_READ(sc, WMREG_SWSM2);
   2103 		if ((reg & SWSM2_LOCK) == 0) {
   2104 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2105 			force_clear_smbi = true;
   2106 		} else
   2107 			force_clear_smbi = false;
   2108 		break;
   2109 	case WM_T_82573:
   2110 	case WM_T_82574:
   2111 	case WM_T_82583:
   2112 		force_clear_smbi = true;
   2113 		break;
   2114 	default:
   2115 		force_clear_smbi = false;
   2116 		break;
   2117 	}
   2118 	if (force_clear_smbi) {
   2119 		reg = CSR_READ(sc, WMREG_SWSM);
   2120 		if ((reg & SWSM_SMBI) != 0)
   2121 			aprint_error_dev(sc->sc_dev,
   2122 			    "Please update the Bootagent\n");
   2123 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2124 	}
   2125 
   2126 	/*
   2127 	 * Defer printing the EEPROM type until after verifying the checksum.
   2128 	 * This allows the EEPROM type to be printed correctly in the case
   2129 	 * that no EEPROM is attached.
   2130 	 */
   2131 	/*
   2132 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2133 	 * this for later, so we can fail future reads from the EEPROM.
   2134 	 */
   2135 	if (wm_nvm_validate_checksum(sc)) {
   2136 		/*
   2137 		 * Check again, because some PCI-e parts fail the
   2138 		 * first check due to the link being in a sleep state.
   2139 		 */
   2140 		if (wm_nvm_validate_checksum(sc))
   2141 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2142 	}
   2143 
   2144 	/* Set device properties (macflags) */
   2145 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2146 
   2147 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2148 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2149 	else {
   2150 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2151 		    sc->sc_nvm_wordsize);
   2152 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2153 			aprint_verbose("iNVM");
   2154 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2155 			aprint_verbose("FLASH(HW)");
   2156 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2157 			aprint_verbose("FLASH");
   2158 		else {
   2159 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2160 				eetype = "SPI";
   2161 			else
   2162 				eetype = "MicroWire";
   2163 			aprint_verbose("(%d address bits) %s EEPROM",
   2164 			    sc->sc_nvm_addrbits, eetype);
   2165 		}
   2166 	}
   2167 	wm_nvm_version(sc);
   2168 	aprint_verbose("\n");
   2169 
   2170 	/* Check for I21[01] PLL workaround */
   2171 	if (sc->sc_type == WM_T_I210)
   2172 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2173 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2174 		/* NVM image release 3.25 has a workaround */
   2175 		if ((sc->sc_nvm_ver_major < 3)
   2176 		    || ((sc->sc_nvm_ver_major == 3)
   2177 			&& (sc->sc_nvm_ver_minor < 25))) {
   2178 			aprint_verbose_dev(sc->sc_dev,
   2179 			    "ROM image version %d.%d is older than 3.25\n",
   2180 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2181 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2182 		}
   2183 	}
   2184 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2185 		wm_pll_workaround_i210(sc);
   2186 
   2187 	wm_get_wakeup(sc);
   2188 	switch (sc->sc_type) {
   2189 	case WM_T_82571:
   2190 	case WM_T_82572:
   2191 	case WM_T_82573:
   2192 	case WM_T_82574:
   2193 	case WM_T_82583:
   2194 	case WM_T_80003:
   2195 	case WM_T_ICH8:
   2196 	case WM_T_ICH9:
   2197 	case WM_T_ICH10:
   2198 	case WM_T_PCH:
   2199 	case WM_T_PCH2:
   2200 	case WM_T_PCH_LPT:
   2201 	case WM_T_PCH_SPT:
   2202 		/* Non-AMT based hardware can now take control from firmware */
   2203 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2204 			wm_get_hw_control(sc);
   2205 		break;
   2206 	default:
   2207 		break;
   2208 	}
   2209 
   2210 	/*
   2211 	 * Read the Ethernet address from the EEPROM, unless it is
   2212 	 * found first in the device properties.
   2213 	 */
   2214 	ea = prop_dictionary_get(dict, "mac-address");
   2215 	if (ea != NULL) {
   2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2219 	} else {
   2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2221 			aprint_error_dev(sc->sc_dev,
   2222 			    "unable to read Ethernet address\n");
   2223 			goto out;
   2224 		}
   2225 	}
   2226 
   2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2228 	    ether_sprintf(enaddr));
   2229 
   2230 	/*
   2231 	 * Read the config info from the EEPROM, and set up various
   2232 	 * bits in the control registers based on their contents.
   2233 	 */
   2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2235 	if (pn != NULL) {
   2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2238 	} else {
   2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2241 			goto out;
   2242 		}
   2243 	}
   2244 
   2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2246 	if (pn != NULL) {
   2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2249 	} else {
   2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2252 			goto out;
   2253 		}
   2254 	}
   2255 
   2256 	/* check for WM_F_WOL */
   2257 	switch (sc->sc_type) {
   2258 	case WM_T_82542_2_0:
   2259 	case WM_T_82542_2_1:
   2260 	case WM_T_82543:
   2261 		/* dummy? */
   2262 		eeprom_data = 0;
   2263 		apme_mask = NVM_CFG3_APME;
   2264 		break;
   2265 	case WM_T_82544:
   2266 		apme_mask = NVM_CFG2_82544_APM_EN;
   2267 		eeprom_data = cfg2;
   2268 		break;
   2269 	case WM_T_82546:
   2270 	case WM_T_82546_3:
   2271 	case WM_T_82571:
   2272 	case WM_T_82572:
   2273 	case WM_T_82573:
   2274 	case WM_T_82574:
   2275 	case WM_T_82583:
   2276 	case WM_T_80003:
   2277 	default:
   2278 		apme_mask = NVM_CFG3_APME;
   2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2281 		break;
   2282 	case WM_T_82575:
   2283 	case WM_T_82576:
   2284 	case WM_T_82580:
   2285 	case WM_T_I350:
   2286 	case WM_T_I354: /* XXX ok? */
   2287 	case WM_T_ICH8:
   2288 	case WM_T_ICH9:
   2289 	case WM_T_ICH10:
   2290 	case WM_T_PCH:
   2291 	case WM_T_PCH2:
   2292 	case WM_T_PCH_LPT:
   2293 	case WM_T_PCH_SPT:
   2294 		/* XXX The funcid should be checked on some devices */
   2295 		apme_mask = WUC_APME;
   2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2297 		break;
   2298 	}
   2299 
   2300 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2301 	if ((eeprom_data & apme_mask) != 0)
   2302 		sc->sc_flags |= WM_F_WOL;
   2303 #ifdef WM_DEBUG
   2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2305 		printf("WOL\n");
   2306 #endif
   2307 
   2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2309 		/* Check NVM for autonegotiation */
   2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2313 		}
   2314 	}
   2315 
   2316 	/*
   2317 	 * XXX need special handling for some multiple-port cards
   2318 	 * to disable a particular port.
   2319 	 */
   2320 
   2321 	if (sc->sc_type >= WM_T_82544) {
   2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2323 		if (pn != NULL) {
   2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2326 		} else {
   2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2328 				aprint_error_dev(sc->sc_dev,
   2329 				    "unable to read SWDPIN\n");
   2330 				goto out;
   2331 			}
   2332 		}
   2333 	}
   2334 
   2335 	if (cfg1 & NVM_CFG1_ILOS)
   2336 		sc->sc_ctrl |= CTRL_ILOS;
   2337 
   2338 	/*
   2339 	 * XXX
   2340 	 * This code isn't correct, because pins 2 and 3 are located
   2341 	 * at different positions on newer chips. Check all the datasheets.
   2342 	 *
   2343 	 * Until this problem is resolved, only do this for chips < 82580.
   2344 	 */
   2345 	if (sc->sc_type <= WM_T_82580) {
   2346 		if (sc->sc_type >= WM_T_82544) {
   2347 			sc->sc_ctrl |=
   2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2349 			    CTRL_SWDPIO_SHIFT;
   2350 			sc->sc_ctrl |=
   2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2352 			    CTRL_SWDPINS_SHIFT;
   2353 		} else {
   2354 			sc->sc_ctrl |=
   2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2356 			    CTRL_SWDPIO_SHIFT;
   2357 		}
   2358 	}
   2359 
   2360 	/* XXX For other than 82580? */
   2361 	if (sc->sc_type == WM_T_82580) {
   2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2363 		if (nvmword & __BIT(13))
   2364 			sc->sc_ctrl |= CTRL_ILOS;
   2365 	}
   2366 
   2367 #if 0
   2368 	if (sc->sc_type >= WM_T_82544) {
   2369 		if (cfg1 & NVM_CFG1_IPS0)
   2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2371 		if (cfg1 & NVM_CFG1_IPS1)
   2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2373 		sc->sc_ctrl_ext |=
   2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2375 		    CTRL_EXT_SWDPIO_SHIFT;
   2376 		sc->sc_ctrl_ext |=
   2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2378 		    CTRL_EXT_SWDPINS_SHIFT;
   2379 	} else {
   2380 		sc->sc_ctrl_ext |=
   2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2382 		    CTRL_EXT_SWDPIO_SHIFT;
   2383 	}
   2384 #endif
   2385 
   2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2387 #if 0
   2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2389 #endif
   2390 
   2391 	if (sc->sc_type == WM_T_PCH) {
   2392 		uint16_t val;
   2393 
   2394 		/* Save the NVM K1 bit setting */
   2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2396 
   2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2398 			sc->sc_nvm_k1_enabled = 1;
   2399 		else
   2400 			sc->sc_nvm_k1_enabled = 0;
   2401 	}
   2402 
   2403 	/*
   2404 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2405 	 * media structures accordingly.
   2406 	 */
   2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2414 	} else if (sc->sc_type < WM_T_82543 ||
   2415 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2416 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2417 			aprint_error_dev(sc->sc_dev,
   2418 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2419 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2420 		}
   2421 		wm_tbi_mediainit(sc);
   2422 	} else {
   2423 		switch (sc->sc_type) {
   2424 		case WM_T_82575:
   2425 		case WM_T_82576:
   2426 		case WM_T_82580:
   2427 		case WM_T_I350:
   2428 		case WM_T_I354:
   2429 		case WM_T_I210:
   2430 		case WM_T_I211:
   2431 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2432 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2433 			switch (link_mode) {
   2434 			case CTRL_EXT_LINK_MODE_1000KX:
   2435 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2436 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2437 				break;
   2438 			case CTRL_EXT_LINK_MODE_SGMII:
   2439 				if (wm_sgmii_uses_mdio(sc)) {
   2440 					aprint_verbose_dev(sc->sc_dev,
   2441 					    "SGMII(MDIO)\n");
   2442 					sc->sc_flags |= WM_F_SGMII;
   2443 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2444 					break;
   2445 				}
   2446 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2447 				/*FALLTHROUGH*/
   2448 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2449 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2450 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2451 					if (link_mode
   2452 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2453 						sc->sc_mediatype
   2454 						    = WM_MEDIATYPE_COPPER;
   2455 						sc->sc_flags |= WM_F_SGMII;
   2456 					} else {
   2457 						sc->sc_mediatype
   2458 						    = WM_MEDIATYPE_SERDES;
   2459 						aprint_verbose_dev(sc->sc_dev,
   2460 						    "SERDES\n");
   2461 					}
   2462 					break;
   2463 				}
   2464 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2465 					aprint_verbose_dev(sc->sc_dev,
   2466 					    "SERDES\n");
   2467 
   2468 				/* Change current link mode setting */
   2469 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2470 				switch (sc->sc_mediatype) {
   2471 				case WM_MEDIATYPE_COPPER:
   2472 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2473 					break;
   2474 				case WM_MEDIATYPE_SERDES:
   2475 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2476 					break;
   2477 				default:
   2478 					break;
   2479 				}
   2480 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2481 				break;
   2482 			case CTRL_EXT_LINK_MODE_GMII:
   2483 			default:
   2484 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2485 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2486 				break;
   2487 			}
   2488 
   2489 			reg &= ~CTRL_EXT_I2C_ENA;
   2490 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2491 				reg |= CTRL_EXT_I2C_ENA;
   2494 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2495 
   2496 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2497 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2498 			else
   2499 				wm_tbi_mediainit(sc);
   2500 			break;
   2501 		default:
   2502 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2503 				aprint_error_dev(sc->sc_dev,
   2504 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2505 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2506 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2507 		}
   2508 	}
   2509 
   2510 	ifp = &sc->sc_ethercom.ec_if;
   2511 	xname = device_xname(sc->sc_dev);
   2512 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2513 	ifp->if_softc = sc;
   2514 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2515 	ifp->if_extflags = IFEF_START_MPSAFE;
   2516 	ifp->if_ioctl = wm_ioctl;
   2517 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2518 		ifp->if_start = wm_nq_start;
   2519 		if (sc->sc_nqueues > 1)
   2520 			ifp->if_transmit = wm_nq_transmit;
   2521 	} else
   2522 		ifp->if_start = wm_start;
   2523 	ifp->if_watchdog = wm_watchdog;
   2524 	ifp->if_init = wm_init;
   2525 	ifp->if_stop = wm_stop;
   2526 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2527 	IFQ_SET_READY(&ifp->if_snd);
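
	/*
	 * Note on the start/transmit split above: only WM_F_NEWQUEUE
	 * chips with more than one queue install if_transmit, which
	 * lets the stack hand packets to per-queue transmit paths;
	 * all other chips funnel everything through the single
	 * if_start path.
	 */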
   2528 
   2529 	/* Check for jumbo frame */
   2530 	switch (sc->sc_type) {
   2531 	case WM_T_82573:
   2532 		/* XXX limited to 9234 if ASPM is disabled */
   2533 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2534 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2535 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2536 		break;
   2537 	case WM_T_82571:
   2538 	case WM_T_82572:
   2539 	case WM_T_82574:
   2540 	case WM_T_82575:
   2541 	case WM_T_82576:
   2542 	case WM_T_82580:
   2543 	case WM_T_I350:
   2544 	case WM_T_I354: /* XXX ok? */
   2545 	case WM_T_I210:
   2546 	case WM_T_I211:
   2547 	case WM_T_80003:
   2548 	case WM_T_ICH9:
   2549 	case WM_T_ICH10:
   2550 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2551 	case WM_T_PCH_LPT:
   2552 	case WM_T_PCH_SPT:
   2553 		/* XXX limited to 9234 */
   2554 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2555 		break;
   2556 	case WM_T_PCH:
   2557 		/* XXX limited to 4096 */
   2558 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2559 		break;
   2560 	case WM_T_82542_2_0:
   2561 	case WM_T_82542_2_1:
   2562 	case WM_T_82583:
   2563 	case WM_T_ICH8:
   2564 		/* No support for jumbo frame */
   2565 		break;
   2566 	default:
   2567 		/* ETHER_MAX_LEN_JUMBO */
   2568 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2569 		break;
   2570 	}
   2571 
   2572 	/* If we're an i82543 or greater, we can support VLANs. */
   2573 	if (sc->sc_type >= WM_T_82543)
   2574 		sc->sc_ethercom.ec_capabilities |=
   2575 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2576 
   2577 	/*
   2578 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2579 	 * on i82543 and later.
   2580 	 */
   2581 	if (sc->sc_type >= WM_T_82543) {
   2582 		ifp->if_capabilities |=
   2583 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2584 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2585 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2586 		    IFCAP_CSUM_TCPv6_Tx |
   2587 		    IFCAP_CSUM_UDPv6_Tx;
   2588 	}
   2589 
   2590 	/*
   2591 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2592 	 *
   2593 	 *	82541GI (8086:1076) ... no
   2594 	 *	82572EI (8086:10b9) ... yes
   2595 	 */
   2596 	if (sc->sc_type >= WM_T_82571) {
   2597 		ifp->if_capabilities |=
   2598 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2599 	}
   2600 
   2601 	/*
   2602 	 * If we're an i82544 or greater (except i82547), we can do
   2603 	 * TCP segmentation offload.
   2604 	 */
   2605 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2606 		ifp->if_capabilities |= IFCAP_TSOv4;
   2607 	}
   2608 
   2609 	if (sc->sc_type >= WM_T_82571) {
   2610 		ifp->if_capabilities |= IFCAP_TSOv6;
   2611 	}
   2612 
   2613 #ifdef WM_MPSAFE
   2614 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2615 #else
   2616 	sc->sc_core_lock = NULL;
   2617 #endif
   2618 
   2619 	/* Attach the interface. */
   2620 	if_initialize(ifp);
   2621 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2622 	ether_ifattach(ifp, enaddr);
   2623 	if_register(ifp);
   2624 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2625 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2626 			  RND_FLAG_DEFAULT);
   2627 
   2628 #ifdef WM_EVENT_COUNTERS
   2629 	/* Attach event counters. */
   2630 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2631 	    NULL, xname, "linkintr");
   2632 
   2633 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2634 	    NULL, xname, "tx_xoff");
   2635 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2636 	    NULL, xname, "tx_xon");
   2637 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2638 	    NULL, xname, "rx_xoff");
   2639 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2640 	    NULL, xname, "rx_xon");
   2641 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2642 	    NULL, xname, "rx_macctl");
   2643 #endif /* WM_EVENT_COUNTERS */
   2644 
   2645 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2646 		pmf_class_network_register(self, ifp);
   2647 	else
   2648 		aprint_error_dev(self, "couldn't establish power handler\n");
   2649 
   2650 	sc->sc_flags |= WM_F_ATTACHED;
   2651  out:
   2652 	return;
   2653 }
   2654 
   2655 /* The detach function (ca_detach) */
   2656 static int
   2657 wm_detach(device_t self, int flags __unused)
   2658 {
   2659 	struct wm_softc *sc = device_private(self);
   2660 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2661 	int i;
   2662 
   2663 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2664 		return 0;
   2665 
   2666 	/* Stop the interface. Callouts are stopped in it. */
   2667 	wm_stop(ifp, 1);
   2668 
   2669 	pmf_device_deregister(self);
   2670 
   2671 	/* Tell the firmware about the release */
   2672 	WM_CORE_LOCK(sc);
   2673 	wm_release_manageability(sc);
   2674 	wm_release_hw_control(sc);
   2675 	WM_CORE_UNLOCK(sc);
   2676 
   2677 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2678 
   2679 	/* Delete all remaining media. */
   2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2681 
   2682 	ether_ifdetach(ifp);
   2683 	if_detach(ifp);
   2684 	if_percpuq_destroy(sc->sc_ipq);
   2685 
   2686 	/* Unload RX dmamaps and free mbufs */
   2687 	for (i = 0; i < sc->sc_nqueues; i++) {
   2688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2689 		mutex_enter(rxq->rxq_lock);
   2690 		wm_rxdrain(rxq);
   2691 		mutex_exit(rxq->rxq_lock);
   2692 	}
   2693 	/* The rxq locks must be released before the queues are freed below */
   2694 
   2695 	/* Disestablish the interrupt handler */
   2696 	for (i = 0; i < sc->sc_nintrs; i++) {
   2697 		if (sc->sc_ihs[i] != NULL) {
   2698 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2699 			sc->sc_ihs[i] = NULL;
   2700 		}
   2701 	}
   2702 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2703 
   2704 	wm_free_txrx_queues(sc);
   2705 
   2706 	/* Unmap the registers */
   2707 	if (sc->sc_ss) {
   2708 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2709 		sc->sc_ss = 0;
   2710 	}
   2711 	if (sc->sc_ios) {
   2712 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2713 		sc->sc_ios = 0;
   2714 	}
   2715 	if (sc->sc_flashs) {
   2716 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2717 		sc->sc_flashs = 0;
   2718 	}
   2719 
   2720 	if (sc->sc_core_lock)
   2721 		mutex_obj_free(sc->sc_core_lock);
   2722 	if (sc->sc_ich_phymtx)
   2723 		mutex_obj_free(sc->sc_ich_phymtx);
   2724 	if (sc->sc_ich_nvmmtx)
   2725 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2726 
   2727 	return 0;
   2728 }
   2729 
   2730 static bool
   2731 wm_suspend(device_t self, const pmf_qual_t *qual)
   2732 {
   2733 	struct wm_softc *sc = device_private(self);
   2734 
   2735 	wm_release_manageability(sc);
   2736 	wm_release_hw_control(sc);
   2737 #ifdef WM_WOL
   2738 	wm_enable_wakeup(sc);
   2739 #endif
   2740 
   2741 	return true;
   2742 }
   2743 
   2744 static bool
   2745 wm_resume(device_t self, const pmf_qual_t *qual)
   2746 {
   2747 	struct wm_softc *sc = device_private(self);
   2748 
   2749 	wm_init_manageability(sc);
   2750 
   2751 	return true;
   2752 }
   2753 
   2754 /*
   2755  * wm_watchdog:		[ifnet interface function]
   2756  *
   2757  *	Watchdog timer handler.
   2758  */
   2759 static void
   2760 wm_watchdog(struct ifnet *ifp)
   2761 {
   2762 	int qid;
   2763 	struct wm_softc *sc = ifp->if_softc;
   2764 
   2765 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2766 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2767 
   2768 		wm_watchdog_txq(ifp, txq);
   2769 	}
   2770 
   2771 	/* Reset the interface. */
   2772 	(void) wm_init(ifp);
   2773 
   2774 	/*
   2775 	 * There is still some upper-layer processing that calls
   2776 	 * ifp->if_start() directly, e.g. ALTQ.
   2777 	 */
   2778 	/* Try to get more packets going. */
   2779 	ifp->if_start(ifp);
   2780 }
   2781 
   2782 static void
   2783 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2784 {
   2785 	struct wm_softc *sc = ifp->if_softc;
   2786 
   2787 	/*
   2788 	 * Since we're using delayed interrupts, sweep up
   2789 	 * before we report an error.
   2790 	 */
   2791 	mutex_enter(txq->txq_lock);
   2792 	wm_txeof(sc, txq);
   2793 	mutex_exit(txq->txq_lock);
   2794 
   2795 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2796 #ifdef WM_DEBUG
   2797 		int i, j;
   2798 		struct wm_txsoft *txs;
   2799 #endif
   2800 		log(LOG_ERR,
   2801 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2802 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2803 		    txq->txq_next);
   2804 		ifp->if_oerrors++;
   2805 #ifdef WM_DEBUG
   2806 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   2807 		    i = WM_NEXTTXS(txq, i)) {
   2808 			txs = &txq->txq_soft[i];
   2809 			printf("txs %d tx %d -> %d\n",
   2810 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   2811 			for (j = txs->txs_firstdesc; ;
   2812 			    j = WM_NEXTTX(txq, j)) {
   2813 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2814 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2815 				printf("\t %#08x%08x\n",
   2816 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2817 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2818 				if (j == txs->txs_lastdesc)
   2819 					break;
   2820 			}
   2821 		}
   2822 #endif
   2823 	}
   2824 }
   2825 
   2826 /*
   2827  * wm_tick:
   2828  *
   2829  *	One second timer, used to check link status, sweep up
   2830  *	completed transmit jobs, etc.
   2831  */
   2832 static void
   2833 wm_tick(void *arg)
   2834 {
   2835 	struct wm_softc *sc = arg;
   2836 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2837 #ifndef WM_MPSAFE
   2838 	int s = splnet();
   2839 #endif
   2840 
   2841 	WM_CORE_LOCK(sc);
   2842 
   2843 	if (sc->sc_stopping)
   2844 		goto out;
   2845 
   2846 	if (sc->sc_type >= WM_T_82542_2_1) {
   2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2850 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2851 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2852 	}
   2853 
   2854 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2855 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2856 	    + CSR_READ(sc, WMREG_CRCERRS)
   2857 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2858 	    + CSR_READ(sc, WMREG_SYMERRC)
   2859 	    + CSR_READ(sc, WMREG_RXERRC)
   2860 	    + CSR_READ(sc, WMREG_SEC)
   2861 	    + CSR_READ(sc, WMREG_CEXTERR)
   2862 	    + CSR_READ(sc, WMREG_RLEC);
   2863 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2864 
   2865 	if (sc->sc_flags & WM_F_HAS_MII)
   2866 		mii_tick(&sc->sc_mii);
   2867 	else if ((sc->sc_type >= WM_T_82575)
   2868 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2869 		wm_serdes_tick(sc);
   2870 	else
   2871 		wm_tbi_tick(sc);
   2872 
   2873 out:
   2874 	WM_CORE_UNLOCK(sc);
   2875 #ifndef WM_MPSAFE
   2876 	splx(s);
   2877 #endif
   2878 
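         	/* Reschedule the tick unless the interface is being stopped. */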
   2879 	if (!sc->sc_stopping)
   2880 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2881 }
   2882 
   2883 static int
   2884 wm_ifflags_cb(struct ethercom *ec)
   2885 {
   2886 	struct ifnet *ifp = &ec->ec_if;
   2887 	struct wm_softc *sc = ifp->if_softc;
   2888 	int rc = 0;
   2889 
   2890 	WM_CORE_LOCK(sc);
   2891 
   2892 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2893 	sc->sc_if_flags = ifp->if_flags;
   2894 
   2895 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2896 		rc = ENETRESET;
   2897 		goto out;
   2898 	}
   2899 
   2900 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2901 		wm_set_filter(sc);
   2902 
   2903 	wm_set_vlan(sc);
   2904 
   2905 out:
   2906 	WM_CORE_UNLOCK(sc);
   2907 
   2908 	return rc;
   2909 }
   2910 
   2911 /*
   2912  * wm_ioctl:		[ifnet interface function]
   2913  *
   2914  *	Handle control requests from the operator.
   2915  */
   2916 static int
   2917 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2918 {
   2919 	struct wm_softc *sc = ifp->if_softc;
   2920 	struct ifreq *ifr = (struct ifreq *) data;
   2921 	struct ifaddr *ifa = (struct ifaddr *)data;
   2922 	struct sockaddr_dl *sdl;
   2923 	int s, error;
   2924 
   2925 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2926 		device_xname(sc->sc_dev), __func__));
   2927 
   2928 #ifndef WM_MPSAFE
   2929 	s = splnet();
   2930 #endif
   2931 	switch (cmd) {
   2932 	case SIOCSIFMEDIA:
   2933 	case SIOCGIFMEDIA:
   2934 		WM_CORE_LOCK(sc);
   2935 		/* Flow control requires full-duplex mode. */
   2936 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2937 		    (ifr->ifr_media & IFM_FDX) == 0)
   2938 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2939 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2940 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2941 				/* We can do both TXPAUSE and RXPAUSE. */
   2942 				ifr->ifr_media |=
   2943 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2944 			}
   2945 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2946 		}
   2947 		WM_CORE_UNLOCK(sc);
   2948 #ifdef WM_MPSAFE
   2949 		s = splnet();
   2950 #endif
   2951 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2952 #ifdef WM_MPSAFE
   2953 		splx(s);
   2954 #endif
   2955 		break;
   2956 	case SIOCINITIFADDR:
   2957 		WM_CORE_LOCK(sc);
   2958 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2959 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2960 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2961 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2962 			/* unicast address is first multicast entry */
   2963 			wm_set_filter(sc);
   2964 			error = 0;
   2965 			WM_CORE_UNLOCK(sc);
   2966 			break;
   2967 		}
   2968 		WM_CORE_UNLOCK(sc);
   2969 		/*FALLTHROUGH*/
   2970 	default:
   2971 #ifdef WM_MPSAFE
   2972 		s = splnet();
   2973 #endif
   2974 		/* It may call wm_start, so unlock here */
   2975 		error = ether_ioctl(ifp, cmd, data);
   2976 #ifdef WM_MPSAFE
   2977 		splx(s);
   2978 #endif
   2979 		if (error != ENETRESET)
   2980 			break;
   2981 
   2982 		error = 0;
   2983 
   2984 		if (cmd == SIOCSIFCAP) {
   2985 			error = (*ifp->if_init)(ifp);
   2986 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2987 			;
   2988 		else if (ifp->if_flags & IFF_RUNNING) {
   2989 			/*
   2990 			 * Multicast list has changed; set the hardware filter
   2991 			 * accordingly.
   2992 			 */
   2993 			WM_CORE_LOCK(sc);
   2994 			wm_set_filter(sc);
   2995 			WM_CORE_UNLOCK(sc);
   2996 		}
   2997 		break;
   2998 	}
   2999 
   3000 #ifndef WM_MPSAFE
   3001 	splx(s);
   3002 #endif
   3003 	return error;
   3004 }
   3005 
   3006 /* MAC address related */
   3007 
   3008 /*
    3009  * Get the offset of the MAC address and return it.
    3010  * If an error occurred, use offset 0.
   3011  */
   3012 static uint16_t
   3013 wm_check_alt_mac_addr(struct wm_softc *sc)
   3014 {
   3015 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3016 	uint16_t offset = NVM_OFF_MACADDR;
   3017 
   3018 	/* Try to read alternative MAC address pointer */
   3019 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3020 		return 0;
   3021 
    3022 	/* Check whether the pointer is valid or not. */
   3023 	if ((offset == 0x0000) || (offset == 0xffff))
   3024 		return 0;
   3025 
   3026 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3027 	/*
    3028 	 * Check whether the alternative MAC address is valid or not.
    3029 	 * Some cards have a non-0xffff pointer but don't actually use
    3030 	 * an alternative MAC address.
   3031 	 *
   3032 	 * Check whether the broadcast bit is set or not.
   3033 	 */
   3034 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3035 		if (((myea[0] & 0xff) & 0x01) == 0)
   3036 			return offset; /* Found */
   3037 
   3038 	/* Not found */
   3039 	return 0;
   3040 }
   3041 
   3042 static int
   3043 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3044 {
   3045 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3046 	uint16_t offset = NVM_OFF_MACADDR;
   3047 	int do_invert = 0;
   3048 
   3049 	switch (sc->sc_type) {
   3050 	case WM_T_82580:
   3051 	case WM_T_I350:
   3052 	case WM_T_I354:
   3053 		/* EEPROM Top Level Partitioning */
   3054 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3055 		break;
   3056 	case WM_T_82571:
   3057 	case WM_T_82575:
   3058 	case WM_T_82576:
   3059 	case WM_T_80003:
   3060 	case WM_T_I210:
   3061 	case WM_T_I211:
   3062 		offset = wm_check_alt_mac_addr(sc);
   3063 		if (offset == 0)
   3064 			if ((sc->sc_funcid & 0x01) == 1)
   3065 				do_invert = 1;
   3066 		break;
   3067 	default:
   3068 		if ((sc->sc_funcid & 0x01) == 1)
   3069 			do_invert = 1;
   3070 		break;
   3071 	}
   3072 
   3073 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3074 		goto bad;
   3075 
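         	/* Each 16-bit NVM word holds two address bytes, low byte first. */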
   3076 	enaddr[0] = myea[0] & 0xff;
   3077 	enaddr[1] = myea[0] >> 8;
   3078 	enaddr[2] = myea[1] & 0xff;
   3079 	enaddr[3] = myea[1] >> 8;
   3080 	enaddr[4] = myea[2] & 0xff;
   3081 	enaddr[5] = myea[2] >> 8;
   3082 
   3083 	/*
   3084 	 * Toggle the LSB of the MAC address on the second port
   3085 	 * of some dual port cards.
   3086 	 */
   3087 	if (do_invert != 0)
   3088 		enaddr[5] ^= 1;
   3089 
   3090 	return 0;
   3091 
   3092  bad:
   3093 	return -1;
   3094 }
   3095 
   3096 /*
   3097  * wm_set_ral:
   3098  *
    3099  *	Set an entry in the receive address list.
   3100  */
   3101 static void
   3102 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3103 {
   3104 	uint32_t ral_lo, ral_hi;
   3105 
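         	/* Pack the address into RAL/RAH; RAL_AV marks the entry valid. */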
   3106 	if (enaddr != NULL) {
   3107 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3108 		    (enaddr[3] << 24);
   3109 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3110 		ral_hi |= RAL_AV;
   3111 	} else {
   3112 		ral_lo = 0;
   3113 		ral_hi = 0;
   3114 	}
   3115 
   3116 	if (sc->sc_type >= WM_T_82544) {
   3117 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3118 		    ral_lo);
   3119 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3120 		    ral_hi);
   3121 	} else {
   3122 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3123 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3124 	}
   3125 }
   3126 
   3127 /*
   3128  * wm_mchash:
   3129  *
   3130  *	Compute the hash of the multicast address for the 4096-bit
   3131  *	multicast filter.
   3132  */
   3133 static uint32_t
   3134 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3135 {
   3136 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3137 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3138 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3139 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3140 	uint32_t hash;
   3141 
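         	/*
         	 * The shift pairs select which bits of the last two address
         	 * octets form the hash index, depending on sc_mchash_type.
         	 */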
   3142 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3143 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3144 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3145 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3146 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3147 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3148 		return (hash & 0x3ff);
   3149 	}
   3150 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3151 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3152 
   3153 	return (hash & 0xfff);
   3154 }
   3155 
   3156 /*
   3157  * wm_set_filter:
   3158  *
   3159  *	Set up the receive filter.
   3160  */
   3161 static void
   3162 wm_set_filter(struct wm_softc *sc)
   3163 {
   3164 	struct ethercom *ec = &sc->sc_ethercom;
   3165 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3166 	struct ether_multi *enm;
   3167 	struct ether_multistep step;
   3168 	bus_addr_t mta_reg;
   3169 	uint32_t hash, reg, bit;
   3170 	int i, size, ralmax;
   3171 
   3172 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3173 		device_xname(sc->sc_dev), __func__));
   3174 
   3175 	if (sc->sc_type >= WM_T_82544)
   3176 		mta_reg = WMREG_CORDOVA_MTA;
   3177 	else
   3178 		mta_reg = WMREG_MTA;
   3179 
   3180 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3181 
   3182 	if (ifp->if_flags & IFF_BROADCAST)
   3183 		sc->sc_rctl |= RCTL_BAM;
   3184 	if (ifp->if_flags & IFF_PROMISC) {
   3185 		sc->sc_rctl |= RCTL_UPE;
   3186 		goto allmulti;
   3187 	}
   3188 
   3189 	/*
   3190 	 * Set the station address in the first RAL slot, and
   3191 	 * clear the remaining slots.
   3192 	 */
   3193 	if (sc->sc_type == WM_T_ICH8)
    3194 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3195 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3196 	    || (sc->sc_type == WM_T_PCH))
   3197 		size = WM_RAL_TABSIZE_ICH8;
   3198 	else if (sc->sc_type == WM_T_PCH2)
   3199 		size = WM_RAL_TABSIZE_PCH2;
   3200 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3201 		size = WM_RAL_TABSIZE_PCH_LPT;
   3202 	else if (sc->sc_type == WM_T_82575)
   3203 		size = WM_RAL_TABSIZE_82575;
   3204 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3205 		size = WM_RAL_TABSIZE_82576;
   3206 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3207 		size = WM_RAL_TABSIZE_I350;
   3208 	else
   3209 		size = WM_RAL_TABSIZE;
   3210 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3211 
   3212 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3213 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3214 		switch (i) {
   3215 		case 0:
   3216 			/* We can use all entries */
   3217 			ralmax = size;
   3218 			break;
   3219 		case 1:
   3220 			/* Only RAR[0] */
   3221 			ralmax = 1;
   3222 			break;
   3223 		default:
   3224 			/* available SHRA + RAR[0] */
   3225 			ralmax = i + 1;
   3226 		}
   3227 	} else
   3228 		ralmax = size;
   3229 	for (i = 1; i < size; i++) {
   3230 		if (i < ralmax)
   3231 			wm_set_ral(sc, NULL, i);
   3232 	}
   3233 
   3234 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3235 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3236 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3237 	    || (sc->sc_type == WM_T_PCH_SPT))
   3238 		size = WM_ICH8_MC_TABSIZE;
   3239 	else
   3240 		size = WM_MC_TABSIZE;
   3241 	/* Clear out the multicast table. */
   3242 	for (i = 0; i < size; i++)
   3243 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3244 
   3245 	ETHER_FIRST_MULTI(step, ec, enm);
   3246 	while (enm != NULL) {
   3247 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3248 			/*
   3249 			 * We must listen to a range of multicast addresses.
   3250 			 * For now, just accept all multicasts, rather than
   3251 			 * trying to set only those filter bits needed to match
   3252 			 * the range.  (At this time, the only use of address
   3253 			 * ranges is for IP multicast routing, for which the
   3254 			 * range is big enough to require all bits set.)
   3255 			 */
   3256 			goto allmulti;
   3257 		}
   3258 
   3259 		hash = wm_mchash(sc, enm->enm_addrlo);
   3260 
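         		/*
         		 * The upper hash bits select a 32-bit MTA register and
         		 * the low 5 bits select the bit within it.
         		 */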
   3261 		reg = (hash >> 5);
   3262 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3263 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3264 		    || (sc->sc_type == WM_T_PCH2)
   3265 		    || (sc->sc_type == WM_T_PCH_LPT)
   3266 		    || (sc->sc_type == WM_T_PCH_SPT))
   3267 			reg &= 0x1f;
   3268 		else
   3269 			reg &= 0x7f;
   3270 		bit = hash & 0x1f;
   3271 
   3272 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3273 		hash |= 1U << bit;
   3274 
   3275 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3276 			/*
   3277 			 * 82544 Errata 9: Certain register cannot be written
   3278 			 * with particular alignments in PCI-X bus operation
   3279 			 * (FCAH, MTA and VFTA).
   3280 			 */
   3281 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3282 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3283 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3284 		} else
   3285 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3286 
   3287 		ETHER_NEXT_MULTI(step, enm);
   3288 	}
   3289 
   3290 	ifp->if_flags &= ~IFF_ALLMULTI;
   3291 	goto setit;
   3292 
   3293  allmulti:
   3294 	ifp->if_flags |= IFF_ALLMULTI;
   3295 	sc->sc_rctl |= RCTL_MPE;
   3296 
   3297  setit:
   3298 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3299 }
   3300 
   3301 /* Reset and init related */
   3302 
   3303 static void
   3304 wm_set_vlan(struct wm_softc *sc)
   3305 {
   3306 
   3307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3308 		device_xname(sc->sc_dev), __func__));
   3309 
   3310 	/* Deal with VLAN enables. */
   3311 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3312 		sc->sc_ctrl |= CTRL_VME;
   3313 	else
   3314 		sc->sc_ctrl &= ~CTRL_VME;
   3315 
   3316 	/* Write the control registers. */
   3317 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3318 }
   3319 
   3320 static void
   3321 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3322 {
   3323 	uint32_t gcr;
   3324 	pcireg_t ctrl2;
   3325 
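         	/*
         	 * If the device advertises PCIe capability version 2, set a
         	 * 16ms completion timeout via DCSR2; otherwise use the 10ms
         	 * value in GCR.
         	 */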
   3326 	gcr = CSR_READ(sc, WMREG_GCR);
   3327 
    3328 	/* Only take action if the timeout value is at the default of 0 */
   3329 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3330 		goto out;
   3331 
   3332 	if ((gcr & GCR_CAP_VER2) == 0) {
   3333 		gcr |= GCR_CMPL_TMOUT_10MS;
   3334 		goto out;
   3335 	}
   3336 
   3337 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3338 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3339 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3340 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3341 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3342 
   3343 out:
   3344 	/* Disable completion timeout resend */
   3345 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3346 
   3347 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3348 }
   3349 
   3350 void
   3351 wm_get_auto_rd_done(struct wm_softc *sc)
   3352 {
   3353 	int i;
   3354 
   3355 	/* wait for eeprom to reload */
   3356 	switch (sc->sc_type) {
   3357 	case WM_T_82571:
   3358 	case WM_T_82572:
   3359 	case WM_T_82573:
   3360 	case WM_T_82574:
   3361 	case WM_T_82583:
   3362 	case WM_T_82575:
   3363 	case WM_T_82576:
   3364 	case WM_T_82580:
   3365 	case WM_T_I350:
   3366 	case WM_T_I354:
   3367 	case WM_T_I210:
   3368 	case WM_T_I211:
   3369 	case WM_T_80003:
   3370 	case WM_T_ICH8:
   3371 	case WM_T_ICH9:
   3372 		for (i = 0; i < 10; i++) {
   3373 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3374 				break;
   3375 			delay(1000);
   3376 		}
   3377 		if (i == 10) {
   3378 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3379 			    "complete\n", device_xname(sc->sc_dev));
   3380 		}
   3381 		break;
   3382 	default:
   3383 		break;
   3384 	}
   3385 }
   3386 
   3387 void
   3388 wm_lan_init_done(struct wm_softc *sc)
   3389 {
   3390 	uint32_t reg = 0;
   3391 	int i;
   3392 
   3393 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3394 		device_xname(sc->sc_dev), __func__));
   3395 
   3396 	/* Wait for eeprom to reload */
   3397 	switch (sc->sc_type) {
   3398 	case WM_T_ICH10:
   3399 	case WM_T_PCH:
   3400 	case WM_T_PCH2:
   3401 	case WM_T_PCH_LPT:
   3402 	case WM_T_PCH_SPT:
   3403 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3404 			reg = CSR_READ(sc, WMREG_STATUS);
   3405 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3406 				break;
   3407 			delay(100);
   3408 		}
   3409 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3410 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3411 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3412 		}
   3413 		break;
   3414 	default:
   3415 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3416 		    __func__);
   3417 		break;
   3418 	}
   3419 
   3420 	reg &= ~STATUS_LAN_INIT_DONE;
   3421 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3422 }
   3423 
   3424 void
   3425 wm_get_cfg_done(struct wm_softc *sc)
   3426 {
   3427 	int mask;
   3428 	uint32_t reg;
   3429 	int i;
   3430 
   3431 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3432 		device_xname(sc->sc_dev), __func__));
   3433 
   3434 	/* Wait for eeprom to reload */
   3435 	switch (sc->sc_type) {
   3436 	case WM_T_82542_2_0:
   3437 	case WM_T_82542_2_1:
   3438 		/* null */
   3439 		break;
   3440 	case WM_T_82543:
   3441 	case WM_T_82544:
   3442 	case WM_T_82540:
   3443 	case WM_T_82545:
   3444 	case WM_T_82545_3:
   3445 	case WM_T_82546:
   3446 	case WM_T_82546_3:
   3447 	case WM_T_82541:
   3448 	case WM_T_82541_2:
   3449 	case WM_T_82547:
   3450 	case WM_T_82547_2:
   3451 	case WM_T_82573:
   3452 	case WM_T_82574:
   3453 	case WM_T_82583:
   3454 		/* generic */
   3455 		delay(10*1000);
   3456 		break;
   3457 	case WM_T_80003:
   3458 	case WM_T_82571:
   3459 	case WM_T_82572:
   3460 	case WM_T_82575:
   3461 	case WM_T_82576:
   3462 	case WM_T_82580:
   3463 	case WM_T_I350:
   3464 	case WM_T_I354:
   3465 	case WM_T_I210:
   3466 	case WM_T_I211:
   3467 		if (sc->sc_type == WM_T_82571) {
    3468 			/* Only the 82571 shares port 0's CFGDONE bit */
   3469 			mask = EEMNGCTL_CFGDONE_0;
   3470 		} else
   3471 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3472 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3473 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3474 				break;
   3475 			delay(1000);
   3476 		}
   3477 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3478 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3479 				device_xname(sc->sc_dev), __func__));
   3480 		}
   3481 		break;
   3482 	case WM_T_ICH8:
   3483 	case WM_T_ICH9:
   3484 	case WM_T_ICH10:
   3485 	case WM_T_PCH:
   3486 	case WM_T_PCH2:
   3487 	case WM_T_PCH_LPT:
   3488 	case WM_T_PCH_SPT:
   3489 		delay(10*1000);
   3490 		if (sc->sc_type >= WM_T_ICH10)
   3491 			wm_lan_init_done(sc);
   3492 		else
   3493 			wm_get_auto_rd_done(sc);
   3494 
   3495 		reg = CSR_READ(sc, WMREG_STATUS);
   3496 		if ((reg & STATUS_PHYRA) != 0)
   3497 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3498 		break;
   3499 	default:
   3500 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3501 		    __func__);
   3502 		break;
   3503 	}
   3504 }
   3505 
   3506 /* Init hardware bits */
   3507 void
   3508 wm_initialize_hardware_bits(struct wm_softc *sc)
   3509 {
   3510 	uint32_t tarc0, tarc1, reg;
   3511 
   3512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3513 		device_xname(sc->sc_dev), __func__));
   3514 
   3515 	/* For 82571 variant, 80003 and ICHs */
   3516 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3517 	    || (sc->sc_type >= WM_T_80003)) {
   3518 
   3519 		/* Transmit Descriptor Control 0 */
   3520 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3521 		reg |= TXDCTL_COUNT_DESC;
   3522 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3523 
   3524 		/* Transmit Descriptor Control 1 */
   3525 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3526 		reg |= TXDCTL_COUNT_DESC;
   3527 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3528 
   3529 		/* TARC0 */
   3530 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3531 		switch (sc->sc_type) {
   3532 		case WM_T_82571:
   3533 		case WM_T_82572:
   3534 		case WM_T_82573:
   3535 		case WM_T_82574:
   3536 		case WM_T_82583:
   3537 		case WM_T_80003:
   3538 			/* Clear bits 30..27 */
   3539 			tarc0 &= ~__BITS(30, 27);
   3540 			break;
   3541 		default:
   3542 			break;
   3543 		}
   3544 
   3545 		switch (sc->sc_type) {
   3546 		case WM_T_82571:
   3547 		case WM_T_82572:
   3548 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3549 
   3550 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3551 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3552 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3553 			/* 8257[12] Errata No.7 */
    3554 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3555 
   3556 			/* TARC1 bit 28 */
   3557 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3558 				tarc1 &= ~__BIT(28);
   3559 			else
   3560 				tarc1 |= __BIT(28);
   3561 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3562 
   3563 			/*
   3564 			 * 8257[12] Errata No.13
    3565 			 * Disable Dynamic Clock Gating.
   3566 			 */
   3567 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3568 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3569 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3570 			break;
   3571 		case WM_T_82573:
   3572 		case WM_T_82574:
   3573 		case WM_T_82583:
   3574 			if ((sc->sc_type == WM_T_82574)
   3575 			    || (sc->sc_type == WM_T_82583))
   3576 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3577 
   3578 			/* Extended Device Control */
   3579 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3580 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3581 			reg |= __BIT(22);	/* Set bit 22 */
   3582 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3583 
   3584 			/* Device Control */
   3585 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3586 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3587 
   3588 			/* PCIe Control Register */
   3589 			/*
   3590 			 * 82573 Errata (unknown).
   3591 			 *
   3592 			 * 82574 Errata 25 and 82583 Errata 12
   3593 			 * "Dropped Rx Packets":
    3594 			 *   NVM image 2.1.4 and newer does not have this bug.
   3595 			 */
   3596 			reg = CSR_READ(sc, WMREG_GCR);
   3597 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3598 			CSR_WRITE(sc, WMREG_GCR, reg);
   3599 
   3600 			if ((sc->sc_type == WM_T_82574)
   3601 			    || (sc->sc_type == WM_T_82583)) {
   3602 				/*
   3603 				 * Document says this bit must be set for
   3604 				 * proper operation.
   3605 				 */
   3606 				reg = CSR_READ(sc, WMREG_GCR);
   3607 				reg |= __BIT(22);
   3608 				CSR_WRITE(sc, WMREG_GCR, reg);
   3609 
   3610 				/*
    3611 				 * Apply a workaround for the hardware errata
    3612 				 * documented in the errata docs. It fixes an
    3613 				 * issue where some error-prone or unreliable
    3614 				 * PCIe completions occur, particularly with
    3615 				 * ASPM enabled. Without the fix, the issue
    3616 				 * can cause Tx timeouts.
   3617 				 */
   3618 				reg = CSR_READ(sc, WMREG_GCR2);
   3619 				reg |= __BIT(0);
   3620 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3621 			}
   3622 			break;
   3623 		case WM_T_80003:
   3624 			/* TARC0 */
   3625 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3626 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3627 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3628 
   3629 			/* TARC1 bit 28 */
   3630 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3631 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3632 				tarc1 &= ~__BIT(28);
   3633 			else
   3634 				tarc1 |= __BIT(28);
   3635 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3636 			break;
   3637 		case WM_T_ICH8:
   3638 		case WM_T_ICH9:
   3639 		case WM_T_ICH10:
   3640 		case WM_T_PCH:
   3641 		case WM_T_PCH2:
   3642 		case WM_T_PCH_LPT:
   3643 		case WM_T_PCH_SPT:
   3644 			/* TARC0 */
   3645 			if ((sc->sc_type == WM_T_ICH8)
   3646 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3647 				/* Set TARC0 bits 29 and 28 */
   3648 				tarc0 |= __BITS(29, 28);
   3649 			}
   3650 			/* Set TARC0 bits 23,24,26,27 */
   3651 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3652 
   3653 			/* CTRL_EXT */
   3654 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3655 			reg |= __BIT(22);	/* Set bit 22 */
   3656 			/*
   3657 			 * Enable PHY low-power state when MAC is at D3
   3658 			 * w/o WoL
   3659 			 */
   3660 			if (sc->sc_type >= WM_T_PCH)
   3661 				reg |= CTRL_EXT_PHYPDEN;
   3662 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3663 
   3664 			/* TARC1 */
   3665 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3666 			/* bit 28 */
   3667 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3668 				tarc1 &= ~__BIT(28);
   3669 			else
   3670 				tarc1 |= __BIT(28);
   3671 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3672 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3673 
   3674 			/* Device Status */
   3675 			if (sc->sc_type == WM_T_ICH8) {
   3676 				reg = CSR_READ(sc, WMREG_STATUS);
   3677 				reg &= ~__BIT(31);
   3678 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3679 
   3680 			}
   3681 
   3682 			/* IOSFPC */
   3683 			if (sc->sc_type == WM_T_PCH_SPT) {
   3684 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3685 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3686 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3687 			}
   3688 			/*
    3689 			 * To work around a descriptor data corruption issue
    3690 			 * seen with NFS v2 UDP traffic, just disable the NFS
    3691 			 * filtering capability.
   3692 			 */
   3693 			reg = CSR_READ(sc, WMREG_RFCTL);
   3694 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3695 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3696 			break;
   3697 		default:
   3698 			break;
   3699 		}
   3700 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3701 
   3702 		/*
   3703 		 * 8257[12] Errata No.52 and some others.
   3704 		 * Avoid RSS Hash Value bug.
   3705 		 */
   3706 		switch (sc->sc_type) {
   3707 		case WM_T_82571:
   3708 		case WM_T_82572:
   3709 		case WM_T_82573:
   3710 		case WM_T_80003:
   3711 		case WM_T_ICH8:
   3712 			reg = CSR_READ(sc, WMREG_RFCTL);
   3713 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3714 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3715 			break;
   3716 		default:
   3717 			break;
   3718 		}
   3719 	}
   3720 }
   3721 
   3722 static uint32_t
   3723 wm_rxpbs_adjust_82580(uint32_t val)
   3724 {
   3725 	uint32_t rv = 0;
   3726 
   3727 	if (val < __arraycount(wm_82580_rxpbs_table))
   3728 		rv = wm_82580_rxpbs_table[val];
   3729 
   3730 	return rv;
   3731 }
   3732 
   3733 /*
   3734  * wm_reset:
   3735  *
   3736  *	Reset the i82542 chip.
   3737  */
   3738 static void
   3739 wm_reset(struct wm_softc *sc)
   3740 {
   3741 	int phy_reset = 0;
   3742 	int i, error = 0;
   3743 	uint32_t reg;
   3744 
   3745 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3746 		device_xname(sc->sc_dev), __func__));
   3747 	KASSERT(sc->sc_type != 0);
   3748 
   3749 	/*
   3750 	 * Allocate on-chip memory according to the MTU size.
   3751 	 * The Packet Buffer Allocation register must be written
   3752 	 * before the chip is reset.
   3753 	 */
   3754 	switch (sc->sc_type) {
   3755 	case WM_T_82547:
   3756 	case WM_T_82547_2:
   3757 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3758 		    PBA_22K : PBA_30K;
   3759 		for (i = 0; i < sc->sc_nqueues; i++) {
   3760 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3761 			txq->txq_fifo_head = 0;
   3762 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3763 			txq->txq_fifo_size =
   3764 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3765 			txq->txq_fifo_stall = 0;
   3766 		}
   3767 		break;
   3768 	case WM_T_82571:
   3769 	case WM_T_82572:
    3770 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3771 	case WM_T_80003:
   3772 		sc->sc_pba = PBA_32K;
   3773 		break;
   3774 	case WM_T_82573:
   3775 		sc->sc_pba = PBA_12K;
   3776 		break;
   3777 	case WM_T_82574:
   3778 	case WM_T_82583:
   3779 		sc->sc_pba = PBA_20K;
   3780 		break;
   3781 	case WM_T_82576:
   3782 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3783 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3784 		break;
   3785 	case WM_T_82580:
   3786 	case WM_T_I350:
   3787 	case WM_T_I354:
   3788 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3789 		break;
   3790 	case WM_T_I210:
   3791 	case WM_T_I211:
   3792 		sc->sc_pba = PBA_34K;
   3793 		break;
   3794 	case WM_T_ICH8:
   3795 		/* Workaround for a bit corruption issue in FIFO memory */
   3796 		sc->sc_pba = PBA_8K;
   3797 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3798 		break;
   3799 	case WM_T_ICH9:
   3800 	case WM_T_ICH10:
   3801 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3802 		    PBA_14K : PBA_10K;
   3803 		break;
   3804 	case WM_T_PCH:
   3805 	case WM_T_PCH2:
   3806 	case WM_T_PCH_LPT:
   3807 	case WM_T_PCH_SPT:
   3808 		sc->sc_pba = PBA_26K;
   3809 		break;
   3810 	default:
   3811 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3812 		    PBA_40K : PBA_48K;
   3813 		break;
   3814 	}
   3815 	/*
   3816 	 * Only old or non-multiqueue devices have the PBA register
   3817 	 * XXX Need special handling for 82575.
   3818 	 */
   3819 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3820 	    || (sc->sc_type == WM_T_82575))
   3821 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3822 
   3823 	/* Prevent the PCI-E bus from sticking */
   3824 	if (sc->sc_flags & WM_F_PCIE) {
   3825 		int timeout = 800;
   3826 
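         		/*
         		 * Ask the device to stop mastering the bus, then poll
         		 * (up to 800 * 100us = 80ms) until outstanding requests
         		 * have drained.
         		 */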
   3827 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3828 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3829 
   3830 		while (timeout--) {
   3831 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3832 			    == 0)
   3833 				break;
   3834 			delay(100);
   3835 		}
   3836 	}
   3837 
   3838 	/* Set the completion timeout for interface */
   3839 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3840 	    || (sc->sc_type == WM_T_82580)
   3841 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3842 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3843 		wm_set_pcie_completion_timeout(sc);
   3844 
   3845 	/* Clear interrupt */
   3846 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3847 	if (sc->sc_nintrs > 1) {
   3848 		if (sc->sc_type != WM_T_82574) {
   3849 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3850 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3851 		} else {
   3852 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3853 		}
   3854 	}
   3855 
   3856 	/* Stop the transmit and receive processes. */
   3857 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3858 	sc->sc_rctl &= ~RCTL_EN;
   3859 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3860 	CSR_WRITE_FLUSH(sc);
   3861 
   3862 	/* XXX set_tbi_sbp_82543() */
   3863 
   3864 	delay(10*1000);
   3865 
   3866 	/* Must acquire the MDIO ownership before MAC reset */
   3867 	switch (sc->sc_type) {
   3868 	case WM_T_82573:
   3869 	case WM_T_82574:
   3870 	case WM_T_82583:
   3871 		error = wm_get_hw_semaphore_82573(sc);
   3872 		break;
   3873 	default:
   3874 		break;
   3875 	}
   3876 
   3877 	/*
   3878 	 * 82541 Errata 29? & 82547 Errata 28?
   3879 	 * See also the description about PHY_RST bit in CTRL register
   3880 	 * in 8254x_GBe_SDM.pdf.
   3881 	 */
   3882 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3883 		CSR_WRITE(sc, WMREG_CTRL,
   3884 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3885 		CSR_WRITE_FLUSH(sc);
   3886 		delay(5000);
   3887 	}
   3888 
   3889 	switch (sc->sc_type) {
   3890 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3891 	case WM_T_82541:
   3892 	case WM_T_82541_2:
   3893 	case WM_T_82547:
   3894 	case WM_T_82547_2:
   3895 		/*
   3896 		 * On some chipsets, a reset through a memory-mapped write
   3897 		 * cycle can cause the chip to reset before completing the
   3898 		 * write cycle.  This causes major headache that can be
   3899 		 * avoided by issuing the reset via indirect register writes
   3900 		 * through I/O space.
   3901 		 *
   3902 		 * So, if we successfully mapped the I/O BAR at attach time,
   3903 		 * use that.  Otherwise, try our luck with a memory-mapped
   3904 		 * reset.
   3905 		 */
   3906 		if (sc->sc_flags & WM_F_IOH_VALID)
   3907 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3908 		else
   3909 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3910 		break;
   3911 	case WM_T_82545_3:
   3912 	case WM_T_82546_3:
   3913 		/* Use the shadow control register on these chips. */
   3914 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3915 		break;
   3916 	case WM_T_80003:
   3917 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3918 		sc->phy.acquire(sc);
   3919 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3920 		sc->phy.release(sc);
   3921 		break;
   3922 	case WM_T_ICH8:
   3923 	case WM_T_ICH9:
   3924 	case WM_T_ICH10:
   3925 	case WM_T_PCH:
   3926 	case WM_T_PCH2:
   3927 	case WM_T_PCH_LPT:
   3928 	case WM_T_PCH_SPT:
   3929 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3930 		if (wm_phy_resetisblocked(sc) == false) {
   3931 			/*
   3932 			 * Gate automatic PHY configuration by hardware on
   3933 			 * non-managed 82579
   3934 			 */
   3935 			if ((sc->sc_type == WM_T_PCH2)
   3936 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3937 				== 0))
   3938 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3939 
   3940 			reg |= CTRL_PHY_RESET;
   3941 			phy_reset = 1;
   3942 		} else
   3943 			printf("XXX reset is blocked!!!\n");
   3944 		sc->phy.acquire(sc);
   3945 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3946 		/* Don't insert a completion barrier when resetting */
   3947 		delay(20*1000);
   3948 		mutex_exit(sc->sc_ich_phymtx);
   3949 		break;
   3950 	case WM_T_82580:
   3951 	case WM_T_I350:
   3952 	case WM_T_I354:
   3953 	case WM_T_I210:
   3954 	case WM_T_I211:
   3955 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3956 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3957 			CSR_WRITE_FLUSH(sc);
   3958 		delay(5000);
   3959 		break;
   3960 	case WM_T_82542_2_0:
   3961 	case WM_T_82542_2_1:
   3962 	case WM_T_82543:
   3963 	case WM_T_82540:
   3964 	case WM_T_82545:
   3965 	case WM_T_82546:
   3966 	case WM_T_82571:
   3967 	case WM_T_82572:
   3968 	case WM_T_82573:
   3969 	case WM_T_82574:
   3970 	case WM_T_82575:
   3971 	case WM_T_82576:
   3972 	case WM_T_82583:
   3973 	default:
   3974 		/* Everything else can safely use the documented method. */
   3975 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3976 		break;
   3977 	}
   3978 
   3979 	/* Must release the MDIO ownership after MAC reset */
   3980 	switch (sc->sc_type) {
   3981 	case WM_T_82573:
   3982 	case WM_T_82574:
   3983 	case WM_T_82583:
   3984 		if (error == 0)
   3985 			wm_put_hw_semaphore_82573(sc);
   3986 		break;
   3987 	default:
   3988 		break;
   3989 	}
   3990 
   3991 	if (phy_reset != 0) {
   3992 		wm_get_cfg_done(sc);
   3993 		delay(10 * 1000);
   3994 		if (sc->sc_type >= WM_T_PCH) {
   3995 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3996 			    BM_PORT_GEN_CFG);
   3997 			reg &= ~BM_WUC_HOST_WU_BIT;
   3998 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   3999 			    BM_PORT_GEN_CFG, reg);
   4000 		}
   4001 	}
   4002 
   4003 	/* reload EEPROM */
   4004 	switch (sc->sc_type) {
   4005 	case WM_T_82542_2_0:
   4006 	case WM_T_82542_2_1:
   4007 	case WM_T_82543:
   4008 	case WM_T_82544:
   4009 		delay(10);
   4010 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4011 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4012 		CSR_WRITE_FLUSH(sc);
   4013 		delay(2000);
   4014 		break;
   4015 	case WM_T_82540:
   4016 	case WM_T_82545:
   4017 	case WM_T_82545_3:
   4018 	case WM_T_82546:
   4019 	case WM_T_82546_3:
   4020 		delay(5*1000);
   4021 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4022 		break;
   4023 	case WM_T_82541:
   4024 	case WM_T_82541_2:
   4025 	case WM_T_82547:
   4026 	case WM_T_82547_2:
   4027 		delay(20000);
   4028 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4029 		break;
   4030 	case WM_T_82571:
   4031 	case WM_T_82572:
   4032 	case WM_T_82573:
   4033 	case WM_T_82574:
   4034 	case WM_T_82583:
   4035 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4036 			delay(10);
   4037 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4038 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4039 			CSR_WRITE_FLUSH(sc);
   4040 		}
   4041 		/* check EECD_EE_AUTORD */
   4042 		wm_get_auto_rd_done(sc);
   4043 		/*
   4044 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4045 		 * is set.
   4046 		 */
   4047 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4048 		    || (sc->sc_type == WM_T_82583))
   4049 			delay(25*1000);
   4050 		break;
   4051 	case WM_T_82575:
   4052 	case WM_T_82576:
   4053 	case WM_T_82580:
   4054 	case WM_T_I350:
   4055 	case WM_T_I354:
   4056 	case WM_T_I210:
   4057 	case WM_T_I211:
   4058 	case WM_T_80003:
   4059 		/* check EECD_EE_AUTORD */
   4060 		wm_get_auto_rd_done(sc);
   4061 		break;
   4062 	case WM_T_ICH8:
   4063 	case WM_T_ICH9:
   4064 	case WM_T_ICH10:
   4065 	case WM_T_PCH:
   4066 	case WM_T_PCH2:
   4067 	case WM_T_PCH_LPT:
   4068 	case WM_T_PCH_SPT:
   4069 		break;
   4070 	default:
   4071 		panic("%s: unknown type\n", __func__);
   4072 	}
   4073 
   4074 	/* Check whether EEPROM is present or not */
   4075 	switch (sc->sc_type) {
   4076 	case WM_T_82575:
   4077 	case WM_T_82576:
   4078 	case WM_T_82580:
   4079 	case WM_T_I350:
   4080 	case WM_T_I354:
   4081 	case WM_T_ICH8:
   4082 	case WM_T_ICH9:
   4083 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4084 			/* Not found */
   4085 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4086 			if (sc->sc_type == WM_T_82575)
   4087 				wm_reset_init_script_82575(sc);
   4088 		}
   4089 		break;
   4090 	default:
   4091 		break;
   4092 	}
   4093 
   4094 	if ((sc->sc_type == WM_T_82580)
   4095 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4096 		/* clear global device reset status bit */
   4097 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4098 	}
   4099 
   4100 	/* Clear any pending interrupt events. */
   4101 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4102 	reg = CSR_READ(sc, WMREG_ICR);
   4103 	if (sc->sc_nintrs > 1) {
   4104 		if (sc->sc_type != WM_T_82574) {
   4105 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4106 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4107 		} else
   4108 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4109 	}
   4110 
   4111 	/* reload sc_ctrl */
   4112 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4113 
   4114 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4115 		wm_set_eee_i350(sc);
   4116 
   4117 	/* dummy read from WUC */
   4118 	if (sc->sc_type == WM_T_PCH)
   4119 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4120 	/*
   4121 	 * For PCH, this write will make sure that any noise will be detected
   4122 	 * as a CRC error and be dropped rather than show up as a bad packet
   4123 	 * to the DMA engine
   4124 	 */
   4125 	if (sc->sc_type == WM_T_PCH)
   4126 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4127 
   4128 	if (sc->sc_type >= WM_T_82544)
   4129 		CSR_WRITE(sc, WMREG_WUC, 0);
   4130 
   4131 	wm_reset_mdicnfg_82580(sc);
   4132 
   4133 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4134 		wm_pll_workaround_i210(sc);
   4135 }
   4136 
   4137 /*
   4138  * wm_add_rxbuf:
   4139  *
    4140  *	Add a receive buffer to the indicated descriptor.
   4141  */
   4142 static int
   4143 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4144 {
   4145 	struct wm_softc *sc = rxq->rxq_sc;
   4146 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4147 	struct mbuf *m;
   4148 	int error;
   4149 
   4150 	KASSERT(mutex_owned(rxq->rxq_lock));
   4151 
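         	/* Get a packet header mbuf and attach a cluster for the data. */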
   4152 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4153 	if (m == NULL)
   4154 		return ENOBUFS;
   4155 
   4156 	MCLGET(m, M_DONTWAIT);
   4157 	if ((m->m_flags & M_EXT) == 0) {
   4158 		m_freem(m);
   4159 		return ENOBUFS;
   4160 	}
   4161 
   4162 	if (rxs->rxs_mbuf != NULL)
   4163 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4164 
   4165 	rxs->rxs_mbuf = m;
   4166 
   4167 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4168 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4169 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4170 	if (error) {
   4171 		/* XXX XXX XXX */
   4172 		aprint_error_dev(sc->sc_dev,
   4173 		    "unable to load rx DMA map %d, error = %d\n",
   4174 		    idx, error);
   4175 		panic("wm_add_rxbuf");
   4176 	}
   4177 
   4178 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4179 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4180 
   4181 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4182 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4183 			wm_init_rxdesc(rxq, idx);
   4184 	} else
   4185 		wm_init_rxdesc(rxq, idx);
   4186 
   4187 	return 0;
   4188 }
   4189 
   4190 /*
   4191  * wm_rxdrain:
   4192  *
   4193  *	Drain the receive queue.
   4194  */
   4195 static void
   4196 wm_rxdrain(struct wm_rxqueue *rxq)
   4197 {
   4198 	struct wm_softc *sc = rxq->rxq_sc;
   4199 	struct wm_rxsoft *rxs;
   4200 	int i;
   4201 
   4202 	KASSERT(mutex_owned(rxq->rxq_lock));
   4203 
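         	/* Unload the DMA map and free the mbuf of every posted buffer. */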
   4204 	for (i = 0; i < WM_NRXDESC; i++) {
   4205 		rxs = &rxq->rxq_soft[i];
   4206 		if (rxs->rxs_mbuf != NULL) {
   4207 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4208 			m_freem(rxs->rxs_mbuf);
   4209 			rxs->rxs_mbuf = NULL;
   4210 		}
   4211 	}
   4212 }
   4213 
   4214 
   4215 /*
   4216  * XXX copy from FreeBSD's sys/net/rss_config.c
   4217  */
   4218 /*
   4219  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4220  * effectiveness may be limited by algorithm choice and available entropy
   4221  * during the boot.
   4222  *
   4223  * XXXRW: And that we don't randomize it yet!
   4224  *
   4225  * This is the default Microsoft RSS specification key which is also
   4226  * the Chelsio T5 firmware default key.
   4227  */
   4228 #define RSS_KEYSIZE 40
   4229 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4230 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4231 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4232 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4233 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4234 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4235 };
   4236 
   4237 /*
   4238  * Caller must pass an array of size sizeof(rss_key).
   4239  *
   4240  * XXX
   4241  * As if_ixgbe may use this function, this function should not be
    4242  * As if_ixgbe may use this function, it should not be an
    4243  * if_wm-specific function.
   4244 static void
   4245 wm_rss_getkey(uint8_t *key)
   4246 {
   4247 
   4248 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4249 }
   4250 
   4251 /*
   4252  * Setup registers for RSS.
   4253  *
   4254  * XXX not yet VMDq support
   4255  */
   4256 static void
   4257 wm_init_rss(struct wm_softc *sc)
   4258 {
   4259 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4260 	int i;
   4261 
   4262 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4263 
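         	/*
         	 * Fill the redirection table round-robin so that RSS hash
         	 * buckets are spread evenly across the enabled queues.
         	 */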
   4264 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4265 		int qid, reta_ent;
   4266 
   4267 		qid  = i % sc->sc_nqueues;
    4268 		switch (sc->sc_type) {
   4269 		case WM_T_82574:
   4270 			reta_ent = __SHIFTIN(qid,
   4271 			    RETA_ENT_QINDEX_MASK_82574);
   4272 			break;
   4273 		case WM_T_82575:
   4274 			reta_ent = __SHIFTIN(qid,
   4275 			    RETA_ENT_QINDEX1_MASK_82575);
   4276 			break;
   4277 		default:
   4278 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4279 			break;
   4280 		}
   4281 
   4282 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4283 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4284 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4285 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4286 	}
   4287 
   4288 	wm_rss_getkey((uint8_t *)rss_key);
   4289 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4290 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4291 
   4292 	if (sc->sc_type == WM_T_82574)
   4293 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4294 	else
   4295 		mrqc = MRQC_ENABLE_RSS_MQ;
   4296 
   4297 	/* XXXX
   4298 	 * The same as FreeBSD igb.
    4299 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4300 	 */
   4301 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4302 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4303 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4304 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4305 
   4306 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4307 }
   4308 
   4309 /*
    4310  * Adjust the TX and RX queue numbers which the system actually uses.
    4311  *
    4312  * The numbers are affected by the parameters below:
    4313  *     - The number of hardware queues
   4314  *     - The number of MSI-X vectors (= "nvectors" argument)
   4315  *     - ncpu
   4316  */
   4317 static void
   4318 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4319 {
   4320 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4321 
   4322 	if (nvectors < 2) {
   4323 		sc->sc_nqueues = 1;
   4324 		return;
   4325 	}
   4326 
    4327 	switch (sc->sc_type) {
   4328 	case WM_T_82572:
   4329 		hw_ntxqueues = 2;
   4330 		hw_nrxqueues = 2;
   4331 		break;
   4332 	case WM_T_82574:
   4333 		hw_ntxqueues = 2;
   4334 		hw_nrxqueues = 2;
   4335 		break;
   4336 	case WM_T_82575:
   4337 		hw_ntxqueues = 4;
   4338 		hw_nrxqueues = 4;
   4339 		break;
   4340 	case WM_T_82576:
   4341 		hw_ntxqueues = 16;
   4342 		hw_nrxqueues = 16;
   4343 		break;
   4344 	case WM_T_82580:
   4345 	case WM_T_I350:
   4346 	case WM_T_I354:
   4347 		hw_ntxqueues = 8;
   4348 		hw_nrxqueues = 8;
   4349 		break;
   4350 	case WM_T_I210:
   4351 		hw_ntxqueues = 4;
   4352 		hw_nrxqueues = 4;
   4353 		break;
   4354 	case WM_T_I211:
   4355 		hw_ntxqueues = 2;
   4356 		hw_nrxqueues = 2;
   4357 		break;
   4358 		/*
    4359 		 * As the ethernet controllers below do not support MSI-X,
    4360 		 * this driver does not use multiqueue on them:
   4361 		 *     - WM_T_80003
   4362 		 *     - WM_T_ICH8
   4363 		 *     - WM_T_ICH9
   4364 		 *     - WM_T_ICH10
   4365 		 *     - WM_T_PCH
   4366 		 *     - WM_T_PCH2
   4367 		 *     - WM_T_PCH_LPT
   4368 		 */
   4369 	default:
   4370 		hw_ntxqueues = 1;
   4371 		hw_nrxqueues = 1;
   4372 		break;
   4373 	}
   4374 
   4375 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4376 
   4377 	/*
    4378 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    4379 	 * limit the number of queues actually used.
   4380 	 */
   4381 	if (nvectors < hw_nqueues + 1) {
   4382 		sc->sc_nqueues = nvectors - 1;
   4383 	} else {
   4384 		sc->sc_nqueues = hw_nqueues;
   4385 	}
   4386 
   4387 	/*
    4388 	 * Since more queues than CPUs cannot improve scaling, we limit
    4389 	 * the number of queues actually used.
   4390 	 */
   4391 	if (ncpu < sc->sc_nqueues)
   4392 		sc->sc_nqueues = ncpu;
   4393 }
   4394 
   4395 /*
   4396  * Both single interrupt MSI and INTx can use this function.
   4397  */
   4398 static int
   4399 wm_setup_legacy(struct wm_softc *sc)
   4400 {
   4401 	pci_chipset_tag_t pc = sc->sc_pc;
   4402 	const char *intrstr = NULL;
   4403 	char intrbuf[PCI_INTRSTR_LEN];
   4404 	int error;
   4405 
   4406 	error = wm_alloc_txrx_queues(sc);
   4407 	if (error) {
   4408 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4409 		    error);
   4410 		return ENOMEM;
   4411 	}
   4412 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4413 	    sizeof(intrbuf));
   4414 #ifdef WM_MPSAFE
   4415 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4416 #endif
   4417 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4418 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4419 	if (sc->sc_ihs[0] == NULL) {
    4420 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4421 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4422 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4423 		return ENOMEM;
   4424 	}
   4425 
   4426 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4427 	sc->sc_nintrs = 1;
   4428 	return 0;
   4429 }
   4430 
   4431 static int
   4432 wm_setup_msix(struct wm_softc *sc)
   4433 {
   4434 	void *vih;
   4435 	kcpuset_t *affinity;
   4436 	int qidx, error, intr_idx, txrx_established;
   4437 	pci_chipset_tag_t pc = sc->sc_pc;
   4438 	const char *intrstr = NULL;
   4439 	char intrbuf[PCI_INTRSTR_LEN];
   4440 	char intr_xname[INTRDEVNAMEBUF];
   4441 
   4442 	if (sc->sc_nqueues < ncpu) {
   4443 		/*
   4444 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4445 		 * interrupts starts from CPU#1.
   4446 		 */
   4447 		sc->sc_affinity_offset = 1;
   4448 	} else {
   4449 		/*
    4450 		 * In this case, this device uses all CPUs. For readability,
    4451 		 * we match the affinity cpu_index to the MSI-X vector number.
   4452 		 */
   4453 		sc->sc_affinity_offset = 0;
   4454 	}
   4455 
   4456 	error = wm_alloc_txrx_queues(sc);
   4457 	if (error) {
   4458 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4459 		    error);
   4460 		return ENOMEM;
   4461 	}
   4462 
   4463 	kcpuset_create(&affinity, false);
   4464 	intr_idx = 0;
   4465 
   4466 	/*
   4467 	 * TX and RX
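         	 * One MSI-X vector is assigned per queue; the link interrupt
         	 * gets its own vector after the queue vectors.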
   4468 	 */
   4469 	txrx_established = 0;
   4470 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4471 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4472 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4473 
   4474 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4475 		    sizeof(intrbuf));
   4476 #ifdef WM_MPSAFE
   4477 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4478 		    PCI_INTR_MPSAFE, true);
   4479 #endif
   4480 		memset(intr_xname, 0, sizeof(intr_xname));
   4481 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4482 		    device_xname(sc->sc_dev), qidx);
   4483 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4484 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4485 		if (vih == NULL) {
   4486 			aprint_error_dev(sc->sc_dev,
   4487 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4488 			    intrstr ? " at " : "",
   4489 			    intrstr ? intrstr : "");
   4490 
   4491 			goto fail;
   4492 		}
   4493 		kcpuset_zero(affinity);
   4494 		/* Round-robin affinity */
   4495 		kcpuset_set(affinity, affinity_to);
   4496 		error = interrupt_distribute(vih, affinity, NULL);
   4497 		if (error == 0) {
   4498 			aprint_normal_dev(sc->sc_dev,
   4499 			    "for TX and RX interrupting at %s affinity to %u\n",
   4500 			    intrstr, affinity_to);
   4501 		} else {
   4502 			aprint_normal_dev(sc->sc_dev,
   4503 			    "for TX and RX interrupting at %s\n", intrstr);
   4504 		}
   4505 		sc->sc_ihs[intr_idx] = vih;
    4506 		wmq->wmq_id = qidx;
   4507 		wmq->wmq_intr_idx = intr_idx;
   4508 
   4509 		txrx_established++;
   4510 		intr_idx++;
   4511 	}
   4512 
   4513 	/*
   4514 	 * LINK
   4515 	 */
   4516 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4517 	    sizeof(intrbuf));
   4518 #ifdef WM_MPSAFE
   4519 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4520 #endif
   4521 	memset(intr_xname, 0, sizeof(intr_xname));
   4522 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4523 	    device_xname(sc->sc_dev));
   4524 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4525 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4526 	if (vih == NULL) {
   4527 		aprint_error_dev(sc->sc_dev,
   4528 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4529 		    intrstr ? " at " : "",
   4530 		    intrstr ? intrstr : "");
   4531 
   4532 		goto fail;
   4533 	}
    4534 	/* Keep the default affinity for the LINK interrupt */
   4535 	aprint_normal_dev(sc->sc_dev,
   4536 	    "for LINK interrupting at %s\n", intrstr);
   4537 	sc->sc_ihs[intr_idx] = vih;
   4538 	sc->sc_link_intr_idx = intr_idx;
   4539 
   4540 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4541 	kcpuset_destroy(affinity);
   4542 	return 0;
   4543 
   4544  fail:
   4545 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4546 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4547 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4548 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4549 	}
   4550 
   4551 	kcpuset_destroy(affinity);
   4552 	return ENOMEM;
   4553 }
   4554 
   4555 /*
   4556  * wm_init:		[ifnet interface function]
   4557  *
   4558  *	Initialize the interface.
   4559  */
   4560 static int
   4561 wm_init(struct ifnet *ifp)
   4562 {
   4563 	struct wm_softc *sc = ifp->if_softc;
   4564 	int ret;
   4565 
   4566 	WM_CORE_LOCK(sc);
   4567 	ret = wm_init_locked(ifp);
   4568 	WM_CORE_UNLOCK(sc);
   4569 
   4570 	return ret;
   4571 }
   4572 
   4573 static int
   4574 wm_init_locked(struct ifnet *ifp)
   4575 {
   4576 	struct wm_softc *sc = ifp->if_softc;
   4577 	int i, j, trynum, error = 0;
   4578 	uint32_t reg;
   4579 
   4580 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4581 		device_xname(sc->sc_dev), __func__));
   4582 	KASSERT(WM_CORE_LOCKED(sc));
   4583 
   4584 	/*
    4585 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4586 	 * There is a small but measurable benefit to avoiding the adjustment
   4587 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4588 	 * on such platforms.  One possibility is that the DMA itself is
   4589 	 * slightly more efficient if the front of the entire packet (instead
   4590 	 * of the front of the headers) is aligned.
   4591 	 *
   4592 	 * Note we must always set align_tweak to 0 if we are using
   4593 	 * jumbo frames.
   4594 	 */
   4595 #ifdef __NO_STRICT_ALIGNMENT
   4596 	sc->sc_align_tweak = 0;
   4597 #else
   4598 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4599 		sc->sc_align_tweak = 0;
   4600 	else
   4601 		sc->sc_align_tweak = 2;
   4602 #endif /* __NO_STRICT_ALIGNMENT */
   4603 
   4604 	/* Cancel any pending I/O. */
   4605 	wm_stop_locked(ifp, 0);
   4606 
   4607 	/* update statistics before reset */
   4608 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4609 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4610 
   4611 	/* Reset the chip to a known state. */
   4612 	wm_reset(sc);
   4613 
   4614 	switch (sc->sc_type) {
   4615 	case WM_T_82571:
   4616 	case WM_T_82572:
   4617 	case WM_T_82573:
   4618 	case WM_T_82574:
   4619 	case WM_T_82583:
   4620 	case WM_T_80003:
   4621 	case WM_T_ICH8:
   4622 	case WM_T_ICH9:
   4623 	case WM_T_ICH10:
   4624 	case WM_T_PCH:
   4625 	case WM_T_PCH2:
   4626 	case WM_T_PCH_LPT:
   4627 	case WM_T_PCH_SPT:
   4628 		/* AMT based hardware can now take control from firmware */
   4629 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4630 			wm_get_hw_control(sc);
   4631 		break;
   4632 	default:
   4633 		break;
   4634 	}
   4635 
   4636 	/* Init hardware bits */
   4637 	wm_initialize_hardware_bits(sc);
   4638 
   4639 	/* Reset the PHY. */
   4640 	if (sc->sc_flags & WM_F_HAS_MII)
   4641 		wm_gmii_reset(sc);
   4642 
   4643 	/* Calculate (E)ITR value */
   4644 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4645 		sc->sc_itr = 450;	/* For EITR */
   4646 	} else if (sc->sc_type >= WM_T_82543) {
   4647 		/*
   4648 		 * Set up the interrupt throttling register (units of 256ns)
   4649 		 * Note that a footnote in Intel's documentation says this
   4650 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4651 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4652 		 * that that is also true for the 1024ns units of the other
   4653 		 * interrupt-related timer registers -- so, really, we ought
   4654 		 * to divide this value by 4 when the link speed is low.
   4655 		 *
   4656 		 * XXX implement this division at link speed change!
   4657 		 */
   4658 
   4659 		/*
   4660 		 * For N interrupts/sec, set this value to:
   4661 		 * 1000000000 / (N * 256).  Note that we set the
   4662 		 * absolute and packet timer values to this value
   4663 		 * divided by 4 to get "simple timer" behavior.
   4664 		 */
   4665 
   4666 		sc->sc_itr = 1500;		/* 2604 ints/sec */
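         		/*
         		 * Quick check of the formula above (illustrative):
         		 * 1000000000 / (1500 * 256) ~= 2604, hence the
         		 * "2604 ints/sec" figure.
         		 */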
   4667 	}
   4668 
   4669 	error = wm_init_txrx_queues(sc);
   4670 	if (error)
   4671 		goto out;
   4672 
   4673 	/*
   4674 	 * Clear out the VLAN table -- we don't use it (yet).
   4675 	 */
   4676 	CSR_WRITE(sc, WMREG_VET, 0);
   4677 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4678 		trynum = 10; /* Due to hw errata */
   4679 	else
   4680 		trynum = 1;
   4681 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4682 		for (j = 0; j < trynum; j++)
   4683 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
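         	/*
         	 * Each 32-bit VFTA entry covers 32 VLAN IDs, and the (i << 2)
         	 * converts the table index into a 4-byte register offset.
         	 * (With the usual 128-entry table this clears all 4096
         	 * possible VLAN IDs; the exact size here is WM_VLAN_TABSIZE.)
         	 */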
   4684 
   4685 	/*
   4686 	 * Set up flow-control parameters.
   4687 	 *
   4688 	 * XXX Values could probably stand some tuning.
   4689 	 */
   4690 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4691 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4692 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4693 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4694 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4695 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4696 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4697 	}
   4698 
   4699 	sc->sc_fcrtl = FCRTL_DFLT;
   4700 	if (sc->sc_type < WM_T_82543) {
   4701 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4702 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4703 	} else {
   4704 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4705 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4706 	}
   4707 
   4708 	if (sc->sc_type == WM_T_80003)
   4709 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4710 	else
   4711 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4712 
   4713 	/* Writes the control register. */
   4714 	wm_set_vlan(sc);
   4715 
   4716 	if (sc->sc_flags & WM_F_HAS_MII) {
   4717 		int val;
   4718 
   4719 		switch (sc->sc_type) {
   4720 		case WM_T_80003:
   4721 		case WM_T_ICH8:
   4722 		case WM_T_ICH9:
   4723 		case WM_T_ICH10:
   4724 		case WM_T_PCH:
   4725 		case WM_T_PCH2:
   4726 		case WM_T_PCH_LPT:
   4727 		case WM_T_PCH_SPT:
   4728 			/*
    4729 			 * Set the MAC to wait the maximum time between each
    4730 			 * iteration and increase the max iterations when
    4731 			 * polling the PHY; this fixes erroneous timeouts at
   4732 			 * 10Mbps.
   4733 			 */
   4734 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4735 			    0xFFFF);
   4736 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4737 			val |= 0x3F;
   4738 			wm_kmrn_writereg(sc,
   4739 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4740 			break;
   4741 		default:
   4742 			break;
   4743 		}
   4744 
   4745 		if (sc->sc_type == WM_T_80003) {
   4746 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4747 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4748 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4749 
    4750 			/* Bypass RX and TX FIFOs */
   4751 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4752 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4753 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4754 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4755 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4756 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4757 		}
   4758 	}
   4759 #if 0
   4760 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4761 #endif
   4762 
   4763 	/* Set up checksum offload parameters. */
   4764 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4765 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4766 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4767 		reg |= RXCSUM_IPOFL;
   4768 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4769 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4770 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4771 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4772 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4773 
   4774 	/* Set up MSI-X */
   4775 	if (sc->sc_nintrs > 1) {
   4776 		uint32_t ivar;
   4777 		struct wm_queue *wmq;
   4778 		int qid, qintr_idx;
   4779 
   4780 		if (sc->sc_type == WM_T_82575) {
   4781 			/* Interrupt control */
   4782 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4783 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4784 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4785 
   4786 			/* TX and RX */
   4787 			for (i = 0; i < sc->sc_nqueues; i++) {
   4788 				wmq = &sc->sc_queue[i];
   4789 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4790 				    EITR_TX_QUEUE(wmq->wmq_id)
   4791 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4792 			}
   4793 			/* Link status */
   4794 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4795 			    EITR_OTHER);
   4796 		} else if (sc->sc_type == WM_T_82574) {
   4797 			/* Interrupt control */
   4798 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4799 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4800 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4801 
   4802 			ivar = 0;
   4803 			/* TX and RX */
   4804 			for (i = 0; i < sc->sc_nqueues; i++) {
   4805 				wmq = &sc->sc_queue[i];
   4806 				qid = wmq->wmq_id;
   4807 				qintr_idx = wmq->wmq_intr_idx;
   4808 
   4809 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4810 				    IVAR_TX_MASK_Q_82574(qid));
   4811 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4812 				    IVAR_RX_MASK_Q_82574(qid));
   4813 			}
   4814 			/* Link status */
   4815 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4816 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4817 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4818 		} else {
   4819 			/* Interrupt control */
   4820 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4821 			    | GPIE_EIAME | GPIE_PBA);
   4822 
   4823 			switch (sc->sc_type) {
   4824 			case WM_T_82580:
   4825 			case WM_T_I350:
   4826 			case WM_T_I354:
   4827 			case WM_T_I210:
   4828 			case WM_T_I211:
   4829 				/* TX and RX */
   4830 				for (i = 0; i < sc->sc_nqueues; i++) {
   4831 					wmq = &sc->sc_queue[i];
   4832 					qid = wmq->wmq_id;
   4833 					qintr_idx = wmq->wmq_intr_idx;
   4834 
   4835 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4836 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4837 					ivar |= __SHIFTIN((qintr_idx
   4838 						| IVAR_VALID),
   4839 					    IVAR_TX_MASK_Q(qid));
   4840 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4841 					ivar |= __SHIFTIN((qintr_idx
   4842 						| IVAR_VALID),
   4843 					    IVAR_RX_MASK_Q(qid));
   4844 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4845 				}
   4846 				break;
   4847 			case WM_T_82576:
   4848 				/* TX and RX */
   4849 				for (i = 0; i < sc->sc_nqueues; i++) {
   4850 					wmq = &sc->sc_queue[i];
   4851 					qid = wmq->wmq_id;
   4852 					qintr_idx = wmq->wmq_intr_idx;
   4853 
   4854 					ivar = CSR_READ(sc,
   4855 					    WMREG_IVAR_Q_82576(qid));
   4856 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4857 					ivar |= __SHIFTIN((qintr_idx
   4858 						| IVAR_VALID),
   4859 					    IVAR_TX_MASK_Q_82576(qid));
   4860 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4861 					ivar |= __SHIFTIN((qintr_idx
   4862 						| IVAR_VALID),
   4863 					    IVAR_RX_MASK_Q_82576(qid));
   4864 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4865 					    ivar);
   4866 				}
   4867 				break;
   4868 			default:
   4869 				break;
   4870 			}
   4871 
   4872 			/* Link status */
   4873 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4874 			    IVAR_MISC_OTHER);
   4875 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4876 		}
   4877 
   4878 		if (sc->sc_nqueues > 1) {
   4879 			wm_init_rss(sc);
   4880 
    4881 			/*
    4882 			 * NOTE: Receive full-packet checksum offload is
    4883 			 * mutually exclusive with multiqueue.  This does
    4884 			 * not affect the per-protocol TCP/IP checksum
    4885 			 * offloads, which still work.
    4886 			 */
   4887 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4888 			reg |= RXCSUM_PCSD;
   4889 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4890 		}
   4891 	}
   4892 
   4893 	/* Set up the interrupt registers. */
   4894 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4895 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4896 	    ICR_RXO | ICR_RXT0;
   4897 	if (sc->sc_nintrs > 1) {
   4898 		uint32_t mask;
   4899 		struct wm_queue *wmq;
   4900 
   4901 		switch (sc->sc_type) {
   4902 		case WM_T_82574:
   4903 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4904 			    WMREG_EIAC_82574_MSIX_MASK);
   4905 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4906 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4907 			break;
   4908 		default:
   4909 			if (sc->sc_type == WM_T_82575) {
   4910 				mask = 0;
   4911 				for (i = 0; i < sc->sc_nqueues; i++) {
   4912 					wmq = &sc->sc_queue[i];
   4913 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4914 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4915 				}
   4916 				mask |= EITR_OTHER;
   4917 			} else {
   4918 				mask = 0;
   4919 				for (i = 0; i < sc->sc_nqueues; i++) {
   4920 					wmq = &sc->sc_queue[i];
   4921 					mask |= 1 << wmq->wmq_intr_idx;
   4922 				}
   4923 				mask |= 1 << sc->sc_link_intr_idx;
   4924 			}
   4925 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4926 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4927 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4928 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4929 			break;
   4930 		}
   4931 	} else
   4932 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4933 
   4934 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4935 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4936 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4937 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4938 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4939 		reg |= KABGTXD_BGSQLBIAS;
   4940 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4941 	}
   4942 
   4943 	/* Set up the inter-packet gap. */
   4944 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4945 
   4946 	if (sc->sc_type >= WM_T_82543) {
   4947 		/*
    4948 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    4949 		 * use the multiqueue function with MSI-X.
   4950 		 */
   4951 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4952 			int qidx;
   4953 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4954 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   4955 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   4956 				    sc->sc_itr);
   4957 			}
   4958 			/*
    4959 			 * Link interrupts occur much less frequently
    4960 			 * than TX and RX interrupts, so we don't tune
    4961 			 * the EITR(WM_MSIX_LINKINTR_IDX) value the way
    4962 			 * FreeBSD's if_igb does.
   4963 			 */
   4964 		} else
   4965 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4966 	}
   4967 
   4968 	/* Set the VLAN ethernetype. */
   4969 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4970 
   4971 	/*
   4972 	 * Set up the transmit control register; we start out with
    4973 	 * a collision distance suitable for FDX, but update it when
   4974 	 * we resolve the media type.
   4975 	 */
   4976 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4977 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4978 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4979 	if (sc->sc_type >= WM_T_82571)
   4980 		sc->sc_tctl |= TCTL_MULR;
   4981 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4982 
   4983 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4984 		/* Write TDT after TCTL.EN is set. See the datasheet. */
   4985 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4986 	}
   4987 
   4988 	if (sc->sc_type == WM_T_80003) {
   4989 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4990 		reg &= ~TCTL_EXT_GCEX_MASK;
   4991 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4992 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4993 	}
   4994 
   4995 	/* Set the media. */
   4996 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4997 		goto out;
   4998 
   4999 	/* Configure for OS presence */
   5000 	wm_init_manageability(sc);
   5001 
   5002 	/*
   5003 	 * Set up the receive control register; we actually program
   5004 	 * the register when we set the receive filter.  Use multicast
   5005 	 * address offset type 0.
   5006 	 *
   5007 	 * Only the i82544 has the ability to strip the incoming
   5008 	 * CRC, so we don't enable that feature.
   5009 	 */
   5010 	sc->sc_mchash_type = 0;
   5011 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5012 	    | RCTL_MO(sc->sc_mchash_type);
   5013 
   5014 	/*
   5015 	 * The I350 has a bug where it always strips the CRC whether
    5016 	 * asked to or not. So ask for stripped CRC here and cope in wm_rxeof().
   5017 	 */
   5018 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5019 	    || (sc->sc_type == WM_T_I210))
   5020 		sc->sc_rctl |= RCTL_SECRC;
   5021 
   5022 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5023 	    && (ifp->if_mtu > ETHERMTU)) {
   5024 		sc->sc_rctl |= RCTL_LPE;
   5025 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5026 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5027 	}
   5028 
   5029 	if (MCLBYTES == 2048) {
   5030 		sc->sc_rctl |= RCTL_2k;
   5031 	} else {
   5032 		if (sc->sc_type >= WM_T_82543) {
   5033 			switch (MCLBYTES) {
   5034 			case 4096:
   5035 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5036 				break;
   5037 			case 8192:
   5038 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5039 				break;
   5040 			case 16384:
   5041 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5042 				break;
   5043 			default:
   5044 				panic("wm_init: MCLBYTES %d unsupported",
   5045 				    MCLBYTES);
   5046 				break;
   5047 			}
   5048 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5049 	}
   5050 
   5051 	/* Set the receive filter. */
   5052 	wm_set_filter(sc);
   5053 
   5054 	/* Enable ECC */
   5055 	switch (sc->sc_type) {
   5056 	case WM_T_82571:
   5057 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5058 		reg |= PBA_ECC_CORR_EN;
   5059 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5060 		break;
   5061 	case WM_T_PCH_LPT:
   5062 	case WM_T_PCH_SPT:
   5063 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5064 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5065 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5066 
   5067 		reg = CSR_READ(sc, WMREG_CTRL);
   5068 		reg |= CTRL_MEHE;
   5069 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5070 		break;
   5071 	default:
   5072 		break;
   5073 	}
   5074 
    5075 	/* On the 82575 and later, set RDT only if RX is enabled */
   5076 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5077 		int qidx;
   5078 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5079 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5080 			for (i = 0; i < WM_NRXDESC; i++) {
   5081 				mutex_enter(rxq->rxq_lock);
   5082 				wm_init_rxdesc(rxq, i);
   5083 				mutex_exit(rxq->rxq_lock);
   5084 
   5085 			}
   5086 		}
   5087 	}
   5088 
   5089 	sc->sc_stopping = false;
   5090 
   5091 	/* Start the one second link check clock. */
   5092 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5093 
   5094 	/* ...all done! */
   5095 	ifp->if_flags |= IFF_RUNNING;
   5096 	ifp->if_flags &= ~IFF_OACTIVE;
   5097 
   5098  out:
   5099 	sc->sc_if_flags = ifp->if_flags;
   5100 	if (error)
   5101 		log(LOG_ERR, "%s: interface not running\n",
   5102 		    device_xname(sc->sc_dev));
   5103 	return error;
   5104 }
   5105 
   5106 /*
   5107  * wm_stop:		[ifnet interface function]
   5108  *
   5109  *	Stop transmission on the interface.
   5110  */
   5111 static void
   5112 wm_stop(struct ifnet *ifp, int disable)
   5113 {
   5114 	struct wm_softc *sc = ifp->if_softc;
   5115 
   5116 	WM_CORE_LOCK(sc);
   5117 	wm_stop_locked(ifp, disable);
   5118 	WM_CORE_UNLOCK(sc);
   5119 }
   5120 
   5121 static void
   5122 wm_stop_locked(struct ifnet *ifp, int disable)
   5123 {
   5124 	struct wm_softc *sc = ifp->if_softc;
   5125 	struct wm_txsoft *txs;
   5126 	int i, qidx;
   5127 
   5128 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5129 		device_xname(sc->sc_dev), __func__));
   5130 	KASSERT(WM_CORE_LOCKED(sc));
   5131 
   5132 	sc->sc_stopping = true;
   5133 
   5134 	/* Stop the one second clock. */
   5135 	callout_stop(&sc->sc_tick_ch);
   5136 
   5137 	/* Stop the 82547 Tx FIFO stall check timer. */
   5138 	if (sc->sc_type == WM_T_82547)
   5139 		callout_stop(&sc->sc_txfifo_ch);
   5140 
   5141 	if (sc->sc_flags & WM_F_HAS_MII) {
   5142 		/* Down the MII. */
   5143 		mii_down(&sc->sc_mii);
   5144 	} else {
   5145 #if 0
   5146 		/* Should we clear PHY's status properly? */
   5147 		wm_reset(sc);
   5148 #endif
   5149 	}
   5150 
   5151 	/* Stop the transmit and receive processes. */
   5152 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5153 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5154 	sc->sc_rctl &= ~RCTL_EN;
   5155 
   5156 	/*
   5157 	 * Clear the interrupt mask to ensure the device cannot assert its
   5158 	 * interrupt line.
   5159 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5160 	 * service any currently pending or shared interrupt.
   5161 	 */
   5162 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5163 	sc->sc_icr = 0;
   5164 	if (sc->sc_nintrs > 1) {
   5165 		if (sc->sc_type != WM_T_82574) {
   5166 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5167 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5168 		} else
   5169 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5170 	}
   5171 
   5172 	/* Release any queued transmit buffers. */
   5173 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5174 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5175 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5176 		mutex_enter(txq->txq_lock);
   5177 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5178 			txs = &txq->txq_soft[i];
   5179 			if (txs->txs_mbuf != NULL) {
   5180 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5181 				m_freem(txs->txs_mbuf);
   5182 				txs->txs_mbuf = NULL;
   5183 			}
   5184 		}
   5185 		if (sc->sc_type == WM_T_PCH_SPT) {
   5186 			pcireg_t preg;
   5187 			uint32_t reg;
   5188 			int nexttx;
   5189 
   5190 			/* First, disable MULR fix in FEXTNVM11 */
   5191 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5192 			reg |= FEXTNVM11_DIS_MULRFIX;
   5193 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5194 
   5195 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5196 			    WM_PCI_DESCRING_STATUS);
   5197 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5198 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5199 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5200 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5201 			    && (reg != 0)) {
   5202 				/* TX */
   5203 				printf("XXX need TX flush (reg = %08x)\n",
   5204 				    preg);
   5205 				wm_init_tx_descs(sc, txq);
   5206 				wm_init_tx_regs(sc, wmq, txq);
   5207 				nexttx = txq->txq_next;
   5208 				wm_set_dma_addr(
   5209 					&txq->txq_descs[nexttx].wtx_addr,
   5210 					WM_CDTXADDR(txq, nexttx));
   5211 				txq->txq_descs[nexttx].wtx_cmdlen
   5212 				    = htole32(WTX_CMD_IFCS | 512);
   5213 				wm_cdtxsync(txq, nexttx, 1,
   5214 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5215 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5216 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5217 				CSR_WRITE_FLUSH(sc);
   5218 				delay(250);
   5219 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5220 			}
   5221 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5222 			    WM_PCI_DESCRING_STATUS);
   5223 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5224 				/* RX */
   5225 				printf("XXX need RX flush\n");
   5226 			}
   5227 		}
   5228 		mutex_exit(txq->txq_lock);
   5229 	}
   5230 
   5231 	/* Mark the interface as down and cancel the watchdog timer. */
   5232 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5233 	ifp->if_timer = 0;
   5234 
   5235 	if (disable) {
   5236 		for (i = 0; i < sc->sc_nqueues; i++) {
   5237 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5238 			mutex_enter(rxq->rxq_lock);
   5239 			wm_rxdrain(rxq);
   5240 			mutex_exit(rxq->rxq_lock);
   5241 		}
   5242 	}
   5243 
   5244 #if 0 /* notyet */
   5245 	if (sc->sc_type >= WM_T_82544)
   5246 		CSR_WRITE(sc, WMREG_WUC, 0);
   5247 #endif
   5248 }
   5249 
   5250 static void
   5251 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5252 {
   5253 	struct mbuf *m;
   5254 	int i;
   5255 
   5256 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5257 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5258 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5259 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5260 		    m->m_data, m->m_len, m->m_flags);
   5261 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5262 	    i, i == 1 ? "" : "s");
   5263 }
   5264 
   5265 /*
   5266  * wm_82547_txfifo_stall:
   5267  *
   5268  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5269  *	reset the FIFO pointers, and restart packet transmission.
   5270  */
   5271 static void
   5272 wm_82547_txfifo_stall(void *arg)
   5273 {
   5274 	struct wm_softc *sc = arg;
   5275 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5276 
   5277 	mutex_enter(txq->txq_lock);
   5278 
   5279 	if (sc->sc_stopping)
   5280 		goto out;
   5281 
   5282 	if (txq->txq_fifo_stall) {
   5283 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5284 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5285 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5286 			/*
   5287 			 * Packets have drained.  Stop transmitter, reset
   5288 			 * FIFO pointers, restart transmitter, and kick
   5289 			 * the packet queue.
   5290 			 */
   5291 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5292 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5293 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5294 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5295 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5296 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5297 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5298 			CSR_WRITE_FLUSH(sc);
   5299 
   5300 			txq->txq_fifo_head = 0;
   5301 			txq->txq_fifo_stall = 0;
   5302 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5303 		} else {
   5304 			/*
   5305 			 * Still waiting for packets to drain; try again in
   5306 			 * another tick.
   5307 			 */
   5308 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5309 		}
   5310 	}
   5311 
   5312 out:
   5313 	mutex_exit(txq->txq_lock);
   5314 }
   5315 
   5316 /*
   5317  * wm_82547_txfifo_bugchk:
   5318  *
   5319  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5320  *	prevent enqueueing a packet that would wrap around the end
    5321 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5322  *
   5323  *	We do this by checking the amount of space before the end
   5324  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5325  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5326  *	the internal FIFO pointers to the beginning, and restart
   5327  *	transmission on the interface.
   5328  */
   5329 #define	WM_FIFO_HDR		0x10
   5330 #define	WM_82547_PAD_LEN	0x3e0
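         /*
          * Worked example (illustrative only): for a 1514-byte frame,
          *	len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536.
          * If only 256 bytes remain before the end of the FIFO, then
          *	WM_82547_PAD_LEN + space = 992 + 256 = 1248 <= 1536,
          * so the packet would wrap; we stall instead of sending it.
          */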
   5331 static int
   5332 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5333 {
   5334 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5335 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5336 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5337 
   5338 	/* Just return if already stalled. */
   5339 	if (txq->txq_fifo_stall)
   5340 		return 1;
   5341 
   5342 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5343 		/* Stall only occurs in half-duplex mode. */
   5344 		goto send_packet;
   5345 	}
   5346 
   5347 	if (len >= WM_82547_PAD_LEN + space) {
   5348 		txq->txq_fifo_stall = 1;
   5349 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5350 		return 1;
   5351 	}
   5352 
   5353  send_packet:
   5354 	txq->txq_fifo_head += len;
   5355 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5356 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5357 
   5358 	return 0;
   5359 }
   5360 
   5361 static int
   5362 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5363 {
   5364 	int error;
   5365 
   5366 	/*
   5367 	 * Allocate the control data structures, and create and load the
   5368 	 * DMA map for it.
   5369 	 *
   5370 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5371 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5372 	 * both sets within the same 4G segment.
   5373 	 */
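         	/*
         	 * The 4G constraint is enforced by the bus_dmamem_alloc(9)
         	 * call below: (bus_size_t)0x100000000ULL is its "boundary"
         	 * argument, so the allocated segment cannot cross a 4GB
         	 * boundary.
         	 */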
   5374 	if (sc->sc_type < WM_T_82544)
   5375 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5376 	else
   5377 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5379 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5380 	else
   5381 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5382 
   5383 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5384 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5385 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5386 		aprint_error_dev(sc->sc_dev,
   5387 		    "unable to allocate TX control data, error = %d\n",
   5388 		    error);
   5389 		goto fail_0;
   5390 	}
   5391 
   5392 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5393 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5394 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5395 		aprint_error_dev(sc->sc_dev,
   5396 		    "unable to map TX control data, error = %d\n", error);
   5397 		goto fail_1;
   5398 	}
   5399 
   5400 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5401 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5402 		aprint_error_dev(sc->sc_dev,
   5403 		    "unable to create TX control data DMA map, error = %d\n",
   5404 		    error);
   5405 		goto fail_2;
   5406 	}
   5407 
   5408 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5409 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5410 		aprint_error_dev(sc->sc_dev,
   5411 		    "unable to load TX control data DMA map, error = %d\n",
   5412 		    error);
   5413 		goto fail_3;
   5414 	}
   5415 
   5416 	return 0;
   5417 
   5418  fail_3:
   5419 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5420  fail_2:
   5421 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5422 	    WM_TXDESCS_SIZE(txq));
   5423  fail_1:
   5424 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5425  fail_0:
   5426 	return error;
   5427 }
   5428 
   5429 static void
   5430 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5431 {
   5432 
   5433 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5434 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5435 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5436 	    WM_TXDESCS_SIZE(txq));
   5437 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5438 }
   5439 
   5440 static int
   5441 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5442 {
   5443 	int error;
   5444 
   5445 	/*
   5446 	 * Allocate the control data structures, and create and load the
   5447 	 * DMA map for it.
   5448 	 *
   5449 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5450 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5451 	 * both sets within the same 4G segment.
   5452 	 */
   5453 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5454 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5455 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5456 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5457 		aprint_error_dev(sc->sc_dev,
   5458 		    "unable to allocate RX control data, error = %d\n",
   5459 		    error);
   5460 		goto fail_0;
   5461 	}
   5462 
   5463 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5464 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5465 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5466 		aprint_error_dev(sc->sc_dev,
   5467 		    "unable to map RX control data, error = %d\n", error);
   5468 		goto fail_1;
   5469 	}
   5470 
   5471 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5472 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5473 		aprint_error_dev(sc->sc_dev,
   5474 		    "unable to create RX control data DMA map, error = %d\n",
   5475 		    error);
   5476 		goto fail_2;
   5477 	}
   5478 
   5479 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5480 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5481 		aprint_error_dev(sc->sc_dev,
   5482 		    "unable to load RX control data DMA map, error = %d\n",
   5483 		    error);
   5484 		goto fail_3;
   5485 	}
   5486 
   5487 	return 0;
   5488 
   5489  fail_3:
   5490 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5491  fail_2:
   5492 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5493 	    rxq->rxq_desc_size);
   5494  fail_1:
   5495 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5496  fail_0:
   5497 	return error;
   5498 }
   5499 
   5500 static void
   5501 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5502 {
   5503 
   5504 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5505 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5506 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5507 	    rxq->rxq_desc_size);
   5508 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5509 }
   5510 
   5511 
   5512 static int
   5513 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5514 {
   5515 	int i, error;
   5516 
   5517 	/* Create the transmit buffer DMA maps. */
   5518 	WM_TXQUEUELEN(txq) =
   5519 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5520 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5521 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5522 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5523 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5524 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5525 			aprint_error_dev(sc->sc_dev,
   5526 			    "unable to create Tx DMA map %d, error = %d\n",
   5527 			    i, error);
   5528 			goto fail;
   5529 		}
   5530 	}
   5531 
   5532 	return 0;
   5533 
   5534  fail:
   5535 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5536 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5537 			bus_dmamap_destroy(sc->sc_dmat,
   5538 			    txq->txq_soft[i].txs_dmamap);
   5539 	}
   5540 	return error;
   5541 }
   5542 
   5543 static void
   5544 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5545 {
   5546 	int i;
   5547 
   5548 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5549 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5550 			bus_dmamap_destroy(sc->sc_dmat,
   5551 			    txq->txq_soft[i].txs_dmamap);
   5552 	}
   5553 }
   5554 
   5555 static int
   5556 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5557 {
   5558 	int i, error;
   5559 
   5560 	/* Create the receive buffer DMA maps. */
   5561 	for (i = 0; i < WM_NRXDESC; i++) {
   5562 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5563 			    MCLBYTES, 0, 0,
   5564 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5565 			aprint_error_dev(sc->sc_dev,
   5566 			    "unable to create Rx DMA map %d error = %d\n",
   5567 			    i, error);
   5568 			goto fail;
   5569 		}
   5570 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5571 	}
   5572 
   5573 	return 0;
   5574 
   5575  fail:
   5576 	for (i = 0; i < WM_NRXDESC; i++) {
   5577 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5578 			bus_dmamap_destroy(sc->sc_dmat,
   5579 			    rxq->rxq_soft[i].rxs_dmamap);
   5580 	}
   5581 	return error;
   5582 }
   5583 
   5584 static void
   5585 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5586 {
   5587 	int i;
   5588 
   5589 	for (i = 0; i < WM_NRXDESC; i++) {
   5590 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5591 			bus_dmamap_destroy(sc->sc_dmat,
   5592 			    rxq->rxq_soft[i].rxs_dmamap);
   5593 	}
   5594 }
   5595 
   5596 /*
    5597  * wm_alloc_txrx_queues:
    5598  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   5599  */
   5600 static int
   5601 wm_alloc_txrx_queues(struct wm_softc *sc)
   5602 {
   5603 	int i, error, tx_done, rx_done;
   5604 
   5605 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5606 	    KM_SLEEP);
   5607 	if (sc->sc_queue == NULL) {
   5608 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5609 		error = ENOMEM;
   5610 		goto fail_0;
   5611 	}
   5612 
   5613 	/*
   5614 	 * For transmission
   5615 	 */
   5616 	error = 0;
   5617 	tx_done = 0;
   5618 	for (i = 0; i < sc->sc_nqueues; i++) {
   5619 #ifdef WM_EVENT_COUNTERS
   5620 		int j;
   5621 		const char *xname;
   5622 #endif
   5623 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5624 		txq->txq_sc = sc;
   5625 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5626 
   5627 		error = wm_alloc_tx_descs(sc, txq);
   5628 		if (error)
   5629 			break;
   5630 		error = wm_alloc_tx_buffer(sc, txq);
   5631 		if (error) {
   5632 			wm_free_tx_descs(sc, txq);
   5633 			break;
   5634 		}
   5635 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5636 		if (txq->txq_interq == NULL) {
   5637 			wm_free_tx_descs(sc, txq);
   5638 			wm_free_tx_buffer(sc, txq);
   5639 			error = ENOMEM;
   5640 			break;
   5641 		}
   5642 
   5643 #ifdef WM_EVENT_COUNTERS
   5644 		xname = device_xname(sc->sc_dev);
   5645 
   5646 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5647 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5648 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5649 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5650 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5651 
   5652 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5653 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5654 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5655 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5656 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5657 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5658 
   5659 		for (j = 0; j < WM_NTXSEGS; j++) {
   5660 			snprintf(txq->txq_txseg_evcnt_names[j],
   5661 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5662 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5663 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5664 		}
   5665 
   5666 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5667 
   5668 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5669 #endif /* WM_EVENT_COUNTERS */
   5670 
   5671 		tx_done++;
   5672 	}
   5673 	if (error)
   5674 		goto fail_1;
   5675 
   5676 	/*
    5677 	 * For receive
   5678 	 */
   5679 	error = 0;
   5680 	rx_done = 0;
   5681 	for (i = 0; i < sc->sc_nqueues; i++) {
   5682 #ifdef WM_EVENT_COUNTERS
   5683 		const char *xname;
   5684 #endif
   5685 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5686 		rxq->rxq_sc = sc;
   5687 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5688 
   5689 		error = wm_alloc_rx_descs(sc, rxq);
   5690 		if (error)
   5691 			break;
   5692 
   5693 		error = wm_alloc_rx_buffer(sc, rxq);
   5694 		if (error) {
   5695 			wm_free_rx_descs(sc, rxq);
   5696 			break;
   5697 		}
   5698 
   5699 #ifdef WM_EVENT_COUNTERS
   5700 		xname = device_xname(sc->sc_dev);
   5701 
   5702 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5703 
   5704 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5705 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5706 #endif /* WM_EVENT_COUNTERS */
   5707 
   5708 		rx_done++;
   5709 	}
   5710 	if (error)
   5711 		goto fail_2;
   5712 
   5713 	return 0;
   5714 
   5715  fail_2:
   5716 	for (i = 0; i < rx_done; i++) {
   5717 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5718 		wm_free_rx_buffer(sc, rxq);
   5719 		wm_free_rx_descs(sc, rxq);
   5720 		if (rxq->rxq_lock)
   5721 			mutex_obj_free(rxq->rxq_lock);
   5722 	}
   5723  fail_1:
   5724 	for (i = 0; i < tx_done; i++) {
   5725 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5726 		pcq_destroy(txq->txq_interq);
   5727 		wm_free_tx_buffer(sc, txq);
   5728 		wm_free_tx_descs(sc, txq);
   5729 		if (txq->txq_lock)
   5730 			mutex_obj_free(txq->txq_lock);
   5731 	}
   5732 
   5733 	kmem_free(sc->sc_queue,
   5734 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5735  fail_0:
   5736 	return error;
   5737 }
   5738 
   5739 /*
    5740  * wm_free_txrx_queues:
    5741  *	Free {tx,rx} descriptors and {tx,rx} buffers
   5742  */
   5743 static void
   5744 wm_free_txrx_queues(struct wm_softc *sc)
   5745 {
   5746 	int i;
   5747 
   5748 	for (i = 0; i < sc->sc_nqueues; i++) {
   5749 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5750 		wm_free_rx_buffer(sc, rxq);
   5751 		wm_free_rx_descs(sc, rxq);
   5752 		if (rxq->rxq_lock)
   5753 			mutex_obj_free(rxq->rxq_lock);
   5754 	}
   5755 
   5756 	for (i = 0; i < sc->sc_nqueues; i++) {
   5757 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5758 		wm_free_tx_buffer(sc, txq);
   5759 		wm_free_tx_descs(sc, txq);
   5760 		if (txq->txq_lock)
   5761 			mutex_obj_free(txq->txq_lock);
   5762 	}
   5763 
   5764 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5765 }
   5766 
   5767 static void
   5768 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5769 {
   5770 
   5771 	KASSERT(mutex_owned(txq->txq_lock));
   5772 
   5773 	/* Initialize the transmit descriptor ring. */
   5774 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5775 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5776 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5777 	txq->txq_free = WM_NTXDESC(txq);
   5778 	txq->txq_next = 0;
   5779 }
   5780 
   5781 static void
   5782 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5783     struct wm_txqueue *txq)
   5784 {
   5785 
   5786 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5787 		device_xname(sc->sc_dev), __func__));
   5788 	KASSERT(mutex_owned(txq->txq_lock));
   5789 
   5790 	if (sc->sc_type < WM_T_82543) {
   5791 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5792 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5793 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5794 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5795 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5796 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5797 	} else {
   5798 		int qid = wmq->wmq_id;
   5799 
   5800 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5801 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5802 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5803 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5804 
   5805 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5806 			/*
   5807 			 * Don't write TDT before TCTL.EN is set.
    5808 			 * See the datasheet.
   5809 			 */
   5810 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5811 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5812 			    | TXDCTL_WTHRESH(0));
   5813 		else {
   5814 			/* ITR / 4 */
   5815 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5816 			if (sc->sc_type >= WM_T_82540) {
    5817 				/* Should be the same value as TIDV */
   5818 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5819 			}
   5820 
   5821 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5822 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5823 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5824 		}
   5825 	}
   5826 }
   5827 
   5828 static void
   5829 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5830 {
   5831 	int i;
   5832 
   5833 	KASSERT(mutex_owned(txq->txq_lock));
   5834 
   5835 	/* Initialize the transmit job descriptors. */
   5836 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5837 		txq->txq_soft[i].txs_mbuf = NULL;
   5838 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5839 	txq->txq_snext = 0;
   5840 	txq->txq_sdirty = 0;
   5841 }
   5842 
   5843 static void
   5844 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5845     struct wm_txqueue *txq)
   5846 {
   5847 
   5848 	KASSERT(mutex_owned(txq->txq_lock));
   5849 
   5850 	/*
   5851 	 * Set up some register offsets that are different between
   5852 	 * the i82542 and the i82543 and later chips.
   5853 	 */
   5854 	if (sc->sc_type < WM_T_82543)
   5855 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5856 	else
   5857 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5858 
   5859 	wm_init_tx_descs(sc, txq);
   5860 	wm_init_tx_regs(sc, wmq, txq);
   5861 	wm_init_tx_buffer(sc, txq);
   5862 }
   5863 
   5864 static void
   5865 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5866     struct wm_rxqueue *rxq)
   5867 {
   5868 
   5869 	KASSERT(mutex_owned(rxq->rxq_lock));
   5870 
   5871 	/*
   5872 	 * Initialize the receive descriptor and receive job
   5873 	 * descriptor rings.
   5874 	 */
   5875 	if (sc->sc_type < WM_T_82543) {
   5876 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5877 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5878 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5879 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5880 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5881 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5882 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5883 
   5884 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5885 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5886 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5887 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5888 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5889 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5890 	} else {
   5891 		int qid = wmq->wmq_id;
   5892 
   5893 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5894 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5895 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5896 
   5897 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5898 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5899 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
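         			/*
         			 * The check above ensures MCLBYTES is a multiple
         			 * of the SRRCTL BSIZEPKT granularity.  E.g.,
         			 * assuming the usual 1KB units (a shift of 10),
         			 * MCLBYTES = 2048 yields a BSIZEPKT field of 2.
         			 */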
   5900 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5901 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5902 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5903 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5904 			    | RXDCTL_WTHRESH(1));
   5905 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5906 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5907 		} else {
   5908 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5909 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5910 			/* ITR / 4 */
   5911 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5912 			/* MUST be the same value as RDTR */
   5913 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5914 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5915 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5916 		}
   5917 	}
   5918 }
   5919 
   5920 static int
   5921 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5922 {
   5923 	struct wm_rxsoft *rxs;
   5924 	int error, i;
   5925 
   5926 	KASSERT(mutex_owned(rxq->rxq_lock));
   5927 
   5928 	for (i = 0; i < WM_NRXDESC; i++) {
   5929 		rxs = &rxq->rxq_soft[i];
   5930 		if (rxs->rxs_mbuf == NULL) {
   5931 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5932 				log(LOG_ERR, "%s: unable to allocate or map "
   5933 				    "rx buffer %d, error = %d\n",
   5934 				    device_xname(sc->sc_dev), i, error);
   5935 				/*
   5936 				 * XXX Should attempt to run with fewer receive
   5937 				 * XXX buffers instead of just failing.
   5938 				 */
   5939 				wm_rxdrain(rxq);
   5940 				return ENOMEM;
   5941 			}
   5942 		} else {
   5943 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5944 				wm_init_rxdesc(rxq, i);
   5945 			/*
    5946 			 * For 82575 and newer devices, the RX descriptors
    5947 			 * must be initialized after RCTL.EN is set in
    5948 			 * wm_set_filter().
   5949 			 */
   5950 		}
   5951 	}
   5952 	rxq->rxq_ptr = 0;
   5953 	rxq->rxq_discard = 0;
   5954 	WM_RXCHAIN_RESET(rxq);
   5955 
   5956 	return 0;
   5957 }
   5958 
   5959 static int
   5960 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5961     struct wm_rxqueue *rxq)
   5962 {
   5963 
   5964 	KASSERT(mutex_owned(rxq->rxq_lock));
   5965 
   5966 	/*
   5967 	 * Set up some register offsets that are different between
   5968 	 * the i82542 and the i82543 and later chips.
   5969 	 */
   5970 	if (sc->sc_type < WM_T_82543)
   5971 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5972 	else
   5973 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   5974 
   5975 	wm_init_rx_regs(sc, wmq, rxq);
   5976 	return wm_init_rx_buffer(sc, rxq);
   5977 }
   5978 
   5979 /*
    5980  * wm_init_txrx_queues:
    5981  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   5982  */
   5983 static int
   5984 wm_init_txrx_queues(struct wm_softc *sc)
   5985 {
   5986 	int i, error = 0;
   5987 
   5988 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5989 		device_xname(sc->sc_dev), __func__));
   5990 
   5991 	for (i = 0; i < sc->sc_nqueues; i++) {
   5992 		struct wm_queue *wmq = &sc->sc_queue[i];
   5993 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5994 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5995 
   5996 		mutex_enter(txq->txq_lock);
   5997 		wm_init_tx_queue(sc, wmq, txq);
   5998 		mutex_exit(txq->txq_lock);
   5999 
   6000 		mutex_enter(rxq->rxq_lock);
   6001 		error = wm_init_rx_queue(sc, wmq, rxq);
   6002 		mutex_exit(rxq->rxq_lock);
   6003 		if (error)
   6004 			break;
   6005 	}
   6006 
   6007 	return error;
   6008 }
   6009 
   6010 /*
   6011  * wm_tx_offload:
   6012  *
   6013  *	Set up TCP/IP checksumming parameters for the
   6014  *	specified packet.
   6015  */
   6016 static int
   6017 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6018     uint8_t *fieldsp)
   6019 {
   6020 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6021 	struct mbuf *m0 = txs->txs_mbuf;
   6022 	struct livengood_tcpip_ctxdesc *t;
   6023 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6024 	uint32_t ipcse;
   6025 	struct ether_header *eh;
   6026 	int offset, iphl;
   6027 	uint8_t fields;
   6028 
   6029 	/*
   6030 	 * XXX It would be nice if the mbuf pkthdr had offset
   6031 	 * fields for the protocol headers.
   6032 	 */
   6033 
   6034 	eh = mtod(m0, struct ether_header *);
   6035 	switch (htons(eh->ether_type)) {
   6036 	case ETHERTYPE_IP:
   6037 	case ETHERTYPE_IPV6:
   6038 		offset = ETHER_HDR_LEN;
   6039 		break;
   6040 
   6041 	case ETHERTYPE_VLAN:
   6042 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6043 		break;
   6044 
   6045 	default:
   6046 		/*
    6047 		 * We don't support this protocol or encapsulation.
   6048 		 */
   6049 		*fieldsp = 0;
   6050 		*cmdp = 0;
   6051 		return 0;
   6052 	}
   6053 
   6054 	if ((m0->m_pkthdr.csum_flags &
   6055 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6056 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6057 	} else {
   6058 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6059 	}
   6060 	ipcse = offset + iphl - 1;
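         	/*
         	 * Example (illustrative): for an untagged IPv4 packet with a
         	 * 20-byte IP header, offset = ETHER_HDR_LEN = 14 and iphl = 20,
         	 * so ipcse = 14 + 20 - 1 = 33, the offset of the last byte of
         	 * the IP header.
         	 */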
   6061 
   6062 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6063 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6064 	seg = 0;
   6065 	fields = 0;
   6066 
   6067 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6068 		int hlen = offset + iphl;
   6069 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6070 
   6071 		if (__predict_false(m0->m_len <
   6072 				    (hlen + sizeof(struct tcphdr)))) {
   6073 			/*
   6074 			 * TCP/IP headers are not in the first mbuf; we need
   6075 			 * to do this the slow and painful way.  Let's just
   6076 			 * hope this doesn't happen very often.
   6077 			 */
   6078 			struct tcphdr th;
   6079 
   6080 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6081 
   6082 			m_copydata(m0, hlen, sizeof(th), &th);
   6083 			if (v4) {
   6084 				struct ip ip;
   6085 
   6086 				m_copydata(m0, offset, sizeof(ip), &ip);
   6087 				ip.ip_len = 0;
   6088 				m_copyback(m0,
   6089 				    offset + offsetof(struct ip, ip_len),
   6090 				    sizeof(ip.ip_len), &ip.ip_len);
   6091 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6092 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6093 			} else {
   6094 				struct ip6_hdr ip6;
   6095 
   6096 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6097 				ip6.ip6_plen = 0;
   6098 				m_copyback(m0,
   6099 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6100 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6101 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6102 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6103 			}
   6104 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6105 			    sizeof(th.th_sum), &th.th_sum);
   6106 
   6107 			hlen += th.th_off << 2;
   6108 		} else {
   6109 			/*
   6110 			 * TCP/IP headers are in the first mbuf; we can do
   6111 			 * this the easy way.
   6112 			 */
   6113 			struct tcphdr *th;
   6114 
   6115 			if (v4) {
   6116 				struct ip *ip =
   6117 				    (void *)(mtod(m0, char *) + offset);
   6118 				th = (void *)(mtod(m0, char *) + hlen);
   6119 
   6120 				ip->ip_len = 0;
   6121 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6122 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6123 			} else {
   6124 				struct ip6_hdr *ip6 =
   6125 				    (void *)(mtod(m0, char *) + offset);
   6126 				th = (void *)(mtod(m0, char *) + hlen);
   6127 
   6128 				ip6->ip6_plen = 0;
   6129 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6130 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6131 			}
   6132 			hlen += th->th_off << 2;
   6133 		}
   6134 
   6135 		if (v4) {
   6136 			WM_Q_EVCNT_INCR(txq, txtso);
   6137 			cmdlen |= WTX_TCPIP_CMD_IP;
   6138 		} else {
   6139 			WM_Q_EVCNT_INCR(txq, txtso6);
   6140 			ipcse = 0;
   6141 		}
   6142 		cmd |= WTX_TCPIP_CMD_TSE;
   6143 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6144 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6145 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6146 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6147 	}
   6148 
   6149 	/*
   6150 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6151 	 * offload feature, if we load the context descriptor, we
   6152 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6153 	 */
   6154 
   6155 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6156 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6157 	    WTX_TCPIP_IPCSE(ipcse);
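         	/*
         	 * Continuing the example above: offsetof(struct ip, ip_sum)
         	 * is 10, so an untagged IPv4 frame gets IPCSS = 14,
         	 * IPCSO = 14 + 10 = 24 and IPCSE = 33: checksum bytes
         	 * 14..33 and store the result at byte offset 24.
         	 */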
   6158 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6159 		WM_Q_EVCNT_INCR(txq, txipsum);
   6160 		fields |= WTX_IXSM;
   6161 	}
   6162 
   6163 	offset += iphl;
   6164 
   6165 	if (m0->m_pkthdr.csum_flags &
   6166 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6167 		WM_Q_EVCNT_INCR(txq, txtusum);
   6168 		fields |= WTX_TXSM;
   6169 		tucs = WTX_TCPIP_TUCSS(offset) |
   6170 		    WTX_TCPIP_TUCSO(offset +
   6171 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6172 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6173 	} else if ((m0->m_pkthdr.csum_flags &
   6174 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6175 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6176 		fields |= WTX_TXSM;
   6177 		tucs = WTX_TCPIP_TUCSS(offset) |
   6178 		    WTX_TCPIP_TUCSO(offset +
   6179 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6180 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6181 	} else {
   6182 		/* Just initialize it to a valid TCP context. */
   6183 		tucs = WTX_TCPIP_TUCSS(offset) |
   6184 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6185 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6186 	}
   6187 
   6188 	/* Fill in the context descriptor. */
   6189 	t = (struct livengood_tcpip_ctxdesc *)
   6190 	    &txq->txq_descs[txq->txq_next];
   6191 	t->tcpip_ipcs = htole32(ipcs);
   6192 	t->tcpip_tucs = htole32(tucs);
   6193 	t->tcpip_cmdlen = htole32(cmdlen);
   6194 	t->tcpip_seg = htole32(seg);
   6195 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6196 
   6197 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6198 	txs->txs_ndesc++;
   6199 
   6200 	*cmdp = cmd;
   6201 	*fieldsp = fields;
   6202 
   6203 	return 0;
   6204 }
   6205 
   6206 /*
   6207  * wm_start:		[ifnet interface function]
   6208  *
   6209  *	Start packet transmission on the interface.
   6210  */
   6211 static void
   6212 wm_start(struct ifnet *ifp)
   6213 {
   6214 	struct wm_softc *sc = ifp->if_softc;
   6215 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6216 
   6217 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6218 
   6219 	mutex_enter(txq->txq_lock);
   6220 	if (!sc->sc_stopping)
   6221 		wm_start_locked(ifp);
   6222 	mutex_exit(txq->txq_lock);
   6223 }
   6224 
   6225 static void
   6226 wm_start_locked(struct ifnet *ifp)
   6227 {
   6228 	struct wm_softc *sc = ifp->if_softc;
   6229 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6230 	struct mbuf *m0;
   6231 	struct m_tag *mtag;
   6232 	struct wm_txsoft *txs;
   6233 	bus_dmamap_t dmamap;
   6234 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6235 	bus_addr_t curaddr;
   6236 	bus_size_t seglen, curlen;
   6237 	uint32_t cksumcmd;
   6238 	uint8_t cksumfields;
   6239 
   6240 	KASSERT(mutex_owned(txq->txq_lock));
   6241 
   6242 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6243 		return;
   6244 
   6245 	/* Remember the previous number of free descriptors. */
   6246 	ofree = txq->txq_free;
   6247 
   6248 	/*
   6249 	 * Loop through the send queue, setting up transmit descriptors
   6250 	 * until we drain the queue, or use up all available transmit
   6251 	 * descriptors.
   6252 	 */
   6253 	for (;;) {
   6254 		m0 = NULL;
   6255 
   6256 		/* Get a work queue entry. */
   6257 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6258 			wm_txeof(sc, txq);
   6259 			if (txq->txq_sfree == 0) {
   6260 				DPRINTF(WM_DEBUG_TX,
   6261 				    ("%s: TX: no free job descriptors\n",
   6262 					device_xname(sc->sc_dev)));
   6263 				WM_Q_EVCNT_INCR(txq, txsstall);
   6264 				break;
   6265 			}
   6266 		}
   6267 
   6268 		/* Grab a packet off the queue. */
   6269 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6270 		if (m0 == NULL)
   6271 			break;
   6272 
   6273 		DPRINTF(WM_DEBUG_TX,
   6274 		    ("%s: TX: have packet to transmit: %p\n",
   6275 		    device_xname(sc->sc_dev), m0));
   6276 
   6277 		txs = &txq->txq_soft[txq->txq_snext];
   6278 		dmamap = txs->txs_dmamap;
   6279 
   6280 		use_tso = (m0->m_pkthdr.csum_flags &
   6281 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6282 
   6283 		/*
   6284 		 * So says the Linux driver:
   6285 		 * The controller does a simple calculation to make sure
   6286 		 * there is enough room in the FIFO before initiating the
   6287 		 * DMA for each buffer.  The calc is:
   6288 		 *	4 = ceil(buffer len / MSS)
   6289 		 * To make sure we don't overrun the FIFO, adjust the max
   6290 		 * buffer len if the MSS drops.
   6291 		 */
   6292 		dmamap->dm_maxsegsz =
   6293 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6294 		    ? m0->m_pkthdr.segsz << 2
   6295 		    : WTX_MAX_LEN;
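         		/*
         		 * Example (illustrative): for a TSO packet with an
         		 * MSS of 1448, dm_maxsegsz becomes 1448 << 2 = 5792,
         		 * assuming that is below the WTX_MAX_LEN limit.
         		 */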
   6296 
   6297 		/*
   6298 		 * Load the DMA map.  If this fails, the packet either
   6299 		 * didn't fit in the allotted number of segments, or we
   6300 		 * were short on resources.  For the too-many-segments
   6301 		 * case, we simply report an error and drop the packet,
   6302 		 * since we can't sanely copy a jumbo packet to a single
   6303 		 * buffer.
   6304 		 */
   6305 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6306 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6307 		if (error) {
   6308 			if (error == EFBIG) {
   6309 				WM_Q_EVCNT_INCR(txq, txdrop);
   6310 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6311 				    "DMA segments, dropping...\n",
   6312 				    device_xname(sc->sc_dev));
   6313 				wm_dump_mbuf_chain(sc, m0);
   6314 				m_freem(m0);
   6315 				continue;
   6316 			}
   6317 			/*  Short on resources, just stop for now. */
   6318 			DPRINTF(WM_DEBUG_TX,
   6319 			    ("%s: TX: dmamap load failed: %d\n",
   6320 			    device_xname(sc->sc_dev), error));
   6321 			break;
   6322 		}
   6323 
   6324 		segs_needed = dmamap->dm_nsegs;
   6325 		if (use_tso) {
   6326 			/* For sentinel descriptor; see below. */
   6327 			segs_needed++;
   6328 		}
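         		/*
         		 * The extra descriptor reserved here is consumed in
         		 * the descriptor-fill loop below, where the last
         		 * segment is split to append the 4-byte sentinel.
         		 */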
   6329 
   6330 		/*
   6331 		 * Ensure we have enough descriptors free to describe
   6332 		 * the packet.  Note, we always reserve one descriptor
   6333 		 * at the end of the ring due to the semantics of the
   6334 		 * TDT register, plus one more in the event we need
   6335 		 * to load offload context.
   6336 		 */
   6337 		if (segs_needed > txq->txq_free - 2) {
   6338 			/*
    6339 			 * Not enough free descriptors to transmit this
    6340 			 * packet.  We haven't committed anything yet, so
    6341 			 * just unload the DMA map and punt; the packet is
    6342 			 * dropped after the loop below.  Notify the upper
    6343 			 * layer that there are no more slots left.
   6344 			 */
   6345 			DPRINTF(WM_DEBUG_TX,
   6346 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6347 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6348 			    segs_needed, txq->txq_free - 1));
   6349 			ifp->if_flags |= IFF_OACTIVE;
   6350 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6351 			WM_Q_EVCNT_INCR(txq, txdstall);
   6352 			break;
   6353 		}
   6354 
   6355 		/*
   6356 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6357 		 * once we know we can transmit the packet, since we
   6358 		 * do some internal FIFO space accounting here.
   6359 		 */
   6360 		if (sc->sc_type == WM_T_82547 &&
   6361 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6362 			DPRINTF(WM_DEBUG_TX,
   6363 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6364 			    device_xname(sc->sc_dev)));
   6365 			ifp->if_flags |= IFF_OACTIVE;
   6366 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6367 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6368 			break;
   6369 		}
   6370 
   6371 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6372 
   6373 		DPRINTF(WM_DEBUG_TX,
   6374 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6375 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6376 
   6377 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6378 
   6379 		/*
   6380 		 * Store a pointer to the packet so that we can free it
   6381 		 * later.
   6382 		 *
   6383 		 * Initially, we consider the number of descriptors the
   6384 		 * packet uses the number of DMA segments.  This may be
   6385 		 * incremented by 1 if we do checksum offload (a descriptor
   6386 		 * is used to set the checksum context).
   6387 		 */
   6388 		txs->txs_mbuf = m0;
   6389 		txs->txs_firstdesc = txq->txq_next;
   6390 		txs->txs_ndesc = segs_needed;
   6391 
   6392 		/* Set up offload parameters for this packet. */
   6393 		if (m0->m_pkthdr.csum_flags &
   6394 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6395 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6396 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6397 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6398 					  &cksumfields) != 0) {
   6399 				/* Error message already displayed. */
   6400 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6401 				continue;
   6402 			}
   6403 		} else {
   6404 			cksumcmd = 0;
   6405 			cksumfields = 0;
   6406 		}
   6407 
   6408 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6409 
   6410 		/* Sync the DMA map. */
   6411 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6412 		    BUS_DMASYNC_PREWRITE);
   6413 
   6414 		/* Initialize the transmit descriptor. */
   6415 		for (nexttx = txq->txq_next, seg = 0;
   6416 		     seg < dmamap->dm_nsegs; seg++) {
   6417 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6418 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6419 			     seglen != 0;
   6420 			     curaddr += curlen, seglen -= curlen,
   6421 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6422 				curlen = seglen;
   6423 
   6424 				/*
   6425 				 * So says the Linux driver:
   6426 				 * Work around for premature descriptor
   6427 				 * write-backs in TSO mode.  Append a
   6428 				 * 4-byte sentinel descriptor.
   6429 				 */
   6430 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6431 				    curlen > 8)
   6432 					curlen -= 4;
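         				/*
         				 * Shaving 4 bytes here leaves seglen
         				 * nonzero after this pass, so the loop
         				 * runs once more and emits the 4-byte
         				 * sentinel descriptor reserved in
         				 * segs_needed above.
         				 */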
   6433 
   6434 				wm_set_dma_addr(
   6435 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6436 				txq->txq_descs[nexttx].wtx_cmdlen
   6437 				    = htole32(cksumcmd | curlen);
   6438 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6439 				    = 0;
   6440 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6441 				    = cksumfields;
    6442 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6443 				lasttx = nexttx;
   6444 
   6445 				DPRINTF(WM_DEBUG_TX,
   6446 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6447 				     "len %#04zx\n",
   6448 				    device_xname(sc->sc_dev), nexttx,
   6449 				    (uint64_t)curaddr, curlen));
   6450 			}
   6451 		}
   6452 
   6453 		KASSERT(lasttx != -1);
   6454 
   6455 		/*
   6456 		 * Set up the command byte on the last descriptor of
   6457 		 * the packet.  If we're in the interrupt delay window,
   6458 		 * delay the interrupt.
   6459 		 */
   6460 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6461 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
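         		/*
         		 * WTX_CMD_RS is set on the last descriptor only, so
         		 * the device reports status (the WTX_ST_DD bit that
         		 * wm_txeof() polls) just for txs_lastdesc;
         		 * intermediate descriptors never carry valid status.
         		 */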
   6462 
   6463 		/*
   6464 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6465 		 * up the descriptor to encapsulate the packet for us.
   6466 		 *
   6467 		 * This is only valid on the last descriptor of the packet.
   6468 		 */
   6469 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6470 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6471 			    htole32(WTX_CMD_VLE);
   6472 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6473 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6474 		}
   6475 
   6476 		txs->txs_lastdesc = lasttx;
   6477 
   6478 		DPRINTF(WM_DEBUG_TX,
   6479 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6480 		    device_xname(sc->sc_dev),
   6481 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6482 
   6483 		/* Sync the descriptors we're using. */
   6484 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6485 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6486 
   6487 		/* Give the packet to the chip. */
   6488 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6489 
   6490 		DPRINTF(WM_DEBUG_TX,
   6491 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6492 
   6493 		DPRINTF(WM_DEBUG_TX,
   6494 		    ("%s: TX: finished transmitting packet, job %d\n",
   6495 		    device_xname(sc->sc_dev), txq->txq_snext));
   6496 
   6497 		/* Advance the tx pointer. */
   6498 		txq->txq_free -= txs->txs_ndesc;
   6499 		txq->txq_next = nexttx;
   6500 
   6501 		txq->txq_sfree--;
   6502 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6503 
   6504 		/* Pass the packet to any BPF listeners. */
   6505 		bpf_mtap(ifp, m0);
   6506 	}
   6507 
   6508 	if (m0 != NULL) {
   6509 		ifp->if_flags |= IFF_OACTIVE;
   6510 		WM_Q_EVCNT_INCR(txq, txdrop);
   6511 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6512 			__func__));
   6513 		m_freem(m0);
   6514 	}
   6515 
   6516 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6517 		/* No more slots; notify upper layer. */
   6518 		ifp->if_flags |= IFF_OACTIVE;
   6519 	}
   6520 
   6521 	if (txq->txq_free != ofree) {
   6522 		/* Set a watchdog timer in case the chip flakes out. */
   6523 		ifp->if_timer = 5;
   6524 	}
   6525 }
   6526 
   6527 /*
   6528  * wm_nq_tx_offload:
   6529  *
   6530  *	Set up TCP/IP checksumming parameters for the
   6531  *	specified packet, for NEWQUEUE devices
   6532  */
   6533 static int
   6534 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6535     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6536 {
   6537 	struct mbuf *m0 = txs->txs_mbuf;
   6538 	struct m_tag *mtag;
   6539 	uint32_t vl_len, mssidx, cmdc;
   6540 	struct ether_header *eh;
   6541 	int offset, iphl;
   6542 
   6543 	/*
   6544 	 * XXX It would be nice if the mbuf pkthdr had offset
   6545 	 * fields for the protocol headers.
   6546 	 */
   6547 	*cmdlenp = 0;
   6548 	*fieldsp = 0;
   6549 
   6550 	eh = mtod(m0, struct ether_header *);
    6551 	switch (ntohs(eh->ether_type)) {
   6552 	case ETHERTYPE_IP:
   6553 	case ETHERTYPE_IPV6:
   6554 		offset = ETHER_HDR_LEN;
   6555 		break;
   6556 
   6557 	case ETHERTYPE_VLAN:
   6558 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6559 		break;
   6560 
   6561 	default:
   6562 		/* Don't support this protocol or encapsulation. */
   6563 		*do_csum = false;
   6564 		return 0;
   6565 	}
   6566 	*do_csum = true;
   6567 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6568 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6569 
   6570 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6571 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6572 
   6573 	if ((m0->m_pkthdr.csum_flags &
   6574 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6575 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6576 	} else {
   6577 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6578 	}
   6579 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6580 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6581 
   6582 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6583 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6584 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6585 		*cmdlenp |= NQTX_CMD_VLE;
   6586 	}
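         	/*
         	 * At this point vl_len packs the MAC header length, the IP
         	 * header length and (optionally) the VLAN tag into the single
         	 * 32-bit word of the advanced context descriptor, as laid out
         	 * by the NQTXC_VLLEN_* shift/mask pairs.
         	 */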
   6587 
   6588 	mssidx = 0;
   6589 
   6590 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6591 		int hlen = offset + iphl;
   6592 		int tcp_hlen;
   6593 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6594 
   6595 		if (__predict_false(m0->m_len <
   6596 				    (hlen + sizeof(struct tcphdr)))) {
   6597 			/*
   6598 			 * TCP/IP headers are not in the first mbuf; we need
   6599 			 * to do this the slow and painful way.  Let's just
   6600 			 * hope this doesn't happen very often.
   6601 			 */
   6602 			struct tcphdr th;
   6603 
   6604 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6605 
   6606 			m_copydata(m0, hlen, sizeof(th), &th);
   6607 			if (v4) {
   6608 				struct ip ip;
   6609 
   6610 				m_copydata(m0, offset, sizeof(ip), &ip);
   6611 				ip.ip_len = 0;
   6612 				m_copyback(m0,
   6613 				    offset + offsetof(struct ip, ip_len),
   6614 				    sizeof(ip.ip_len), &ip.ip_len);
   6615 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6616 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6617 			} else {
   6618 				struct ip6_hdr ip6;
   6619 
   6620 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6621 				ip6.ip6_plen = 0;
   6622 				m_copyback(m0,
   6623 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6624 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6625 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6626 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6627 			}
   6628 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6629 			    sizeof(th.th_sum), &th.th_sum);
   6630 
   6631 			tcp_hlen = th.th_off << 2;
   6632 		} else {
   6633 			/*
   6634 			 * TCP/IP headers are in the first mbuf; we can do
   6635 			 * this the easy way.
   6636 			 */
   6637 			struct tcphdr *th;
   6638 
   6639 			if (v4) {
   6640 				struct ip *ip =
   6641 				    (void *)(mtod(m0, char *) + offset);
   6642 				th = (void *)(mtod(m0, char *) + hlen);
   6643 
   6644 				ip->ip_len = 0;
   6645 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6646 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6647 			} else {
   6648 				struct ip6_hdr *ip6 =
   6649 				    (void *)(mtod(m0, char *) + offset);
   6650 				th = (void *)(mtod(m0, char *) + hlen);
   6651 
   6652 				ip6->ip6_plen = 0;
   6653 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6654 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6655 			}
   6656 			tcp_hlen = th->th_off << 2;
   6657 		}
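         		/*
         		 * In both paths above the IP length field is zeroed
         		 * and th_sum is seeded with a pseudo-header checksum
         		 * that excludes the length: during TSO the hardware
         		 * fills in the per-segment length and folds it into
         		 * the checksum of each frame it emits.
         		 */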
   6658 		hlen += tcp_hlen;
   6659 		*cmdlenp |= NQTX_CMD_TSE;
   6660 
   6661 		if (v4) {
   6662 			WM_Q_EVCNT_INCR(txq, txtso);
   6663 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6664 		} else {
   6665 			WM_Q_EVCNT_INCR(txq, txtso6);
   6666 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6667 		}
    6668 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    6669 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6670 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6671 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6672 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6673 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6674 	} else {
   6675 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6676 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6677 	}
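         	/*
         	 * So for TSO, PAYLEN is the L4 payload only; for plain
         	 * checksum offload it is the whole packet length.
         	 * Illustrative example: a 65536-byte TSO payload with
         	 * MSS 1460 leaves this chain as roughly 65536 / 1460 = 45
         	 * wire frames cut by the hardware.
         	 */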
   6678 
   6679 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6680 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6681 		cmdc |= NQTXC_CMD_IP4;
   6682 	}
   6683 
   6684 	if (m0->m_pkthdr.csum_flags &
   6685 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6686 		WM_Q_EVCNT_INCR(txq, txtusum);
   6687 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6688 			cmdc |= NQTXC_CMD_TCP;
   6689 		} else {
   6690 			cmdc |= NQTXC_CMD_UDP;
   6691 		}
   6692 		cmdc |= NQTXC_CMD_IP4;
   6693 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6694 	}
   6695 	if (m0->m_pkthdr.csum_flags &
   6696 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6697 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6698 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6699 			cmdc |= NQTXC_CMD_TCP;
   6700 		} else {
   6701 			cmdc |= NQTXC_CMD_UDP;
   6702 		}
   6703 		cmdc |= NQTXC_CMD_IP6;
   6704 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6705 	}
   6706 
   6707 	/* Fill in the context descriptor. */
   6708 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6709 	    htole32(vl_len);
   6710 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6711 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6712 	    htole32(cmdc);
   6713 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6714 	    htole32(mssidx);
   6715 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6716 	DPRINTF(WM_DEBUG_TX,
   6717 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6718 	    txq->txq_next, 0, vl_len));
   6719 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6720 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6721 	txs->txs_ndesc++;
   6722 	return 0;
   6723 }
   6724 
   6725 /*
   6726  * wm_nq_start:		[ifnet interface function]
   6727  *
   6728  *	Start packet transmission on the interface for NEWQUEUE devices
   6729  */
   6730 static void
   6731 wm_nq_start(struct ifnet *ifp)
   6732 {
   6733 	struct wm_softc *sc = ifp->if_softc;
   6734 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6735 
   6736 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6737 
   6738 	mutex_enter(txq->txq_lock);
   6739 	if (!sc->sc_stopping)
   6740 		wm_nq_start_locked(ifp);
   6741 	mutex_exit(txq->txq_lock);
   6742 }
   6743 
   6744 static void
   6745 wm_nq_start_locked(struct ifnet *ifp)
   6746 {
   6747 	struct wm_softc *sc = ifp->if_softc;
   6748 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6749 
   6750 	wm_nq_send_common_locked(ifp, txq, false);
   6751 }
   6752 
   6753 static inline int
   6754 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6755 {
   6756 	struct wm_softc *sc = ifp->if_softc;
   6757 	u_int cpuid = cpu_index(curcpu());
   6758 
   6759 	/*
    6760 	 * Currently a simple strategy: distribute by CPU index.
    6761 	 * TODO:
    6762 	 * distribute by flowid (RSS hash value).
   6763 	 */
   6764 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6765 }
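
         /*
          * Illustrative mapping for the above (assuming sc_nqueues = 4 and
          * sc_affinity_offset = 1):
          *
          *	cpuid 0 -> (0 + 1) % 4 = queue 1
          *	cpuid 1 -> (1 + 1) % 4 = queue 2
          *	cpuid 3 -> (3 + 1) % 4 = queue 0
          *
          * Each CPU sticks to one Tx queue, keeping txq_lock contention low.
          */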
   6766 
   6767 static int
   6768 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6769 {
   6770 	int qid;
   6771 	struct wm_softc *sc = ifp->if_softc;
   6772 	struct wm_txqueue *txq;
   6773 
   6774 	qid = wm_nq_select_txqueue(ifp, m);
   6775 	txq = &sc->sc_queue[qid].wmq_txq;
   6776 
   6777 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6778 		m_freem(m);
   6779 		WM_Q_EVCNT_INCR(txq, txdrop);
   6780 		return ENOBUFS;
   6781 	}
   6782 
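         	/*
         	 * Opportunistically drain the queue: if the lock is contended,
         	 * the packet just enqueued stays in txq_interq and is picked
         	 * up by the current lock holder, a later wm_nq_transmit()
         	 * call, or the Tx/Rx MSI-X handler (wm_txrxintr_msix()).
         	 */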
   6783 	if (mutex_tryenter(txq->txq_lock)) {
   6784 		/* XXXX should be per TX queue */
   6785 		ifp->if_obytes += m->m_pkthdr.len;
   6786 		if (m->m_flags & M_MCAST)
   6787 			ifp->if_omcasts++;
   6788 
   6789 		if (!sc->sc_stopping)
   6790 			wm_nq_transmit_locked(ifp, txq);
   6791 		mutex_exit(txq->txq_lock);
   6792 	}
   6793 
   6794 	return 0;
   6795 }
   6796 
   6797 static void
   6798 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6799 {
   6800 
   6801 	wm_nq_send_common_locked(ifp, txq, true);
   6802 }
   6803 
   6804 static void
   6805 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6806     bool is_transmit)
   6807 {
   6808 	struct wm_softc *sc = ifp->if_softc;
   6809 	struct mbuf *m0;
   6810 	struct m_tag *mtag;
   6811 	struct wm_txsoft *txs;
   6812 	bus_dmamap_t dmamap;
   6813 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6814 	bool do_csum, sent;
   6815 
   6816 	KASSERT(mutex_owned(txq->txq_lock));
   6817 
   6818 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6819 		return;
   6820 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6821 		return;
   6822 
   6823 	sent = false;
   6824 
   6825 	/*
   6826 	 * Loop through the send queue, setting up transmit descriptors
   6827 	 * until we drain the queue, or use up all available transmit
   6828 	 * descriptors.
   6829 	 */
   6830 	for (;;) {
   6831 		m0 = NULL;
   6832 
   6833 		/* Get a work queue entry. */
   6834 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6835 			wm_txeof(sc, txq);
   6836 			if (txq->txq_sfree == 0) {
   6837 				DPRINTF(WM_DEBUG_TX,
   6838 				    ("%s: TX: no free job descriptors\n",
   6839 					device_xname(sc->sc_dev)));
   6840 				WM_Q_EVCNT_INCR(txq, txsstall);
   6841 				break;
   6842 			}
   6843 		}
   6844 
   6845 		/* Grab a packet off the queue. */
   6846 		if (is_transmit)
   6847 			m0 = pcq_get(txq->txq_interq);
   6848 		else
   6849 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6850 		if (m0 == NULL)
   6851 			break;
   6852 
   6853 		DPRINTF(WM_DEBUG_TX,
   6854 		    ("%s: TX: have packet to transmit: %p\n",
   6855 		    device_xname(sc->sc_dev), m0));
   6856 
   6857 		txs = &txq->txq_soft[txq->txq_snext];
   6858 		dmamap = txs->txs_dmamap;
   6859 
   6860 		/*
   6861 		 * Load the DMA map.  If this fails, the packet either
   6862 		 * didn't fit in the allotted number of segments, or we
   6863 		 * were short on resources.  For the too-many-segments
   6864 		 * case, we simply report an error and drop the packet,
   6865 		 * since we can't sanely copy a jumbo packet to a single
   6866 		 * buffer.
   6867 		 */
   6868 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6869 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6870 		if (error) {
   6871 			if (error == EFBIG) {
   6872 				WM_Q_EVCNT_INCR(txq, txdrop);
   6873 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6874 				    "DMA segments, dropping...\n",
   6875 				    device_xname(sc->sc_dev));
   6876 				wm_dump_mbuf_chain(sc, m0);
   6877 				m_freem(m0);
   6878 				continue;
   6879 			}
   6880 			/* Short on resources, just stop for now. */
   6881 			DPRINTF(WM_DEBUG_TX,
   6882 			    ("%s: TX: dmamap load failed: %d\n",
   6883 			    device_xname(sc->sc_dev), error));
   6884 			break;
   6885 		}
   6886 
   6887 		segs_needed = dmamap->dm_nsegs;
   6888 
   6889 		/*
   6890 		 * Ensure we have enough descriptors free to describe
   6891 		 * the packet.  Note, we always reserve one descriptor
   6892 		 * at the end of the ring due to the semantics of the
   6893 		 * TDT register, plus one more in the event we need
   6894 		 * to load offload context.
   6895 		 */
   6896 		if (segs_needed > txq->txq_free - 2) {
   6897 			/*
    6898 			 * Not enough free descriptors to transmit this
    6899 			 * packet.  We haven't committed anything yet, so
    6900 			 * just unload the DMA map and punt; the packet is
    6901 			 * dropped after the loop below.  Notify the upper
    6902 			 * layer that there are no more slots left.
   6903 			 */
   6904 			DPRINTF(WM_DEBUG_TX,
   6905 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6906 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6907 			    segs_needed, txq->txq_free - 1));
   6908 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6909 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6910 			WM_Q_EVCNT_INCR(txq, txdstall);
   6911 			break;
   6912 		}
   6913 
   6914 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6915 
   6916 		DPRINTF(WM_DEBUG_TX,
   6917 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6918 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6919 
   6920 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6921 
   6922 		/*
   6923 		 * Store a pointer to the packet so that we can free it
   6924 		 * later.
   6925 		 *
   6926 		 * Initially, we consider the number of descriptors the
   6927 		 * packet uses the number of DMA segments.  This may be
   6928 		 * incremented by 1 if we do checksum offload (a descriptor
   6929 		 * is used to set the checksum context).
   6930 		 */
   6931 		txs->txs_mbuf = m0;
   6932 		txs->txs_firstdesc = txq->txq_next;
   6933 		txs->txs_ndesc = segs_needed;
   6934 
   6935 		/* Set up offload parameters for this packet. */
   6936 		uint32_t cmdlen, fields, dcmdlen;
   6937 		if (m0->m_pkthdr.csum_flags &
   6938 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6939 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6940 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6941 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6942 			    &do_csum) != 0) {
   6943 				/* Error message already displayed. */
   6944 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6945 				continue;
   6946 			}
   6947 		} else {
   6948 			do_csum = false;
   6949 			cmdlen = 0;
   6950 			fields = 0;
   6951 		}
   6952 
   6953 		/* Sync the DMA map. */
   6954 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6955 		    BUS_DMASYNC_PREWRITE);
   6956 
   6957 		/* Initialize the first transmit descriptor. */
   6958 		nexttx = txq->txq_next;
   6959 		if (!do_csum) {
   6960 			/* setup a legacy descriptor */
   6961 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   6962 			    dmamap->dm_segs[0].ds_addr);
   6963 			txq->txq_descs[nexttx].wtx_cmdlen =
   6964 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6965 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6966 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6967 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6968 			    NULL) {
   6969 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6970 				    htole32(WTX_CMD_VLE);
   6971 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6972 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6973 			} else {
    6974 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6975 			}
   6976 			dcmdlen = 0;
   6977 		} else {
   6978 			/* setup an advanced data descriptor */
   6979 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6980 			    htole64(dmamap->dm_segs[0].ds_addr);
   6981 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6982 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6983 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6984 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6985 			    htole32(fields);
   6986 			DPRINTF(WM_DEBUG_TX,
   6987 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6988 			    device_xname(sc->sc_dev), nexttx,
   6989 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6990 			DPRINTF(WM_DEBUG_TX,
   6991 			    ("\t 0x%08x%08x\n", fields,
   6992 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6993 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6994 		}
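         		/*
         		 * dcmdlen, chosen above, carries the per-descriptor
         		 * type bits for the remaining segments: 0 keeps them
         		 * in the legacy layout, NQTX_DTYP_D | NQTX_CMD_DEXT
         		 * marks them as advanced data descriptors.
         		 */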
   6995 
   6996 		lasttx = nexttx;
   6997 		nexttx = WM_NEXTTX(txq, nexttx);
   6998 		/*
    6999 		 * Fill in the next descriptors.  Legacy or advanced
    7000 		 * format is the same here.
   7001 		 */
   7002 		for (seg = 1; seg < dmamap->dm_nsegs;
   7003 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7004 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7005 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7006 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7007 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7008 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7009 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7010 			lasttx = nexttx;
   7011 
   7012 			DPRINTF(WM_DEBUG_TX,
   7013 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7014 			     "len %#04zx\n",
   7015 			    device_xname(sc->sc_dev), nexttx,
   7016 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7017 			    dmamap->dm_segs[seg].ds_len));
   7018 		}
   7019 
   7020 		KASSERT(lasttx != -1);
   7021 
   7022 		/*
   7023 		 * Set up the command byte on the last descriptor of
   7024 		 * the packet.  If we're in the interrupt delay window,
   7025 		 * delay the interrupt.
   7026 		 */
   7027 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7028 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7029 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7030 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7031 
   7032 		txs->txs_lastdesc = lasttx;
   7033 
   7034 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7035 		    device_xname(sc->sc_dev),
   7036 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7037 
   7038 		/* Sync the descriptors we're using. */
   7039 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7040 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7041 
   7042 		/* Give the packet to the chip. */
   7043 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7044 		sent = true;
   7045 
   7046 		DPRINTF(WM_DEBUG_TX,
   7047 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7048 
   7049 		DPRINTF(WM_DEBUG_TX,
   7050 		    ("%s: TX: finished transmitting packet, job %d\n",
   7051 		    device_xname(sc->sc_dev), txq->txq_snext));
   7052 
   7053 		/* Advance the tx pointer. */
   7054 		txq->txq_free -= txs->txs_ndesc;
   7055 		txq->txq_next = nexttx;
   7056 
   7057 		txq->txq_sfree--;
   7058 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7059 
   7060 		/* Pass the packet to any BPF listeners. */
   7061 		bpf_mtap(ifp, m0);
   7062 	}
   7063 
   7064 	if (m0 != NULL) {
   7065 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7066 		WM_Q_EVCNT_INCR(txq, txdrop);
   7067 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7068 			__func__));
   7069 		m_freem(m0);
   7070 	}
   7071 
   7072 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7073 		/* No more slots; notify upper layer. */
   7074 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7075 	}
   7076 
   7077 	if (sent) {
   7078 		/* Set a watchdog timer in case the chip flakes out. */
   7079 		ifp->if_timer = 5;
   7080 	}
   7081 }
   7082 
   7083 /* Interrupt */
   7084 
   7085 /*
   7086  * wm_txeof:
   7087  *
   7088  *	Helper; handle transmit interrupts.
   7089  */
   7090 static int
   7091 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7092 {
   7093 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7094 	struct wm_txsoft *txs;
   7095 	bool processed = false;
   7096 	int count = 0;
   7097 	int i;
   7098 	uint8_t status;
   7099 
   7100 	KASSERT(mutex_owned(txq->txq_lock));
   7101 
   7102 	if (sc->sc_stopping)
   7103 		return 0;
   7104 
   7105 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7106 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7107 	else
   7108 		ifp->if_flags &= ~IFF_OACTIVE;
   7109 
   7110 	/*
   7111 	 * Go through the Tx list and free mbufs for those
   7112 	 * frames which have been transmitted.
   7113 	 */
   7114 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7115 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7116 		txs = &txq->txq_soft[i];
   7117 
   7118 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7119 			device_xname(sc->sc_dev), i));
   7120 
   7121 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7122 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7123 
   7124 		status =
   7125 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7126 		if ((status & WTX_ST_DD) == 0) {
   7127 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7128 			    BUS_DMASYNC_PREREAD);
   7129 			break;
   7130 		}
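         		/*
         		 * Jobs complete in ring order, so the first job
         		 * without WTX_ST_DD set ends the scan; the PREREAD
         		 * sync above hands the descriptor back for a later
         		 * status snapshot.
         		 */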
   7131 
   7132 		processed = true;
   7133 		count++;
   7134 		DPRINTF(WM_DEBUG_TX,
   7135 		    ("%s: TX: job %d done: descs %d..%d\n",
   7136 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7137 		    txs->txs_lastdesc));
   7138 
   7139 		/*
   7140 		 * XXX We should probably be using the statistics
   7141 		 * XXX registers, but I don't know if they exist
   7142 		 * XXX on chips before the i82544.
   7143 		 */
   7144 
   7145 #ifdef WM_EVENT_COUNTERS
   7146 		if (status & WTX_ST_TU)
   7147 			WM_Q_EVCNT_INCR(txq, tu);
   7148 #endif /* WM_EVENT_COUNTERS */
   7149 
   7150 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7151 			ifp->if_oerrors++;
   7152 			if (status & WTX_ST_LC)
   7153 				log(LOG_WARNING, "%s: late collision\n",
   7154 				    device_xname(sc->sc_dev));
   7155 			else if (status & WTX_ST_EC) {
   7156 				ifp->if_collisions += 16;
   7157 				log(LOG_WARNING, "%s: excessive collisions\n",
   7158 				    device_xname(sc->sc_dev));
   7159 			}
   7160 		} else
   7161 			ifp->if_opackets++;
   7162 
   7163 		txq->txq_free += txs->txs_ndesc;
   7164 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7165 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7166 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7167 		m_freem(txs->txs_mbuf);
   7168 		txs->txs_mbuf = NULL;
   7169 	}
   7170 
   7171 	/* Update the dirty transmit buffer pointer. */
   7172 	txq->txq_sdirty = i;
   7173 	DPRINTF(WM_DEBUG_TX,
   7174 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7175 
   7176 	if (count != 0)
   7177 		rnd_add_uint32(&sc->rnd_source, count);
   7178 
   7179 	/*
   7180 	 * If there are no more pending transmissions, cancel the watchdog
   7181 	 * timer.
   7182 	 */
   7183 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7184 		ifp->if_timer = 0;
   7185 
   7186 	return processed;
   7187 }
   7188 
   7189 /*
   7190  * wm_rxeof:
   7191  *
   7192  *	Helper; handle receive interrupts.
   7193  */
   7194 static void
   7195 wm_rxeof(struct wm_rxqueue *rxq)
   7196 {
   7197 	struct wm_softc *sc = rxq->rxq_sc;
   7198 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7199 	struct wm_rxsoft *rxs;
   7200 	struct mbuf *m;
   7201 	int i, len;
   7202 	int count = 0;
   7203 	uint8_t status, errors;
   7204 	uint16_t vlantag;
   7205 
   7206 	KASSERT(mutex_owned(rxq->rxq_lock));
   7207 
   7208 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7209 		rxs = &rxq->rxq_soft[i];
   7210 
   7211 		DPRINTF(WM_DEBUG_RX,
   7212 		    ("%s: RX: checking descriptor %d\n",
   7213 		    device_xname(sc->sc_dev), i));
   7214 
    7215 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7216 
   7217 		status = rxq->rxq_descs[i].wrx_status;
   7218 		errors = rxq->rxq_descs[i].wrx_errors;
   7219 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7220 		vlantag = rxq->rxq_descs[i].wrx_special;
   7221 
   7222 		if ((status & WRX_ST_DD) == 0) {
   7223 			/* We have processed all of the receive descriptors. */
   7224 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7225 			break;
   7226 		}
   7227 
   7228 		count++;
   7229 		if (__predict_false(rxq->rxq_discard)) {
   7230 			DPRINTF(WM_DEBUG_RX,
   7231 			    ("%s: RX: discarding contents of descriptor %d\n",
   7232 			    device_xname(sc->sc_dev), i));
   7233 			wm_init_rxdesc(rxq, i);
   7234 			if (status & WRX_ST_EOP) {
   7235 				/* Reset our state. */
   7236 				DPRINTF(WM_DEBUG_RX,
   7237 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7238 				    device_xname(sc->sc_dev)));
   7239 				rxq->rxq_discard = 0;
   7240 			}
   7241 			continue;
   7242 		}
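         		/*
         		 * rxq_discard is sticky: once a chain goes bad (see
         		 * the buffer-allocation failure below), every
         		 * remaining fragment is recycled here until EOP
         		 * clears the flag, so partial packets never reach
         		 * the stack.
         		 */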
   7243 
   7244 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7245 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7246 
   7247 		m = rxs->rxs_mbuf;
   7248 
   7249 		/*
   7250 		 * Add a new receive buffer to the ring, unless of
   7251 		 * course the length is zero. Treat the latter as a
   7252 		 * failed mapping.
   7253 		 */
   7254 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7255 			/*
   7256 			 * Failed, throw away what we've done so
   7257 			 * far, and discard the rest of the packet.
   7258 			 */
   7259 			ifp->if_ierrors++;
   7260 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7261 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7262 			wm_init_rxdesc(rxq, i);
   7263 			if ((status & WRX_ST_EOP) == 0)
   7264 				rxq->rxq_discard = 1;
   7265 			if (rxq->rxq_head != NULL)
   7266 				m_freem(rxq->rxq_head);
   7267 			WM_RXCHAIN_RESET(rxq);
   7268 			DPRINTF(WM_DEBUG_RX,
   7269 			    ("%s: RX: Rx buffer allocation failed, "
   7270 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7271 			    rxq->rxq_discard ? " (discard)" : ""));
   7272 			continue;
   7273 		}
   7274 
   7275 		m->m_len = len;
   7276 		rxq->rxq_len += len;
   7277 		DPRINTF(WM_DEBUG_RX,
   7278 		    ("%s: RX: buffer at %p len %d\n",
   7279 		    device_xname(sc->sc_dev), m->m_data, len));
   7280 
   7281 		/* If this is not the end of the packet, keep looking. */
   7282 		if ((status & WRX_ST_EOP) == 0) {
   7283 			WM_RXCHAIN_LINK(rxq, m);
   7284 			DPRINTF(WM_DEBUG_RX,
   7285 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7286 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7287 			continue;
   7288 		}
   7289 
   7290 		/*
    7291 		 * Okay, we have the entire packet now.  The chip
    7292 		 * includes the FCS except on I350/I354 and I210/I211
    7293 		 * (not all chips can be configured to strip it), so
    7294 		 * we need to trim it.  We may also need to adjust the
    7295 		 * length of the previous mbuf in the chain if the
    7296 		 * current mbuf is too short.
    7297 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7298 		 * register is always set on I350, so we don't trim there.
   7299 		 */
   7300 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7301 		    && (sc->sc_type != WM_T_I210)
   7302 		    && (sc->sc_type != WM_T_I211)) {
   7303 			if (m->m_len < ETHER_CRC_LEN) {
   7304 				rxq->rxq_tail->m_len
   7305 				    -= (ETHER_CRC_LEN - m->m_len);
   7306 				m->m_len = 0;
   7307 			} else
   7308 				m->m_len -= ETHER_CRC_LEN;
   7309 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7310 		} else
   7311 			len = rxq->rxq_len;
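         		/*
         		 * Example (illustrative): a 64-byte frame received
         		 * into a single buffer becomes len 60 once the 4-byte
         		 * FCS is trimmed; when the final mbuf holds fewer than
         		 * 4 bytes, the shortfall is borrowed from the previous
         		 * mbuf in the chain, as above.
         		 */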
   7312 
   7313 		WM_RXCHAIN_LINK(rxq, m);
   7314 
   7315 		*rxq->rxq_tailp = NULL;
   7316 		m = rxq->rxq_head;
   7317 
   7318 		WM_RXCHAIN_RESET(rxq);
   7319 
   7320 		DPRINTF(WM_DEBUG_RX,
   7321 		    ("%s: RX: have entire packet, len -> %d\n",
   7322 		    device_xname(sc->sc_dev), len));
   7323 
   7324 		/* If an error occurred, update stats and drop the packet. */
   7325 		if (errors &
   7326 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7327 			if (errors & WRX_ER_SE)
   7328 				log(LOG_WARNING, "%s: symbol error\n",
   7329 				    device_xname(sc->sc_dev));
   7330 			else if (errors & WRX_ER_SEQ)
   7331 				log(LOG_WARNING, "%s: receive sequence error\n",
   7332 				    device_xname(sc->sc_dev));
   7333 			else if (errors & WRX_ER_CE)
   7334 				log(LOG_WARNING, "%s: CRC error\n",
   7335 				    device_xname(sc->sc_dev));
   7336 			m_freem(m);
   7337 			continue;
   7338 		}
   7339 
   7340 		/* No errors.  Receive the packet. */
   7341 		m_set_rcvif(m, ifp);
   7342 		m->m_pkthdr.len = len;
   7343 
   7344 		/*
   7345 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7346 		 * for us.  Associate the tag with the packet.
   7347 		 */
   7348 		/* XXXX should check for i350 and i354 */
   7349 		if ((status & WRX_ST_VP) != 0) {
   7350 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7351 		}
   7352 
   7353 		/* Set up checksum info for this packet. */
   7354 		if ((status & WRX_ST_IXSM) == 0) {
   7355 			if (status & WRX_ST_IPCS) {
   7356 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7357 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7358 				if (errors & WRX_ER_IPE)
   7359 					m->m_pkthdr.csum_flags |=
   7360 					    M_CSUM_IPv4_BAD;
   7361 			}
   7362 			if (status & WRX_ST_TCPCS) {
   7363 				/*
   7364 				 * Note: we don't know if this was TCP or UDP,
   7365 				 * so we just set both bits, and expect the
   7366 				 * upper layers to deal.
   7367 				 */
   7368 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7369 				m->m_pkthdr.csum_flags |=
   7370 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7371 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7372 				if (errors & WRX_ER_TCPE)
   7373 					m->m_pkthdr.csum_flags |=
   7374 					    M_CSUM_TCP_UDP_BAD;
   7375 			}
   7376 		}
   7377 
   7378 		ifp->if_ipackets++;
   7379 
   7380 		mutex_exit(rxq->rxq_lock);
   7381 
   7382 		/* Pass this up to any BPF listeners. */
   7383 		bpf_mtap(ifp, m);
   7384 
   7385 		/* Pass it on. */
   7386 		if_percpuq_enqueue(sc->sc_ipq, m);
   7387 
   7388 		mutex_enter(rxq->rxq_lock);
   7389 
   7390 		if (sc->sc_stopping)
   7391 			break;
   7392 	}
   7393 
   7394 	/* Update the receive pointer. */
   7395 	rxq->rxq_ptr = i;
   7396 	if (count != 0)
   7397 		rnd_add_uint32(&sc->rnd_source, count);
   7398 
   7399 	DPRINTF(WM_DEBUG_RX,
   7400 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7401 }
   7402 
   7403 /*
   7404  * wm_linkintr_gmii:
   7405  *
   7406  *	Helper; handle link interrupts for GMII.
   7407  */
   7408 static void
   7409 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7410 {
   7411 
   7412 	KASSERT(WM_CORE_LOCKED(sc));
   7413 
   7414 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7415 		__func__));
   7416 
   7417 	if (icr & ICR_LSC) {
   7418 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7419 
   7420 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7421 			wm_gig_downshift_workaround_ich8lan(sc);
   7422 
   7423 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7424 			device_xname(sc->sc_dev)));
   7425 		mii_pollstat(&sc->sc_mii);
   7426 		if (sc->sc_type == WM_T_82543) {
   7427 			int miistatus, active;
   7428 
   7429 			/*
   7430 			 * With 82543, we need to force speed and
   7431 			 * duplex on the MAC equal to what the PHY
   7432 			 * speed and duplex configuration is.
   7433 			 */
   7434 			miistatus = sc->sc_mii.mii_media_status;
   7435 
   7436 			if (miistatus & IFM_ACTIVE) {
   7437 				active = sc->sc_mii.mii_media_active;
   7438 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7439 				switch (IFM_SUBTYPE(active)) {
   7440 				case IFM_10_T:
   7441 					sc->sc_ctrl |= CTRL_SPEED_10;
   7442 					break;
   7443 				case IFM_100_TX:
   7444 					sc->sc_ctrl |= CTRL_SPEED_100;
   7445 					break;
   7446 				case IFM_1000_T:
   7447 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7448 					break;
   7449 				default:
   7450 					/*
   7451 					 * fiber?
    7452 					 * Should not enter here.
   7453 					 */
   7454 					printf("unknown media (%x)\n", active);
   7455 					break;
   7456 				}
   7457 				if (active & IFM_FDX)
   7458 					sc->sc_ctrl |= CTRL_FD;
   7459 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7460 			}
   7461 		} else if ((sc->sc_type == WM_T_ICH8)
   7462 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7463 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7464 		} else if (sc->sc_type == WM_T_PCH) {
   7465 			wm_k1_gig_workaround_hv(sc,
   7466 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7467 		}
   7468 
   7469 		if ((sc->sc_phytype == WMPHY_82578)
   7470 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7471 			== IFM_1000_T)) {
   7472 
   7473 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7474 				delay(200*1000); /* XXX too big */
   7475 
   7476 				/* Link stall fix for link up */
   7477 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7478 				    HV_MUX_DATA_CTRL,
   7479 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7480 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7481 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7482 				    HV_MUX_DATA_CTRL,
   7483 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7484 			}
   7485 		}
   7486 	} else if (icr & ICR_RXSEQ) {
   7487 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7488 			device_xname(sc->sc_dev)));
   7489 	}
   7490 }
   7491 
   7492 /*
   7493  * wm_linkintr_tbi:
   7494  *
   7495  *	Helper; handle link interrupts for TBI mode.
   7496  */
   7497 static void
   7498 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7499 {
   7500 	uint32_t status;
   7501 
   7502 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7503 		__func__));
   7504 
   7505 	status = CSR_READ(sc, WMREG_STATUS);
   7506 	if (icr & ICR_LSC) {
   7507 		if (status & STATUS_LU) {
   7508 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7509 			    device_xname(sc->sc_dev),
   7510 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7511 			/*
   7512 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7513 			 * so we should update sc->sc_ctrl
   7514 			 */
   7515 
   7516 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7517 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7518 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7519 			if (status & STATUS_FD)
   7520 				sc->sc_tctl |=
   7521 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7522 			else
   7523 				sc->sc_tctl |=
   7524 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7525 			if (sc->sc_ctrl & CTRL_TFCE)
   7526 				sc->sc_fcrtl |= FCRTL_XONE;
   7527 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7528 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7529 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7530 				      sc->sc_fcrtl);
   7531 			sc->sc_tbi_linkup = 1;
   7532 		} else {
   7533 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7534 			    device_xname(sc->sc_dev)));
   7535 			sc->sc_tbi_linkup = 0;
   7536 		}
   7537 		/* Update LED */
   7538 		wm_tbi_serdes_set_linkled(sc);
   7539 	} else if (icr & ICR_RXSEQ) {
   7540 		DPRINTF(WM_DEBUG_LINK,
   7541 		    ("%s: LINK: Receive sequence error\n",
   7542 		    device_xname(sc->sc_dev)));
   7543 	}
   7544 }
   7545 
   7546 /*
   7547  * wm_linkintr_serdes:
   7548  *
    7549  *	Helper; handle link interrupts for SERDES mode.
   7550  */
   7551 static void
   7552 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7553 {
   7554 	struct mii_data *mii = &sc->sc_mii;
   7555 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7556 	uint32_t pcs_adv, pcs_lpab, reg;
   7557 
   7558 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7559 		__func__));
   7560 
   7561 	if (icr & ICR_LSC) {
   7562 		/* Check PCS */
   7563 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7564 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7565 			mii->mii_media_status |= IFM_ACTIVE;
   7566 			sc->sc_tbi_linkup = 1;
   7567 		} else {
   7568 			mii->mii_media_status |= IFM_NONE;
   7569 			sc->sc_tbi_linkup = 0;
   7570 			wm_tbi_serdes_set_linkled(sc);
   7571 			return;
   7572 		}
   7573 		mii->mii_media_active |= IFM_1000_SX;
   7574 		if ((reg & PCS_LSTS_FDX) != 0)
   7575 			mii->mii_media_active |= IFM_FDX;
   7576 		else
   7577 			mii->mii_media_active |= IFM_HDX;
   7578 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7579 			/* Check flow */
   7580 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7581 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7582 				DPRINTF(WM_DEBUG_LINK,
   7583 				    ("XXX LINKOK but not ACOMP\n"));
   7584 				return;
   7585 			}
   7586 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7587 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7588 			DPRINTF(WM_DEBUG_LINK,
   7589 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7590 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7591 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7592 				mii->mii_media_active |= IFM_FLOW
   7593 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7594 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7595 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7596 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7597 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7598 				mii->mii_media_active |= IFM_FLOW
   7599 				    | IFM_ETH_TXPAUSE;
   7600 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7601 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7602 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7603 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7604 				mii->mii_media_active |= IFM_FLOW
   7605 				    | IFM_ETH_RXPAUSE;
   7606 		}
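         		/*
         		 * The chain above is the standard IEEE 802.3 Annex
         		 * 28B pause resolution: symmetric flow control when
         		 * both sides advertise SYM, one-directional pause
         		 * when exactly one side pairs ASYM with the partner's
         		 * SYM+ASYM.
         		 */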
   7607 		/* Update LED */
   7608 		wm_tbi_serdes_set_linkled(sc);
   7609 	} else {
   7610 		DPRINTF(WM_DEBUG_LINK,
   7611 		    ("%s: LINK: Receive sequence error\n",
   7612 		    device_xname(sc->sc_dev)));
   7613 	}
   7614 }
   7615 
   7616 /*
   7617  * wm_linkintr:
   7618  *
   7619  *	Helper; handle link interrupts.
   7620  */
   7621 static void
   7622 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7623 {
   7624 
   7625 	KASSERT(WM_CORE_LOCKED(sc));
   7626 
   7627 	if (sc->sc_flags & WM_F_HAS_MII)
   7628 		wm_linkintr_gmii(sc, icr);
   7629 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7630 	    && (sc->sc_type >= WM_T_82575))
   7631 		wm_linkintr_serdes(sc, icr);
   7632 	else
   7633 		wm_linkintr_tbi(sc, icr);
   7634 }
   7635 
   7636 /*
   7637  * wm_intr_legacy:
   7638  *
   7639  *	Interrupt service routine for INTx and MSI.
   7640  */
   7641 static int
   7642 wm_intr_legacy(void *arg)
   7643 {
   7644 	struct wm_softc *sc = arg;
   7645 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7646 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7647 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7648 	uint32_t icr, rndval = 0;
   7649 	int handled = 0;
   7650 
   7651 	DPRINTF(WM_DEBUG_TX,
   7652 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
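         	/*
         	 * Reading ICR clears it, so loop until no interrupt cause of
         	 * interest remains; that way a level-triggered INTx line is
         	 * fully deasserted before we return.  rndval keeps the first
         	 * nonzero snapshot for the entropy pool.
         	 */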
   7653 	while (1 /* CONSTCOND */) {
   7654 		icr = CSR_READ(sc, WMREG_ICR);
   7655 		if ((icr & sc->sc_icr) == 0)
   7656 			break;
   7657 		if (rndval == 0)
   7658 			rndval = icr;
   7659 
   7660 		mutex_enter(rxq->rxq_lock);
   7661 
   7662 		if (sc->sc_stopping) {
   7663 			mutex_exit(rxq->rxq_lock);
   7664 			break;
   7665 		}
   7666 
   7667 		handled = 1;
   7668 
   7669 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7670 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7671 			DPRINTF(WM_DEBUG_RX,
   7672 			    ("%s: RX: got Rx intr 0x%08x\n",
   7673 			    device_xname(sc->sc_dev),
   7674 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7675 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7676 		}
   7677 #endif
   7678 		wm_rxeof(rxq);
   7679 
   7680 		mutex_exit(rxq->rxq_lock);
   7681 		mutex_enter(txq->txq_lock);
   7682 
   7683 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7684 		if (icr & ICR_TXDW) {
   7685 			DPRINTF(WM_DEBUG_TX,
   7686 			    ("%s: TX: got TXDW interrupt\n",
   7687 			    device_xname(sc->sc_dev)));
   7688 			WM_Q_EVCNT_INCR(txq, txdw);
   7689 		}
   7690 #endif
   7691 		wm_txeof(sc, txq);
   7692 
   7693 		mutex_exit(txq->txq_lock);
   7694 		WM_CORE_LOCK(sc);
   7695 
   7696 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7697 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7698 			wm_linkintr(sc, icr);
   7699 		}
   7700 
   7701 		WM_CORE_UNLOCK(sc);
   7702 
   7703 		if (icr & ICR_RXO) {
   7704 #if defined(WM_DEBUG)
   7705 			log(LOG_WARNING, "%s: Receive overrun\n",
   7706 			    device_xname(sc->sc_dev));
   7707 #endif /* defined(WM_DEBUG) */
   7708 		}
   7709 	}
   7710 
   7711 	rnd_add_uint32(&sc->rnd_source, rndval);
   7712 
   7713 	if (handled) {
   7714 		/* Try to get more packets going. */
   7715 		ifp->if_start(ifp);
   7716 	}
   7717 
   7718 	return handled;
   7719 }
   7720 
   7721 static int
   7722 wm_txrxintr_msix(void *arg)
   7723 {
   7724 	struct wm_queue *wmq = arg;
   7725 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7726 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7727 	struct wm_softc *sc = txq->txq_sc;
   7728 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7729 
   7730 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7731 
   7732 	DPRINTF(WM_DEBUG_TX,
   7733 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7734 
   7735 	if (sc->sc_type == WM_T_82574)
    7736 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7737 	else if (sc->sc_type == WM_T_82575)
    7738 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7739 	else
   7740 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
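         	/*
         	 * The write above and the matching IMS/EIMS write at the
         	 * bottom bracket the handler: this queue's interrupt stays
         	 * masked while we run.  82574 uses per-queue bits in IMC/IMS,
         	 * 82575 uses EITR_*_QUEUE bits in EIMC/EIMS, and newer chips
         	 * mask the raw MSI-X vector bit.
         	 */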
   7741 
   7742 	if (!sc->sc_stopping) {
   7743 		mutex_enter(txq->txq_lock);
   7744 
   7745 		WM_Q_EVCNT_INCR(txq, txdw);
   7746 		wm_txeof(sc, txq);
   7747 
   7748 		/* Try to get more packets going. */
   7749 		if (pcq_peek(txq->txq_interq) != NULL)
   7750 			wm_nq_transmit_locked(ifp, txq);
   7751 		/*
    7752 		 * Some upper-layer processing (e.g. ALTQ) still calls
    7753 		 * ifp->if_start() directly, so drain if_snd on queue 0 too.
   7754 		 */
   7755 		if (wmq->wmq_id == 0) {
   7756 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7757 				wm_nq_start_locked(ifp);
   7758 		}
   7759 		mutex_exit(txq->txq_lock);
   7760 	}
   7761 
   7762 	DPRINTF(WM_DEBUG_RX,
   7763 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7764 
   7765 	if (!sc->sc_stopping) {
   7766 		mutex_enter(rxq->rxq_lock);
   7767 		WM_Q_EVCNT_INCR(rxq, rxintr);
   7768 		wm_rxeof(rxq);
   7769 		mutex_exit(rxq->rxq_lock);
   7770 	}
   7771 
   7772 	if (sc->sc_type == WM_T_82574)
    7773 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7774 	else if (sc->sc_type == WM_T_82575)
    7775 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7776 	else
   7777 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7778 
   7779 	return 1;
   7780 }
   7781 
   7782 /*
   7783  * wm_linkintr_msix:
   7784  *
   7785  *	Interrupt service routine for link status change for MSI-X.
   7786  */
   7787 static int
   7788 wm_linkintr_msix(void *arg)
   7789 {
   7790 	struct wm_softc *sc = arg;
   7791 	uint32_t reg;
   7792 
   7793 	DPRINTF(WM_DEBUG_LINK,
   7794 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7795 
   7796 	reg = CSR_READ(sc, WMREG_ICR);
   7797 	WM_CORE_LOCK(sc);
   7798 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7799 		goto out;
   7800 
   7801 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7802 	wm_linkintr(sc, ICR_LSC);
   7803 
   7804 out:
   7805 	WM_CORE_UNLOCK(sc);
   7806 
   7807 	if (sc->sc_type == WM_T_82574)
   7808 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7809 	else if (sc->sc_type == WM_T_82575)
   7810 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7811 	else
   7812 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7813 
   7814 	return 1;
   7815 }
   7816 
   7817 /*
   7818  * Media related.
   7819  * GMII, SGMII, TBI (and SERDES)
   7820  */
   7821 
   7822 /* Common */
   7823 
   7824 /*
   7825  * wm_tbi_serdes_set_linkled:
   7826  *
   7827  *	Update the link LED on TBI and SERDES devices.
   7828  */
   7829 static void
   7830 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7831 {
   7832 
   7833 	if (sc->sc_tbi_linkup)
   7834 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7835 	else
   7836 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7837 
   7838 	/* 82540 or newer devices are active low */
   7839 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7840 
   7841 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7842 }
   7843 
   7844 /* GMII related */
   7845 
   7846 /*
   7847  * wm_gmii_reset:
   7848  *
   7849  *	Reset the PHY.
   7850  */
   7851 static void
   7852 wm_gmii_reset(struct wm_softc *sc)
   7853 {
   7854 	uint32_t reg;
   7855 	int rv;
   7856 
   7857 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7858 		device_xname(sc->sc_dev), __func__));
   7859 
   7860 	rv = sc->phy.acquire(sc);
   7861 	if (rv != 0) {
   7862 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7863 		    __func__);
   7864 		return;
   7865 	}
   7866 
   7867 	switch (sc->sc_type) {
   7868 	case WM_T_82542_2_0:
   7869 	case WM_T_82542_2_1:
   7870 		/* null */
   7871 		break;
   7872 	case WM_T_82543:
   7873 		/*
   7874 		 * With 82543, we need to force speed and duplex on the MAC
   7875 		 * equal to what the PHY speed and duplex configuration is.
   7876 		 * In addition, we need to perform a hardware reset on the PHY
   7877 		 * to take it out of reset.
   7878 		 */
   7879 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7880 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7881 
   7882 		/* The PHY reset pin is active-low. */
   7883 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7884 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7885 		    CTRL_EXT_SWDPIN(4));
   7886 		reg |= CTRL_EXT_SWDPIO(4);
   7887 
   7888 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7889 		CSR_WRITE_FLUSH(sc);
   7890 		delay(10*1000);
   7891 
   7892 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7893 		CSR_WRITE_FLUSH(sc);
   7894 		delay(150);
   7895 #if 0
   7896 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7897 #endif
   7898 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7899 		break;
   7900 	case WM_T_82544:	/* reset 10000us */
   7901 	case WM_T_82540:
   7902 	case WM_T_82545:
   7903 	case WM_T_82545_3:
   7904 	case WM_T_82546:
   7905 	case WM_T_82546_3:
   7906 	case WM_T_82541:
   7907 	case WM_T_82541_2:
   7908 	case WM_T_82547:
   7909 	case WM_T_82547_2:
   7910 	case WM_T_82571:	/* reset 100us */
   7911 	case WM_T_82572:
   7912 	case WM_T_82573:
   7913 	case WM_T_82574:
   7914 	case WM_T_82575:
   7915 	case WM_T_82576:
   7916 	case WM_T_82580:
   7917 	case WM_T_I350:
   7918 	case WM_T_I354:
   7919 	case WM_T_I210:
   7920 	case WM_T_I211:
   7921 	case WM_T_82583:
   7922 	case WM_T_80003:
   7923 		/* generic reset */
   7924 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7925 		CSR_WRITE_FLUSH(sc);
   7926 		delay(20000);
   7927 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7928 		CSR_WRITE_FLUSH(sc);
   7929 		delay(20000);
   7930 
   7931 		if ((sc->sc_type == WM_T_82541)
   7932 		    || (sc->sc_type == WM_T_82541_2)
   7933 		    || (sc->sc_type == WM_T_82547)
   7934 		    || (sc->sc_type == WM_T_82547_2)) {
    7935 			/* Workarounds for IGP are done in igp_reset() */
   7936 			/* XXX add code to set LED after phy reset */
   7937 		}
   7938 		break;
   7939 	case WM_T_ICH8:
   7940 	case WM_T_ICH9:
   7941 	case WM_T_ICH10:
   7942 	case WM_T_PCH:
   7943 	case WM_T_PCH2:
   7944 	case WM_T_PCH_LPT:
   7945 	case WM_T_PCH_SPT:
   7946 		/* generic reset */
   7947 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7948 		CSR_WRITE_FLUSH(sc);
   7949 		delay(100);
   7950 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7951 		CSR_WRITE_FLUSH(sc);
   7952 		delay(150);
   7953 		break;
   7954 	default:
   7955 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7956 		    __func__);
   7957 		break;
   7958 	}
   7959 
   7960 	sc->phy.release(sc);
   7961 
   7962 	/* get_cfg_done */
   7963 	wm_get_cfg_done(sc);
   7964 
   7965 	/* extra setup */
   7966 	switch (sc->sc_type) {
   7967 	case WM_T_82542_2_0:
   7968 	case WM_T_82542_2_1:
   7969 	case WM_T_82543:
   7970 	case WM_T_82544:
   7971 	case WM_T_82540:
   7972 	case WM_T_82545:
   7973 	case WM_T_82545_3:
   7974 	case WM_T_82546:
   7975 	case WM_T_82546_3:
   7976 	case WM_T_82541_2:
   7977 	case WM_T_82547_2:
   7978 	case WM_T_82571:
   7979 	case WM_T_82572:
   7980 	case WM_T_82573:
   7981 	case WM_T_82575:
   7982 	case WM_T_82576:
   7983 	case WM_T_82580:
   7984 	case WM_T_I350:
   7985 	case WM_T_I354:
   7986 	case WM_T_I210:
   7987 	case WM_T_I211:
   7988 	case WM_T_80003:
   7989 		/* null */
   7990 		break;
   7991 	case WM_T_82574:
   7992 	case WM_T_82583:
   7993 		wm_lplu_d0_disable(sc);
   7994 		break;
   7995 	case WM_T_82541:
   7996 	case WM_T_82547:
    7997 		/* XXX Actively configure the LED after PHY reset */
   7998 		break;
   7999 	case WM_T_ICH8:
   8000 	case WM_T_ICH9:
   8001 	case WM_T_ICH10:
   8002 	case WM_T_PCH:
   8003 	case WM_T_PCH2:
   8004 	case WM_T_PCH_LPT:
   8005 	case WM_T_PCH_SPT:
    8006 		/* Allow time for h/w to get to a quiescent state after reset */
   8007 		delay(10*1000);
   8008 
   8009 		if (sc->sc_type == WM_T_PCH)
   8010 			wm_hv_phy_workaround_ich8lan(sc);
   8011 
   8012 		if (sc->sc_type == WM_T_PCH2)
   8013 			wm_lv_phy_workaround_ich8lan(sc);
   8014 
   8015 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   8016 			/*
   8017 			 * dummy read to clear the phy wakeup bit after lcd
   8018 			 * reset
   8019 			 */
   8020 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   8021 		}
   8022 
   8023 		/*
    8024 		 * XXX Configure the LCD with the extended configuration region
   8025 		 * in NVM
   8026 		 */
   8027 
   8028 		/* Disable D0 LPLU. */
   8029 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8030 			wm_lplu_d0_disable_pch(sc);
   8031 		else
   8032 			wm_lplu_d0_disable(sc);	/* ICH* */
   8033 		break;
   8034 	default:
   8035 		panic("%s: unknown type\n", __func__);
   8036 		break;
   8037 	}
   8038 }
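
/*
 * A note on the sequence above: for every supported family, the reset
 * itself is a pulse on CTRL_PHY_RESET with a chip-specific hold time
 * (20ms for the older PCI/PCIe parts, 100us for ICH/PCH, which also
 * uses slightly different delays for the two phases).  A minimal
 * illustrative sketch of that pulse, not part of the driver, with the
 * hold time as a parameter:
 */
#ifdef notyet
static void
wm_phy_reset_pulse(struct wm_softc *sc, int hold_us)
{

	/* Assert PHY reset, wait, then deassert and wait again. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	CSR_WRITE_FLUSH(sc);
	delay(hold_us);
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
	delay(hold_us);
}
#endif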
   8039 
   8040 /*
   8041  * wm_get_phy_id_82575:
   8042  *
    8043  *	Return the PHY ID, or -1 on failure.
   8044  */
   8045 static int
   8046 wm_get_phy_id_82575(struct wm_softc *sc)
   8047 {
   8048 	uint32_t reg;
   8049 	int phyid = -1;
   8050 
   8051 	/* XXX */
   8052 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8053 		return -1;
   8054 
   8055 	if (wm_sgmii_uses_mdio(sc)) {
   8056 		switch (sc->sc_type) {
   8057 		case WM_T_82575:
   8058 		case WM_T_82576:
   8059 			reg = CSR_READ(sc, WMREG_MDIC);
   8060 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8061 			break;
   8062 		case WM_T_82580:
   8063 		case WM_T_I350:
   8064 		case WM_T_I354:
   8065 		case WM_T_I210:
   8066 		case WM_T_I211:
   8067 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8068 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8069 			break;
   8070 		default:
   8071 			return -1;
   8072 		}
   8073 	}
   8074 
   8075 	return phyid;
   8076 }
   8077 
   8078 
   8079 /*
   8080  * wm_gmii_mediainit:
   8081  *
   8082  *	Initialize media for use on 1000BASE-T devices.
   8083  */
   8084 static void
   8085 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8086 {
   8087 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8088 	struct mii_data *mii = &sc->sc_mii;
   8089 	uint32_t reg;
   8090 
   8091 	/* We have GMII. */
   8092 	sc->sc_flags |= WM_F_HAS_MII;
   8093 
   8094 	if (sc->sc_type == WM_T_80003)
   8095 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8096 	else
   8097 		sc->sc_tipg = TIPG_1000T_DFLT;
   8098 
   8099 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8100 	if ((sc->sc_type == WM_T_82580)
   8101 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8102 	    || (sc->sc_type == WM_T_I211)) {
   8103 		reg = CSR_READ(sc, WMREG_PHPM);
   8104 		reg &= ~PHPM_GO_LINK_D;
   8105 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8106 	}
   8107 
   8108 	/*
   8109 	 * Let the chip set speed/duplex on its own based on
   8110 	 * signals from the PHY.
   8111 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8112 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8113 	 */
   8114 	sc->sc_ctrl |= CTRL_SLU;
   8115 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8116 
   8117 	/* Initialize our media structures and probe the GMII. */
   8118 	mii->mii_ifp = ifp;
   8119 
    8120 	/*
    8121 	 * Determine the PHY access method.
    8122 	 *
    8123 	 *  For SGMII, use the SGMII-specific method.
    8124 	 *
    8125 	 *  For some devices, we can determine the PHY access method
    8126 	 * from sc_type.
    8127 	 *
    8128 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8129 	 * access method by sc_type, so use the PCI product ID for some
    8130 	 * devices.
    8131 	 * For other ICH8 variants, try igp's method first; if no PHY is
    8132 	 * detected, fall back to bm's method.
    8133 	 */
   8134 	switch (prodid) {
   8135 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8136 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8137 		/* 82577 */
   8138 		sc->sc_phytype = WMPHY_82577;
   8139 		break;
   8140 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8141 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8142 		/* 82578 */
   8143 		sc->sc_phytype = WMPHY_82578;
   8144 		break;
   8145 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8146 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8147 		/* 82579 */
   8148 		sc->sc_phytype = WMPHY_82579;
   8149 		break;
   8150 	case PCI_PRODUCT_INTEL_82801I_BM:
   8151 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8152 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8153 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8154 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8155 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8156 		/* 82567 */
   8157 		sc->sc_phytype = WMPHY_BM;
   8158 		mii->mii_readreg = wm_gmii_bm_readreg;
   8159 		mii->mii_writereg = wm_gmii_bm_writereg;
   8160 		break;
   8161 	default:
   8162 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8163 		    && !wm_sgmii_uses_mdio(sc)){
   8164 			/* SGMII */
   8165 			mii->mii_readreg = wm_sgmii_readreg;
   8166 			mii->mii_writereg = wm_sgmii_writereg;
   8167 		} else if (sc->sc_type >= WM_T_80003) {
   8168 			/* 80003 */
   8169 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8170 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8171 		} else if (sc->sc_type >= WM_T_I210) {
   8172 			/* I210 and I211 */
   8173 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8174 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8175 		} else if (sc->sc_type >= WM_T_82580) {
   8176 			/* 82580, I350 and I354 */
   8177 			sc->sc_phytype = WMPHY_82580;
   8178 			mii->mii_readreg = wm_gmii_82580_readreg;
   8179 			mii->mii_writereg = wm_gmii_82580_writereg;
   8180 		} else if (sc->sc_type >= WM_T_82544) {
    8181 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8182 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8183 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8184 		} else {
   8185 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8186 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8187 		}
   8188 		break;
   8189 	}
   8190 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8191 		/* All PCH* use _hv_ */
   8192 		mii->mii_readreg = wm_gmii_hv_readreg;
   8193 		mii->mii_writereg = wm_gmii_hv_writereg;
   8194 	}
   8195 	mii->mii_statchg = wm_gmii_statchg;
   8196 
   8197 	wm_gmii_reset(sc);
   8198 
   8199 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8200 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8201 	    wm_gmii_mediastatus);
   8202 
   8203 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8204 	    || (sc->sc_type == WM_T_82580)
   8205 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8206 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8207 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8208 			/* Attach only one port */
   8209 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8210 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8211 		} else {
   8212 			int i, id;
   8213 			uint32_t ctrl_ext;
   8214 
   8215 			id = wm_get_phy_id_82575(sc);
   8216 			if (id != -1) {
   8217 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8218 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8219 			}
   8220 			if ((id == -1)
   8221 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8222 				/* Power on sgmii phy if it is disabled */
   8223 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8224 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8225 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8226 				CSR_WRITE_FLUSH(sc);
   8227 				delay(300*1000); /* XXX too long */
   8228 
    8229 				/* Try PHY addresses 1 through 7 */
   8230 				for (i = 1; i < 8; i++)
   8231 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8232 					    0xffffffff, i, MII_OFFSET_ANY,
   8233 					    MIIF_DOPAUSE);
   8234 
   8235 				/* restore previous sfp cage power state */
   8236 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8237 			}
   8238 		}
   8239 	} else {
   8240 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8241 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8242 	}
   8243 
   8244 	/*
   8245 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8246 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8247 	 */
   8248 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8249 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8250 		wm_set_mdio_slow_mode_hv(sc);
   8251 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8252 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8253 	}
   8254 
   8255 	/*
   8256 	 * (For ICH8 variants)
   8257 	 * If PHY detection failed, use BM's r/w function and retry.
   8258 	 */
   8259 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8260 		/* if failed, retry with *_bm_* */
   8261 		mii->mii_readreg = wm_gmii_bm_readreg;
   8262 		mii->mii_writereg = wm_gmii_bm_writereg;
   8263 
   8264 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8265 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8266 	}
   8267 
   8268 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8269 		/* No PHY was found */
   8270 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8271 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8272 		sc->sc_phytype = WMPHY_NONE;
   8273 	} else {
   8274 		/*
   8275 		 * PHY Found!
   8276 		 * Check PHY type.
   8277 		 */
   8278 		uint32_t model;
   8279 		struct mii_softc *child;
   8280 
   8281 		child = LIST_FIRST(&mii->mii_phys);
   8282 		model = child->mii_mpd_model;
   8283 		if (model == MII_MODEL_yyINTEL_I82566)
   8284 			sc->sc_phytype = WMPHY_IGP_3;
   8285 
   8286 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8287 	}
   8288 }
   8289 
   8290 /*
   8291  * wm_gmii_mediachange:	[ifmedia interface function]
   8292  *
   8293  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8294  */
   8295 static int
   8296 wm_gmii_mediachange(struct ifnet *ifp)
   8297 {
   8298 	struct wm_softc *sc = ifp->if_softc;
   8299 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8300 	int rc;
   8301 
   8302 	if ((ifp->if_flags & IFF_UP) == 0)
   8303 		return 0;
   8304 
   8305 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8306 	sc->sc_ctrl |= CTRL_SLU;
   8307 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8308 	    || (sc->sc_type > WM_T_82543)) {
   8309 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8310 	} else {
   8311 		sc->sc_ctrl &= ~CTRL_ASDE;
   8312 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8313 		if (ife->ifm_media & IFM_FDX)
   8314 			sc->sc_ctrl |= CTRL_FD;
   8315 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8316 		case IFM_10_T:
   8317 			sc->sc_ctrl |= CTRL_SPEED_10;
   8318 			break;
   8319 		case IFM_100_TX:
   8320 			sc->sc_ctrl |= CTRL_SPEED_100;
   8321 			break;
   8322 		case IFM_1000_T:
   8323 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8324 			break;
   8325 		default:
   8326 			panic("wm_gmii_mediachange: bad media 0x%x",
   8327 			    ife->ifm_media);
   8328 		}
   8329 	}
   8330 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8331 	if (sc->sc_type <= WM_T_82543)
   8332 		wm_gmii_reset(sc);
   8333 
   8334 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8335 		return 0;
   8336 	return rc;
   8337 }
   8338 
   8339 /*
   8340  * wm_gmii_mediastatus:	[ifmedia interface function]
   8341  *
   8342  *	Get the current interface media status on a 1000BASE-T device.
   8343  */
   8344 static void
   8345 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8346 {
   8347 	struct wm_softc *sc = ifp->if_softc;
   8348 
   8349 	ether_mediastatus(ifp, ifmr);
   8350 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8351 	    | sc->sc_flowflags;
   8352 }
   8353 
   8354 #define	MDI_IO		CTRL_SWDPIN(2)
   8355 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8356 #define	MDI_CLK		CTRL_SWDPIN(3)
   8357 
   8358 static void
   8359 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8360 {
   8361 	uint32_t i, v;
   8362 
   8363 	v = CSR_READ(sc, WMREG_CTRL);
   8364 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8365 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8366 
   8367 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8368 		if (data & i)
   8369 			v |= MDI_IO;
   8370 		else
   8371 			v &= ~MDI_IO;
   8372 		CSR_WRITE(sc, WMREG_CTRL, v);
   8373 		CSR_WRITE_FLUSH(sc);
   8374 		delay(10);
   8375 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8376 		CSR_WRITE_FLUSH(sc);
   8377 		delay(10);
   8378 		CSR_WRITE(sc, WMREG_CTRL, v);
   8379 		CSR_WRITE_FLUSH(sc);
   8380 		delay(10);
   8381 	}
   8382 }
   8383 
   8384 static uint32_t
   8385 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8386 {
   8387 	uint32_t v, i, data = 0;
   8388 
   8389 	v = CSR_READ(sc, WMREG_CTRL);
   8390 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8391 	v |= CTRL_SWDPIO(3);
   8392 
   8393 	CSR_WRITE(sc, WMREG_CTRL, v);
   8394 	CSR_WRITE_FLUSH(sc);
   8395 	delay(10);
   8396 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8397 	CSR_WRITE_FLUSH(sc);
   8398 	delay(10);
   8399 	CSR_WRITE(sc, WMREG_CTRL, v);
   8400 	CSR_WRITE_FLUSH(sc);
   8401 	delay(10);
   8402 
   8403 	for (i = 0; i < 16; i++) {
   8404 		data <<= 1;
   8405 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8406 		CSR_WRITE_FLUSH(sc);
   8407 		delay(10);
   8408 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8409 			data |= 1;
   8410 		CSR_WRITE(sc, WMREG_CTRL, v);
   8411 		CSR_WRITE_FLUSH(sc);
   8412 		delay(10);
   8413 	}
   8414 
   8415 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8416 	CSR_WRITE_FLUSH(sc);
   8417 	delay(10);
   8418 	CSR_WRITE(sc, WMREG_CTRL, v);
   8419 	CSR_WRITE_FLUSH(sc);
   8420 	delay(10);
   8421 
   8422 	return data;
   8423 }
   8424 
   8425 #undef MDI_IO
   8426 #undef MDI_DIR
   8427 #undef MDI_CLK
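
/*
 * The bit-bang helpers above implement an IEEE 802.3 clause 22 MDIO
 * frame: a 32-bit preamble of all ones, a start pattern, a 2-bit
 * opcode, 5-bit PHY and register addresses, a turnaround, and 16 data
 * bits.  An illustrative sketch (not used by the driver) of how the
 * 14-bit command word passed to wm_i82543_mii_sendbits() for a read
 * is assembled:
 */
#ifdef notyet
static uint32_t
wm_i82543_mii_read_cmd(int phy, int reg)
{

	/* <start><read opcode><phy addr[4:0]><reg addr[4:0]>, 14 bits */
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    ((phy & 0x1f) << 5) | (reg & 0x1f);
}
#endif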
   8428 
   8429 /*
   8430  * wm_gmii_i82543_readreg:	[mii interface function]
   8431  *
   8432  *	Read a PHY register on the GMII (i82543 version).
   8433  */
   8434 static int
   8435 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8436 {
   8437 	struct wm_softc *sc = device_private(self);
   8438 	int rv;
   8439 
   8440 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8441 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8442 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8443 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8444 
   8445 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8446 	    device_xname(sc->sc_dev), phy, reg, rv));
   8447 
   8448 	return rv;
   8449 }
   8450 
   8451 /*
   8452  * wm_gmii_i82543_writereg:	[mii interface function]
   8453  *
   8454  *	Write a PHY register on the GMII (i82543 version).
   8455  */
   8456 static void
   8457 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8458 {
   8459 	struct wm_softc *sc = device_private(self);
   8460 
   8461 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8462 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8463 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8464 	    (MII_COMMAND_START << 30), 32);
   8465 }
   8466 
   8467 /*
   8468  * wm_gmii_mdic_readreg:	[mii interface function]
   8469  *
   8470  *	Read a PHY register on the GMII.
   8471  */
   8472 static int
   8473 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8474 {
   8475 	struct wm_softc *sc = device_private(self);
   8476 	uint32_t mdic = 0;
   8477 	int i, rv;
   8478 
   8479 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8480 	    MDIC_REGADD(reg));
   8481 
   8482 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8483 		mdic = CSR_READ(sc, WMREG_MDIC);
   8484 		if (mdic & MDIC_READY)
   8485 			break;
   8486 		delay(50);
   8487 	}
   8488 
   8489 	if ((mdic & MDIC_READY) == 0) {
   8490 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8491 		    device_xname(sc->sc_dev), phy, reg);
   8492 		rv = 0;
   8493 	} else if (mdic & MDIC_E) {
   8494 #if 0 /* This is normal if no PHY is present. */
   8495 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8496 		    device_xname(sc->sc_dev), phy, reg);
   8497 #endif
   8498 		rv = 0;
   8499 	} else {
   8500 		rv = MDIC_DATA(mdic);
   8501 		if (rv == 0xffff)
   8502 			rv = 0;
   8503 	}
   8504 
   8505 	return rv;
   8506 }
   8507 
   8508 /*
   8509  * wm_gmii_mdic_writereg:	[mii interface function]
   8510  *
   8511  *	Write a PHY register on the GMII.
   8512  */
   8513 static void
   8514 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8515 {
   8516 	struct wm_softc *sc = device_private(self);
   8517 	uint32_t mdic = 0;
   8518 	int i;
   8519 
   8520 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8521 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8522 
   8523 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8524 		mdic = CSR_READ(sc, WMREG_MDIC);
   8525 		if (mdic & MDIC_READY)
   8526 			break;
   8527 		delay(50);
   8528 	}
   8529 
   8530 	if ((mdic & MDIC_READY) == 0)
   8531 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8532 		    device_xname(sc->sc_dev), phy, reg);
   8533 	else if (mdic & MDIC_E)
   8534 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8535 		    device_xname(sc->sc_dev), phy, reg);
   8536 }
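
/*
 * Both MDIC accessors above poll the same completion flag: MDIC_READY,
 * checked every 50us, up to WM_GEN_POLL_TIMEOUT * 3 times.  A sketch
 * of that wait factored into a helper; this is hypothetical, the
 * driver keeps the loop inline:
 */
#ifdef notyet
static int
wm_gmii_mdic_wait(struct wm_softc *sc, uint32_t *mdicp)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	*mdicp = mdic;
	return ((mdic & MDIC_READY) != 0) ? 0 : ETIMEDOUT;
}
#endif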
   8537 
   8538 /*
   8539  * wm_gmii_i82544_readreg:	[mii interface function]
   8540  *
   8541  *	Read a PHY register on the GMII.
   8542  */
   8543 static int
   8544 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8545 {
   8546 	struct wm_softc *sc = device_private(self);
   8547 	int rv;
   8548 
   8549 	if (sc->phy.acquire(sc)) {
   8550 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8551 		    __func__);
   8552 		return 0;
   8553 	}
   8554 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8555 	sc->phy.release(sc);
   8556 
   8557 	return rv;
   8558 }
   8559 
   8560 /*
   8561  * wm_gmii_i82544_writereg:	[mii interface function]
   8562  *
   8563  *	Write a PHY register on the GMII.
   8564  */
   8565 static void
   8566 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8567 {
   8568 	struct wm_softc *sc = device_private(self);
   8569 
   8570 	if (sc->phy.acquire(sc)) {
   8571 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8572 		    __func__);
		return;
    8573 	}
   8574 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8575 	sc->phy.release(sc);
   8576 }
   8577 
   8578 /*
   8579  * wm_gmii_i80003_readreg:	[mii interface function]
   8580  *
    8581  *	Read a PHY register on the Kumeran bus.
    8582  * This could be handled by the PHY layer if we didn't have to lock the
    8583  * resource ...
   8584  */
   8585 static int
   8586 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8587 {
   8588 	struct wm_softc *sc = device_private(self);
   8589 	int rv;
   8590 
   8591 	if (phy != 1) /* only one PHY on kumeran bus */
   8592 		return 0;
   8593 
   8594 	if (sc->phy.acquire(sc)) {
   8595 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8596 		    __func__);
   8597 		return 0;
   8598 	}
   8599 
   8600 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8601 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8602 		    reg >> GG82563_PAGE_SHIFT);
   8603 	} else {
   8604 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8605 		    reg >> GG82563_PAGE_SHIFT);
   8606 	}
    8607 	/* Wait another 200us to work around a bug with the MDIC ready bit */
   8608 	delay(200);
   8609 	rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8610 	delay(200);
   8611 	sc->phy.release(sc);
   8612 
   8613 	return rv;
   8614 }
   8615 
   8616 /*
   8617  * wm_gmii_i80003_writereg:	[mii interface function]
   8618  *
    8619  *	Write a PHY register on the Kumeran bus.
    8620  * This could be handled by the PHY layer if we didn't have to lock the
    8621  * resource ...
   8622  */
   8623 static void
   8624 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8625 {
   8626 	struct wm_softc *sc = device_private(self);
   8627 
   8628 	if (phy != 1) /* only one PHY on kumeran bus */
   8629 		return;
   8630 
   8631 	if (sc->phy.acquire(sc)) {
   8632 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8633 		    __func__);
   8634 		return;
   8635 	}
   8636 
   8637 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8638 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8639 		    reg >> GG82563_PAGE_SHIFT);
   8640 	} else {
   8641 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8642 		    reg >> GG82563_PAGE_SHIFT);
   8643 	}
    8644 	/* Wait another 200us to work around a bug with the MDIC ready bit */
   8645 	delay(200);
   8646 	wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8647 	delay(200);
   8648 
   8649 	sc->phy.release(sc);
   8650 }
   8651 
   8652 /*
   8653  * wm_gmii_bm_readreg:	[mii interface function]
   8654  *
    8655  *	Read a PHY register on the BM PHY (82567).
    8656  * This could be handled by the PHY layer if we didn't have to lock the
    8657  * resource ...
   8658  */
   8659 static int
   8660 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8661 {
   8662 	struct wm_softc *sc = device_private(self);
   8663 	int rv;
   8664 
   8665 	if (sc->phy.acquire(sc)) {
   8666 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8667 		    __func__);
   8668 		return 0;
   8669 	}
   8670 
   8671 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8672 		if (phy == 1)
   8673 			wm_gmii_mdic_writereg(self, phy,
   8674 			    MII_IGPHY_PAGE_SELECT, reg);
   8675 		else
   8676 			wm_gmii_mdic_writereg(self, phy,
   8677 			    GG82563_PHY_PAGE_SELECT,
   8678 			    reg >> GG82563_PAGE_SHIFT);
   8679 	}
   8680 
   8681 	rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8682 	sc->phy.release(sc);
   8683 	return rv;
   8684 }
   8685 
   8686 /*
   8687  * wm_gmii_bm_writereg:	[mii interface function]
   8688  *
    8689  *	Write a PHY register on the BM PHY (82567).
    8690  * This could be handled by the PHY layer if we didn't have to lock the
    8691  * resource ...
   8692  */
   8693 static void
   8694 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8695 {
   8696 	struct wm_softc *sc = device_private(self);
   8697 
   8698 	if (sc->phy.acquire(sc)) {
   8699 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8700 		    __func__);
   8701 		return;
   8702 	}
   8703 
   8704 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8705 		if (phy == 1)
   8706 			wm_gmii_mdic_writereg(self, phy,
   8707 			    MII_IGPHY_PAGE_SELECT, reg);
   8708 		else
   8709 			wm_gmii_mdic_writereg(self, phy,
   8710 			    GG82563_PHY_PAGE_SELECT,
   8711 			    reg >> GG82563_PAGE_SHIFT);
   8712 	}
   8713 
   8714 	wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8715 	sc->phy.release(sc);
   8716 }
   8717 
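/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register; those live on
 *	page 800 and need a special sequence: select page 769, enable
 *	wakeup-page access through BM_WUC_ENABLE_REG, select page 800,
 *	write the register number to the address opcode, access the data
 *	opcode, and finally restore the original enable bits.
 */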
   8718 static void
   8719 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8720 {
   8721 	struct wm_softc *sc = device_private(self);
   8722 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8723 	uint16_t wuce;
   8724 
   8725 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8726 	if (sc->sc_type == WM_T_PCH) {
    8727 		/* XXX The e1000 driver does nothing here... why? */
   8728 	}
   8729 
   8730 	/* Set page 769 */
   8731 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8732 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8733 
   8734 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8735 
   8736 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8737 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8738 	    wuce | BM_WUC_ENABLE_BIT);
   8739 
   8740 	/* Select page 800 */
   8741 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8742 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8743 
   8744 	/* Write page 800 */
   8745 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8746 
   8747 	if (rd)
   8748 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8749 	else
   8750 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8751 
   8752 	/* Set page 769 */
   8753 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8754 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8755 
   8756 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8757 }
   8758 
   8759 /*
   8760  * wm_gmii_hv_readreg:	[mii interface function]
   8761  *
    8762  *	Read a PHY register on the HV (PCH) PHY.
    8763  * This could be handled by the PHY layer if we didn't have to lock the
    8764  * resource ...
   8765  */
   8766 static int
   8767 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8768 {
   8769 	struct wm_softc *sc = device_private(self);
   8770 	int rv;
   8771 
   8772 	if (sc->phy.acquire(sc)) {
   8773 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8774 		    __func__);
   8775 		return 0;
   8776 	}
   8777 
   8778 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   8779 	sc->phy.release(sc);
   8780 	return rv;
   8781 }
   8782 
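/*
 * wm_gmii_hv_readreg_locked:
 *
 *	Read an HV PHY register with the PHY semaphore already held.
 *	Page 800 (the wakeup page) and pages below 768 are special-cased;
 *	for the multi-page range the page is selected through
 *	MII_IGPHY_PAGE_SELECT before the MDIC access.
 */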
   8783 static int
   8784 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   8785 {
   8786 	struct wm_softc *sc = device_private(self);
   8787 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8788 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8789 	uint16_t val;
   8790 	int rv;
   8791 
   8792 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8793 	if (sc->sc_phytype == WMPHY_82577) {
   8794 		/* XXX must write */
   8795 	}
   8796 
   8797 	/* Page 800 works differently than the rest so it has its own func */
   8798 	if (page == BM_WUC_PAGE) {
   8799 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8800 		return val;
   8801 	}
   8802 
    8803 	/*
    8804 	 * Pages lower than 768 work differently than the rest and would
    8805 	 * need their own function; that is not implemented yet.
    8806 	 */
   8807 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8808 		printf("gmii_hv_readreg!!!\n");
   8809 		return 0;
   8810 	}
   8811 
   8812 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8813 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8814 		    page << BME1000_PAGE_SHIFT);
   8815 	}
   8816 
   8817 	rv = wm_gmii_mdic_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8818 	return rv;
   8819 }
   8820 
   8821 /*
   8822  * wm_gmii_hv_writereg:	[mii interface function]
   8823  *
    8824  *	Write a PHY register on the HV (PCH) PHY.
    8825  * This could be handled by the PHY layer if we didn't have to lock the
    8826  * resource ...
   8827  */
   8828 static void
   8829 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8830 {
   8831 	struct wm_softc *sc = device_private(self);
   8832 
   8833 	if (sc->phy.acquire(sc)) {
   8834 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8835 		    __func__);
   8836 		return;
   8837 	}
   8838 
   8839 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   8840 	sc->phy.release(sc);
   8841 }
   8842 
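/*
 * wm_gmii_hv_writereg_locked:
 *
 *	Write an HV PHY register with the PHY semaphore already held,
 *	using the same page handling as wm_gmii_hv_readreg_locked().
 */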
   8843 static void
   8844 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   8845 {
   8846 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8847 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8848 
   8849 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8850 
   8851 	/* Page 800 works differently than the rest so it has its own func */
   8852 	if (page == BM_WUC_PAGE) {
   8853 		uint16_t tmp;
   8854 
   8855 		tmp = val;
   8856 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8857 		return;
   8858 	}
   8859 
    8860 	/*
    8861 	 * Pages lower than 768 work differently than the rest and would
    8862 	 * need their own function; that is not implemented yet.
    8863 	 */
   8864 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8865 		printf("gmii_hv_writereg!!!\n");
   8866 		return;
   8867 	}
   8868 
   8869 	/*
   8870 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8871 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8872 	 */
   8873 
   8874 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8875 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8876 		    page << BME1000_PAGE_SHIFT);
   8877 	}
   8878 
   8879 	wm_gmii_mdic_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8880 }
   8881 
   8882 /*
   8883  * wm_gmii_82580_readreg:	[mii interface function]
   8884  *
    8885  *	Read a PHY register on the 82580, I350 and I354.
    8886  * This could be handled by the PHY layer if we didn't have to lock the
    8887  * resource ...
   8888  */
   8889 static int
   8890 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8891 {
   8892 	struct wm_softc *sc = device_private(self);
   8893 	int rv;
   8894 
   8895 	if (sc->phy.acquire(sc) != 0) {
   8896 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8897 		    __func__);
   8898 		return 0;
   8899 	}
   8900 
   8901 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8902 
   8903 	sc->phy.release(sc);
   8904 	return rv;
   8905 }
   8906 
   8907 /*
   8908  * wm_gmii_82580_writereg:	[mii interface function]
   8909  *
    8910  *	Write a PHY register on the 82580, I350 and I354.
    8911  * This could be handled by the PHY layer if we didn't have to lock the
    8912  * resource ...
   8913  */
   8914 static void
   8915 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8916 {
   8917 	struct wm_softc *sc = device_private(self);
   8918 
   8919 	if (sc->phy.acquire(sc) != 0) {
   8920 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8921 		    __func__);
   8922 		return;
   8923 	}
   8924 
   8925 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8926 
   8927 	sc->phy.release(sc);
   8928 }
   8929 
   8930 /*
   8931  * wm_gmii_gs40g_readreg:	[mii interface function]
   8932  *
    8933  *	Read a PHY register on the I210 and I211.
    8934  * This could be handled by the PHY layer if we didn't have to lock the
    8935  * resource ...
   8936  */
   8937 static int
   8938 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8939 {
   8940 	struct wm_softc *sc = device_private(self);
   8941 	int page, offset;
   8942 	int rv;
   8943 
   8944 	/* Acquire semaphore */
   8945 	if (sc->phy.acquire(sc)) {
   8946 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8947 		    __func__);
   8948 		return 0;
   8949 	}
   8950 
   8951 	/* Page select */
   8952 	page = reg >> GS40G_PAGE_SHIFT;
   8953 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8954 
   8955 	/* Read reg */
   8956 	offset = reg & GS40G_OFFSET_MASK;
   8957 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   8958 
   8959 	sc->phy.release(sc);
   8960 	return rv;
   8961 }
   8962 
   8963 /*
   8964  * wm_gmii_gs40g_writereg:	[mii interface function]
   8965  *
   8966  *	Write a PHY register on the I210 and I211.
   8967  * This could be handled by the PHY layer if we didn't have to lock the
    8968  * resource ...
   8969  */
   8970 static void
   8971 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8972 {
   8973 	struct wm_softc *sc = device_private(self);
   8974 	int page, offset;
   8975 
   8976 	/* Acquire semaphore */
   8977 	if (sc->phy.acquire(sc)) {
   8978 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8979 		    __func__);
   8980 		return;
   8981 	}
   8982 
   8983 	/* Page select */
   8984 	page = reg >> GS40G_PAGE_SHIFT;
   8985 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8986 
   8987 	/* Write reg */
   8988 	offset = reg & GS40G_OFFSET_MASK;
   8989 	wm_gmii_mdic_writereg(self, phy, offset, val);
   8990 
   8991 	/* Release semaphore */
   8992 	sc->phy.release(sc);
   8993 }
   8994 
   8995 /*
   8996  * wm_gmii_statchg:	[mii interface function]
   8997  *
   8998  *	Callback from MII layer when media changes.
   8999  */
   9000 static void
   9001 wm_gmii_statchg(struct ifnet *ifp)
   9002 {
   9003 	struct wm_softc *sc = ifp->if_softc;
   9004 	struct mii_data *mii = &sc->sc_mii;
   9005 
   9006 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9007 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9008 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9009 
   9010 	/*
   9011 	 * Get flow control negotiation result.
   9012 	 */
   9013 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9014 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9015 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9016 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9017 	}
   9018 
   9019 	if (sc->sc_flowflags & IFM_FLOW) {
   9020 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9021 			sc->sc_ctrl |= CTRL_TFCE;
   9022 			sc->sc_fcrtl |= FCRTL_XONE;
   9023 		}
   9024 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9025 			sc->sc_ctrl |= CTRL_RFCE;
   9026 	}
   9027 
   9028 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9029 		DPRINTF(WM_DEBUG_LINK,
   9030 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9031 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9032 	} else {
   9033 		DPRINTF(WM_DEBUG_LINK,
   9034 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9035 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9036 	}
   9037 
   9038 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9039 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9040 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9041 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9042 	if (sc->sc_type == WM_T_80003) {
   9043 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9044 		case IFM_1000_T:
   9045 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9046 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9047 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9048 			break;
   9049 		default:
   9050 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9051 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9052 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9053 			break;
   9054 		}
   9055 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9056 	}
   9057 }
   9058 
   9059 /*
   9060  * wm_kmrn_readreg:
   9061  *
    9062  *	Read a Kumeran register
   9063  */
   9064 static int
   9065 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9066 {
   9067 	int rv;
   9068 
   9069 	if (sc->sc_type == WM_T_80003)
   9070 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9071 	else
   9072 		rv = sc->phy.acquire(sc);
   9073 	if (rv != 0) {
   9074 		aprint_error_dev(sc->sc_dev,
   9075 		    "%s: failed to get semaphore\n", __func__);
   9076 		return 0;
   9077 	}
   9078 
    9079 	rv = wm_kmrn_readreg_locked(sc, reg);
   9080 
   9081 	if (sc->sc_type == WM_T_80003)
   9082 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9083 	else
   9084 		sc->phy.release(sc);
   9085 
   9086 	return rv;
   9087 }
   9088 
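/*
 * wm_kmrn_readreg_locked:
 *
 *	Read a Kumeran register with the semaphore already held: write
 *	the register offset together with the REN (read enable) bit to
 *	KUMCTRLSTA; after a short delay the data shows up in the low
 *	16 bits of the same register.
 */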
   9089 static int
   9090 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9091 {
   9092 	int rv;
   9093 
   9094 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9095 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9096 	    KUMCTRLSTA_REN);
   9097 	CSR_WRITE_FLUSH(sc);
   9098 	delay(2);
   9099 
   9100 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9101 
   9102 	return rv;
   9103 }
   9104 
   9105 /*
   9106  * wm_kmrn_writereg:
   9107  *
    9108  *	Write a Kumeran register
   9109  */
   9110 static void
   9111 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9112 {
   9113 	int rv;
   9114 
   9115 	if (sc->sc_type == WM_T_80003)
   9116 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9117 	else
   9118 		rv = sc->phy.acquire(sc);
   9119 	if (rv != 0) {
   9120 		aprint_error_dev(sc->sc_dev,
   9121 		    "%s: failed to get semaphore\n", __func__);
   9122 		return;
   9123 	}
   9124 
   9125 	wm_kmrn_writereg_locked(sc, reg, val);
   9126 
   9127 	if (sc->sc_type == WM_T_80003)
   9128 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9129 	else
   9130 		sc->phy.release(sc);
   9131 }
   9132 
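/*
 * wm_kmrn_writereg_locked:
 *
 *	Write a Kumeran register with the semaphore already held: the
 *	offset and the 16-bit data are combined into a single KUMCTRLSTA
 *	write, with REN left clear.
 */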
   9133 static void
   9134 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9135 {
   9136 
   9137 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9138 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9139 	    (val & KUMCTRLSTA_MASK));
   9140 }
   9141 
   9142 /* SGMII related */
   9143 
   9144 /*
   9145  * wm_sgmii_uses_mdio
   9146  *
   9147  * Check whether the transaction is to the internal PHY or the external
   9148  * MDIO interface. Return true if it's MDIO.
   9149  */
   9150 static bool
   9151 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9152 {
   9153 	uint32_t reg;
   9154 	bool ismdio = false;
   9155 
   9156 	switch (sc->sc_type) {
   9157 	case WM_T_82575:
   9158 	case WM_T_82576:
   9159 		reg = CSR_READ(sc, WMREG_MDIC);
   9160 		ismdio = ((reg & MDIC_DEST) != 0);
   9161 		break;
   9162 	case WM_T_82580:
   9163 	case WM_T_I350:
   9164 	case WM_T_I354:
   9165 	case WM_T_I210:
   9166 	case WM_T_I211:
   9167 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9168 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9169 		break;
   9170 	default:
   9171 		break;
   9172 	}
   9173 
   9174 	return ismdio;
   9175 }
   9176 
   9177 /*
   9178  * wm_sgmii_readreg:	[mii interface function]
   9179  *
   9180  *	Read a PHY register on the SGMII
   9181  * This could be handled by the PHY layer if we didn't have to lock the
    9182  * resource ...
   9183  */
   9184 static int
   9185 wm_sgmii_readreg(device_t self, int phy, int reg)
   9186 {
   9187 	struct wm_softc *sc = device_private(self);
   9188 	uint32_t i2ccmd;
   9189 	int i, rv;
   9190 
   9191 	if (sc->phy.acquire(sc)) {
   9192 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9193 		    __func__);
   9194 		return 0;
   9195 	}
   9196 
   9197 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9198 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9199 	    | I2CCMD_OPCODE_READ;
   9200 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9201 
   9202 	/* Poll the ready bit */
   9203 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9204 		delay(50);
   9205 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9206 		if (i2ccmd & I2CCMD_READY)
   9207 			break;
   9208 	}
   9209 	if ((i2ccmd & I2CCMD_READY) == 0)
   9210 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9211 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9212 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9213 
   9214 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9215 
   9216 	sc->phy.release(sc);
   9217 	return rv;
   9218 }
   9219 
   9220 /*
   9221  * wm_sgmii_writereg:	[mii interface function]
   9222  *
   9223  *	Write a PHY register on the SGMII.
   9224  * This could be handled by the PHY layer if we didn't have to lock the
    9225  * resource ...
   9226  */
   9227 static void
   9228 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9229 {
   9230 	struct wm_softc *sc = device_private(self);
   9231 	uint32_t i2ccmd;
   9232 	int i;
   9233 	int val_swapped;
   9234 
   9235 	if (sc->phy.acquire(sc) != 0) {
   9236 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9237 		    __func__);
   9238 		return;
   9239 	}
   9240 	/* Swap the data bytes for the I2C interface */
   9241 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9242 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9243 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9244 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9245 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9246 
   9247 	/* Poll the ready bit */
   9248 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9249 		delay(50);
   9250 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9251 		if (i2ccmd & I2CCMD_READY)
   9252 			break;
   9253 	}
   9254 	if ((i2ccmd & I2CCMD_READY) == 0)
   9255 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9256 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9257 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9258 
   9259 	sc->phy.release(sc);
   9260 }
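
/*
 * Both SGMII accessors above swap the two data bytes around the I2CCMD
 * transfer because the I2C interface carries the 16-bit value in the
 * opposite byte order from the host.  A sketch of the swap as a helper;
 * hypothetical, the driver open-codes it:
 */
#ifdef notyet
static inline uint16_t
wm_i2ccmd_swap16(uint16_t val)
{

	/* 0x1234 on the host side becomes 0x3412 in the I2CCMD data. */
	return ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
}
#endif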
   9261 
   9262 /* TBI related */
   9263 
   9264 /*
   9265  * wm_tbi_mediainit:
   9266  *
   9267  *	Initialize media for use on 1000BASE-X devices.
   9268  */
   9269 static void
   9270 wm_tbi_mediainit(struct wm_softc *sc)
   9271 {
   9272 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9273 	const char *sep = "";
   9274 
   9275 	if (sc->sc_type < WM_T_82543)
   9276 		sc->sc_tipg = TIPG_WM_DFLT;
   9277 	else
   9278 		sc->sc_tipg = TIPG_LG_DFLT;
   9279 
   9280 	sc->sc_tbi_serdes_anegticks = 5;
   9281 
   9282 	/* Initialize our media structures */
   9283 	sc->sc_mii.mii_ifp = ifp;
   9284 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9285 
   9286 	if ((sc->sc_type >= WM_T_82575)
   9287 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9288 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9289 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9290 	else
   9291 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9292 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9293 
   9294 	/*
   9295 	 * SWD Pins:
   9296 	 *
   9297 	 *	0 = Link LED (output)
   9298 	 *	1 = Loss Of Signal (input)
   9299 	 */
   9300 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9301 
   9302 	/* XXX Perhaps this is only for TBI */
   9303 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9304 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9305 
   9306 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9307 		sc->sc_ctrl &= ~CTRL_LRST;
   9308 
   9309 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9310 
   9311 #define	ADD(ss, mm, dd)							\
   9312 do {									\
   9313 	aprint_normal("%s%s", sep, ss);					\
   9314 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9315 	sep = ", ";							\
   9316 } while (/*CONSTCOND*/0)
   9317 
   9318 	aprint_normal_dev(sc->sc_dev, "");
   9319 
   9320 	/* Only 82545 is LX */
   9321 	if (sc->sc_type == WM_T_82545) {
   9322 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9323 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9324 	} else {
   9325 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9326 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9327 	}
   9328 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9329 	aprint_normal("\n");
   9330 
   9331 #undef ADD
   9332 
   9333 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9334 }
   9335 
   9336 /*
   9337  * wm_tbi_mediachange:	[ifmedia interface function]
   9338  *
   9339  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9340  */
   9341 static int
   9342 wm_tbi_mediachange(struct ifnet *ifp)
   9343 {
   9344 	struct wm_softc *sc = ifp->if_softc;
   9345 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9346 	uint32_t status;
   9347 	int i;
   9348 
   9349 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9350 		/* XXX need some work for >= 82571 and < 82575 */
   9351 		if (sc->sc_type < WM_T_82575)
   9352 			return 0;
   9353 	}
   9354 
   9355 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9356 	    || (sc->sc_type >= WM_T_82575))
   9357 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9358 
   9359 	sc->sc_ctrl &= ~CTRL_LRST;
   9360 	sc->sc_txcw = TXCW_ANE;
   9361 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9362 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9363 	else if (ife->ifm_media & IFM_FDX)
   9364 		sc->sc_txcw |= TXCW_FD;
   9365 	else
   9366 		sc->sc_txcw |= TXCW_HD;
   9367 
   9368 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9369 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9370 
   9371 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9372 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9373 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9374 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9375 	CSR_WRITE_FLUSH(sc);
   9376 	delay(1000);
   9377 
   9378 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9379 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9380 
    9381 	/*
    9382 	 * On chips later than the 82544, the CTRL_SWDPIN(1) bit will be
    9383 	 * set if the optics detect a signal, 0 if they don't.
    9384 	 */
   9385 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9386 		/* Have signal; wait for the link to come up. */
   9387 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9388 			delay(10000);
   9389 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9390 				break;
   9391 		}
   9392 
   9393 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9394 			    device_xname(sc->sc_dev),i));
   9395 
   9396 		status = CSR_READ(sc, WMREG_STATUS);
   9397 		DPRINTF(WM_DEBUG_LINK,
   9398 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9399 			device_xname(sc->sc_dev),status, STATUS_LU));
   9400 		if (status & STATUS_LU) {
   9401 			/* Link is up. */
   9402 			DPRINTF(WM_DEBUG_LINK,
   9403 			    ("%s: LINK: set media -> link up %s\n",
   9404 			    device_xname(sc->sc_dev),
   9405 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9406 
   9407 			/*
    9408 			 * NOTE: the hardware updates TFCE and RFCE in CTRL,
   9409 			 * so we should update sc->sc_ctrl
   9410 			 */
   9411 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9412 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9413 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9414 			if (status & STATUS_FD)
   9415 				sc->sc_tctl |=
   9416 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9417 			else
   9418 				sc->sc_tctl |=
   9419 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9420 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9421 				sc->sc_fcrtl |= FCRTL_XONE;
   9422 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9423 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9424 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9425 				      sc->sc_fcrtl);
   9426 			sc->sc_tbi_linkup = 1;
   9427 		} else {
   9428 			if (i == WM_LINKUP_TIMEOUT)
   9429 				wm_check_for_link(sc);
   9430 			/* Link is down. */
   9431 			DPRINTF(WM_DEBUG_LINK,
   9432 			    ("%s: LINK: set media -> link down\n",
   9433 			    device_xname(sc->sc_dev)));
   9434 			sc->sc_tbi_linkup = 0;
   9435 		}
   9436 	} else {
   9437 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9438 		    device_xname(sc->sc_dev)));
   9439 		sc->sc_tbi_linkup = 0;
   9440 	}
   9441 
   9442 	wm_tbi_serdes_set_linkled(sc);
   9443 
   9444 	return 0;
   9445 }
   9446 
   9447 /*
   9448  * wm_tbi_mediastatus:	[ifmedia interface function]
   9449  *
   9450  *	Get the current interface media status on a 1000BASE-X device.
   9451  */
   9452 static void
   9453 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9454 {
   9455 	struct wm_softc *sc = ifp->if_softc;
   9456 	uint32_t ctrl, status;
   9457 
   9458 	ifmr->ifm_status = IFM_AVALID;
   9459 	ifmr->ifm_active = IFM_ETHER;
   9460 
   9461 	status = CSR_READ(sc, WMREG_STATUS);
   9462 	if ((status & STATUS_LU) == 0) {
   9463 		ifmr->ifm_active |= IFM_NONE;
   9464 		return;
   9465 	}
   9466 
   9467 	ifmr->ifm_status |= IFM_ACTIVE;
   9468 	/* Only 82545 is LX */
   9469 	if (sc->sc_type == WM_T_82545)
   9470 		ifmr->ifm_active |= IFM_1000_LX;
   9471 	else
   9472 		ifmr->ifm_active |= IFM_1000_SX;
   9473 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9474 		ifmr->ifm_active |= IFM_FDX;
   9475 	else
   9476 		ifmr->ifm_active |= IFM_HDX;
   9477 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9478 	if (ctrl & CTRL_RFCE)
   9479 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9480 	if (ctrl & CTRL_TFCE)
   9481 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9482 }
   9483 
   9484 /* XXX TBI only */
   9485 static int
   9486 wm_check_for_link(struct wm_softc *sc)
   9487 {
   9488 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9489 	uint32_t rxcw;
   9490 	uint32_t ctrl;
   9491 	uint32_t status;
   9492 	uint32_t sig;
   9493 
   9494 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9495 		/* XXX need some work for >= 82571 */
   9496 		if (sc->sc_type >= WM_T_82571) {
   9497 			sc->sc_tbi_linkup = 1;
   9498 			return 0;
   9499 		}
   9500 	}
   9501 
   9502 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9503 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9504 	status = CSR_READ(sc, WMREG_STATUS);
   9505 
   9506 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9507 
   9508 	DPRINTF(WM_DEBUG_LINK,
   9509 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9510 		device_xname(sc->sc_dev), __func__,
   9511 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9512 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9513 
   9514 	/*
   9515 	 * SWDPIN   LU RXCW
   9516 	 *      0    0    0
   9517 	 *      0    0    1	(should not happen)
   9518 	 *      0    1    0	(should not happen)
   9519 	 *      0    1    1	(should not happen)
   9520 	 *      1    0    0	Disable autonego and force linkup
   9521 	 *      1    0    1	got /C/ but not linkup yet
   9522 	 *      1    1    0	(linkup)
   9523 	 *      1    1    1	If IFM_AUTO, back to autonego
   9524 	 *
   9525 	 */
   9526 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9527 	    && ((status & STATUS_LU) == 0)
   9528 	    && ((rxcw & RXCW_C) == 0)) {
   9529 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9530 			__func__));
   9531 		sc->sc_tbi_linkup = 0;
   9532 		/* Disable auto-negotiation in the TXCW register */
   9533 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9534 
   9535 		/*
   9536 		 * Force link-up and also force full-duplex.
   9537 		 *
    9538 		 * NOTE: the hardware updated TFCE and RFCE in CTRL,
   9539 		 * so we should update sc->sc_ctrl
   9540 		 */
   9541 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9542 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9543 	} else if (((status & STATUS_LU) != 0)
   9544 	    && ((rxcw & RXCW_C) != 0)
   9545 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9546 		sc->sc_tbi_linkup = 1;
   9547 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9548 			__func__));
   9549 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9550 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9551 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9552 	    && ((rxcw & RXCW_C) != 0)) {
   9553 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9554 	} else {
   9555 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9556 			status));
   9557 	}
   9558 
   9559 	return 0;
   9560 }
   9561 
   9562 /*
   9563  * wm_tbi_tick:
   9564  *
   9565  *	Check the link on TBI devices.
   9566  *	This function acts as mii_tick().
   9567  */
   9568 static void
   9569 wm_tbi_tick(struct wm_softc *sc)
   9570 {
   9571 	struct mii_data *mii = &sc->sc_mii;
   9572 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9573 	uint32_t status;
   9574 
   9575 	KASSERT(WM_CORE_LOCKED(sc));
   9576 
   9577 	status = CSR_READ(sc, WMREG_STATUS);
   9578 
   9579 	/* XXX is this needed? */
   9580 	(void)CSR_READ(sc, WMREG_RXCW);
   9581 	(void)CSR_READ(sc, WMREG_CTRL);
   9582 
   9583 	/* set link status */
   9584 	if ((status & STATUS_LU) == 0) {
   9585 		DPRINTF(WM_DEBUG_LINK,
   9586 		    ("%s: LINK: checklink -> down\n",
   9587 			device_xname(sc->sc_dev)));
   9588 		sc->sc_tbi_linkup = 0;
   9589 	} else if (sc->sc_tbi_linkup == 0) {
   9590 		DPRINTF(WM_DEBUG_LINK,
   9591 		    ("%s: LINK: checklink -> up %s\n",
   9592 			device_xname(sc->sc_dev),
   9593 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9594 		sc->sc_tbi_linkup = 1;
   9595 		sc->sc_tbi_serdes_ticks = 0;
   9596 	}
   9597 
   9598 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9599 		goto setled;
   9600 
   9601 	if ((status & STATUS_LU) == 0) {
   9602 		sc->sc_tbi_linkup = 0;
   9603 		/* If the timer expired, retry autonegotiation */
   9604 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9605 		    && (++sc->sc_tbi_serdes_ticks
   9606 			>= sc->sc_tbi_serdes_anegticks)) {
   9607 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9608 			sc->sc_tbi_serdes_ticks = 0;
   9609 			/*
   9610 			 * Reset the link, and let autonegotiation do
   9611 			 * its thing
   9612 			 */
   9613 			sc->sc_ctrl |= CTRL_LRST;
   9614 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9615 			CSR_WRITE_FLUSH(sc);
   9616 			delay(1000);
   9617 			sc->sc_ctrl &= ~CTRL_LRST;
   9618 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9619 			CSR_WRITE_FLUSH(sc);
   9620 			delay(1000);
   9621 			CSR_WRITE(sc, WMREG_TXCW,
   9622 			    sc->sc_txcw & ~TXCW_ANE);
   9623 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9624 		}
   9625 	}
   9626 
   9627 setled:
   9628 	wm_tbi_serdes_set_linkled(sc);
   9629 }
   9630 
   9631 /* SERDES related */
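/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SERDES link by enabling the PCS and clearing
 *	CTRL_EXT_SWDPIN(3), the pin this driver also uses to control
 *	SFP cage power.
 */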
   9632 static void
   9633 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9634 {
   9635 	uint32_t reg;
   9636 
   9637 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9638 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9639 		return;
   9640 
   9641 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9642 	reg |= PCS_CFG_PCS_EN;
   9643 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9644 
   9645 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9646 	reg &= ~CTRL_EXT_SWDPIN(3);
   9647 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9648 	CSR_WRITE_FLUSH(sc);
   9649 }
   9650 
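/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SERDES device.
 */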
   9651 static int
   9652 wm_serdes_mediachange(struct ifnet *ifp)
   9653 {
   9654 	struct wm_softc *sc = ifp->if_softc;
   9655 	bool pcs_autoneg = true; /* XXX */
   9656 	uint32_t ctrl_ext, pcs_lctl, reg;
   9657 
   9658 	/* XXX Currently, this function is not called on 8257[12] */
   9659 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9660 	    || (sc->sc_type >= WM_T_82575))
   9661 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9662 
   9663 	wm_serdes_power_up_link_82575(sc);
   9664 
   9665 	sc->sc_ctrl |= CTRL_SLU;
   9666 
   9667 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9668 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9669 
   9670 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9671 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9672 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9673 	case CTRL_EXT_LINK_MODE_SGMII:
   9674 		pcs_autoneg = true;
   9675 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9676 		break;
   9677 	case CTRL_EXT_LINK_MODE_1000KX:
   9678 		pcs_autoneg = false;
   9679 		/* FALLTHROUGH */
   9680 	default:
   9681 		if ((sc->sc_type == WM_T_82575)
   9682 		    || (sc->sc_type == WM_T_82576)) {
   9683 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9684 				pcs_autoneg = false;
   9685 		}
   9686 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9687 		    | CTRL_FRCFDX;
   9688 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9689 	}
   9690 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9691 
   9692 	if (pcs_autoneg) {
   9693 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9694 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9695 
   9696 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9697 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9698 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9699 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9700 	} else
   9701 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9702 
   9703 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9704 
   9706 	return 0;
   9707 }
   9708 
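         /*
          * wm_serdes_mediastatus:
          *
          *	Get the current interface media status on SERDES devices.
          */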
   9709 static void
   9710 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9711 {
   9712 	struct wm_softc *sc = ifp->if_softc;
   9713 	struct mii_data *mii = &sc->sc_mii;
   9714 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9715 	uint32_t pcs_adv, pcs_lpab, reg;
   9716 
   9717 	ifmr->ifm_status = IFM_AVALID;
   9718 	ifmr->ifm_active = IFM_ETHER;
   9719 
   9720 	/* Check PCS */
   9721 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9722 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9723 		ifmr->ifm_active |= IFM_NONE;
   9724 		sc->sc_tbi_linkup = 0;
   9725 		goto setled;
   9726 	}
   9727 
   9728 	sc->sc_tbi_linkup = 1;
   9729 	ifmr->ifm_status |= IFM_ACTIVE;
   9730 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9731 	if ((reg & PCS_LSTS_FDX) != 0)
   9732 		ifmr->ifm_active |= IFM_FDX;
   9733 	else
   9734 		ifmr->ifm_active |= IFM_HDX;
   9735 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9736 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9737 		/* Check flow */
   9738 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9739 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9740 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9741 			goto setled;
   9742 		}
   9743 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9744 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9745 		DPRINTF(WM_DEBUG_LINK,
   9746 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9747 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9748 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9749 			mii->mii_media_active |= IFM_FLOW
   9750 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9751 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9752 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9753 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9754 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9755 			mii->mii_media_active |= IFM_FLOW
   9756 			    | IFM_ETH_TXPAUSE;
   9757 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9758 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9759 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9760 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9761 			mii->mii_media_active |= IFM_FLOW
   9762 			    | IFM_ETH_RXPAUSE;
   9763 		} else {
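         			/* No flow control was negotiated */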
   9764 		}
   9765 	}
   9766 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9767 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9768 setled:
   9769 	wm_tbi_serdes_set_linkled(sc);
   9770 }
   9771 
   9772 /*
   9773  * wm_serdes_tick:
   9774  *
   9775  *	Check the link on serdes devices.
   9776  */
   9777 static void
   9778 wm_serdes_tick(struct wm_softc *sc)
   9779 {
   9780 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9781 	struct mii_data *mii = &sc->sc_mii;
   9782 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9783 	uint32_t reg;
   9784 
   9785 	KASSERT(WM_CORE_LOCKED(sc));
   9786 
   9787 	mii->mii_media_status = IFM_AVALID;
   9788 	mii->mii_media_active = IFM_ETHER;
   9789 
   9790 	/* Check PCS */
   9791 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9792 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9793 		mii->mii_media_status |= IFM_ACTIVE;
   9794 		sc->sc_tbi_linkup = 1;
   9795 		sc->sc_tbi_serdes_ticks = 0;
   9796 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9797 		if ((reg & PCS_LSTS_FDX) != 0)
   9798 			mii->mii_media_active |= IFM_FDX;
   9799 		else
   9800 			mii->mii_media_active |= IFM_HDX;
   9801 	} else {
    9802 		mii->mii_media_active |= IFM_NONE;
   9803 		sc->sc_tbi_linkup = 0;
    9804 		/* If the timer expired, retry autonegotiation */
   9805 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9806 		    && (++sc->sc_tbi_serdes_ticks
   9807 			>= sc->sc_tbi_serdes_anegticks)) {
   9808 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9809 			sc->sc_tbi_serdes_ticks = 0;
   9810 			/* XXX */
   9811 			wm_serdes_mediachange(ifp);
   9812 		}
   9813 	}
   9814 
   9815 	wm_tbi_serdes_set_linkled(sc);
   9816 }
   9817 
   9818 /* SFP related */
   9819 
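         /*
          * wm_sfp_read_data_byte:
          *
          *	Read one byte from the SFP module's EEPROM through the
          *	I2CCMD register, polling for completion.
          */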
   9820 static int
   9821 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9822 {
   9823 	uint32_t i2ccmd;
   9824 	int i;
   9825 
   9826 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9827 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9828 
   9829 	/* Poll the ready bit */
   9830 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9831 		delay(50);
   9832 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9833 		if (i2ccmd & I2CCMD_READY)
   9834 			break;
   9835 	}
   9836 	if ((i2ccmd & I2CCMD_READY) == 0)
   9837 		return -1;
   9838 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9839 		return -1;
   9840 
   9841 	*data = i2ccmd & 0x00ff;
   9842 
   9843 	return 0;
   9844 }
   9845 
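         /*
          * wm_sfp_get_media_type:
          *
          *	Determine the media type (SERDES, SGMII copper or unknown)
          *	from the SFP module's identifier and Ethernet compliance
          *	flags.
          */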
   9846 static uint32_t
   9847 wm_sfp_get_media_type(struct wm_softc *sc)
   9848 {
   9849 	uint32_t ctrl_ext;
   9850 	uint8_t val = 0;
   9851 	int timeout = 3;
   9852 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9853 	int rv = -1;
   9854 
   9855 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9856 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9857 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9858 	CSR_WRITE_FLUSH(sc);
   9859 
   9860 	/* Read SFP module data */
   9861 	while (timeout) {
   9862 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9863 		if (rv == 0)
   9864 			break;
   9865 		delay(100*1000); /* XXX too big */
   9866 		timeout--;
   9867 	}
   9868 	if (rv != 0)
   9869 		goto out;
   9870 	switch (val) {
   9871 	case SFF_SFP_ID_SFF:
   9872 		aprint_normal_dev(sc->sc_dev,
   9873 		    "Module/Connector soldered to board\n");
   9874 		break;
   9875 	case SFF_SFP_ID_SFP:
   9876 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9877 		break;
   9878 	case SFF_SFP_ID_UNKNOWN:
   9879 		goto out;
   9880 	default:
   9881 		break;
   9882 	}
   9883 
   9884 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9885 	if (rv != 0) {
   9886 		goto out;
   9887 	}
   9888 
   9889 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9890 		mediatype = WM_MEDIATYPE_SERDES;
    9891 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9892 		sc->sc_flags |= WM_F_SGMII;
   9893 		mediatype = WM_MEDIATYPE_COPPER;
    9894 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9895 		sc->sc_flags |= WM_F_SGMII;
   9896 		mediatype = WM_MEDIATYPE_SERDES;
   9897 	}
   9898 
   9899 out:
   9900 	/* Restore I2C interface setting */
   9901 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9902 
   9903 	return mediatype;
   9904 }
   9905 /*
   9906  * NVM related.
   9907  * Microwire, SPI (w/wo EERD) and Flash.
   9908  */
   9909 
   9910 /* Both spi and uwire */
   9911 
   9912 /*
   9913  * wm_eeprom_sendbits:
   9914  *
   9915  *	Send a series of bits to the EEPROM.
   9916  */
   9917 static void
   9918 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9919 {
   9920 	uint32_t reg;
   9921 	int x;
   9922 
   9923 	reg = CSR_READ(sc, WMREG_EECD);
   9924 
   9925 	for (x = nbits; x > 0; x--) {
   9926 		if (bits & (1U << (x - 1)))
   9927 			reg |= EECD_DI;
   9928 		else
   9929 			reg &= ~EECD_DI;
   9930 		CSR_WRITE(sc, WMREG_EECD, reg);
   9931 		CSR_WRITE_FLUSH(sc);
   9932 		delay(2);
   9933 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9934 		CSR_WRITE_FLUSH(sc);
   9935 		delay(2);
   9936 		CSR_WRITE(sc, WMREG_EECD, reg);
   9937 		CSR_WRITE_FLUSH(sc);
   9938 		delay(2);
   9939 	}
   9940 }
   9941 
   9942 /*
   9943  * wm_eeprom_recvbits:
   9944  *
   9945  *	Receive a series of bits from the EEPROM.
   9946  */
   9947 static void
   9948 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9949 {
   9950 	uint32_t reg, val;
   9951 	int x;
   9952 
   9953 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9954 
   9955 	val = 0;
   9956 	for (x = nbits; x > 0; x--) {
   9957 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9958 		CSR_WRITE_FLUSH(sc);
   9959 		delay(2);
   9960 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9961 			val |= (1U << (x - 1));
   9962 		CSR_WRITE(sc, WMREG_EECD, reg);
   9963 		CSR_WRITE_FLUSH(sc);
   9964 		delay(2);
   9965 	}
   9966 	*valp = val;
   9967 }
   9968 
   9969 /* Microwire */
   9970 
   9971 /*
   9972  * wm_nvm_read_uwire:
   9973  *
   9974  *	Read a word from the EEPROM using the MicroWire protocol.
   9975  */
   9976 static int
   9977 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9978 {
   9979 	uint32_t reg, val;
   9980 	int i;
   9981 
   9982 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   9983 		device_xname(sc->sc_dev), __func__));
   9984 
   9985 	for (i = 0; i < wordcnt; i++) {
   9986 		/* Clear SK and DI. */
   9987 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9988 		CSR_WRITE(sc, WMREG_EECD, reg);
   9989 
   9990 		/*
   9991 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9992 		 * and Xen.
   9993 		 *
   9994 		 * We use this workaround only for 82540 because qemu's
   9995 		 * e1000 act as 82540.
   9996 		 */
   9997 		if (sc->sc_type == WM_T_82540) {
   9998 			reg |= EECD_SK;
   9999 			CSR_WRITE(sc, WMREG_EECD, reg);
   10000 			reg &= ~EECD_SK;
   10001 			CSR_WRITE(sc, WMREG_EECD, reg);
   10002 			CSR_WRITE_FLUSH(sc);
   10003 			delay(2);
   10004 		}
   10005 		/* XXX: end of workaround */
   10006 
   10007 		/* Set CHIP SELECT. */
   10008 		reg |= EECD_CS;
   10009 		CSR_WRITE(sc, WMREG_EECD, reg);
   10010 		CSR_WRITE_FLUSH(sc);
   10011 		delay(2);
   10012 
   10013 		/* Shift in the READ command. */
   10014 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10015 
   10016 		/* Shift in address. */
   10017 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10018 
   10019 		/* Shift out the data. */
   10020 		wm_eeprom_recvbits(sc, &val, 16);
   10021 		data[i] = val & 0xffff;
   10022 
   10023 		/* Clear CHIP SELECT. */
   10024 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10025 		CSR_WRITE(sc, WMREG_EECD, reg);
   10026 		CSR_WRITE_FLUSH(sc);
   10027 		delay(2);
   10028 	}
   10029 
   10030 	return 0;
   10031 }
   10032 
   10033 /* SPI */
   10034 
   10035 /*
   10036  * Set SPI and FLASH related information from the EECD register.
   10037  * For 82541 and 82547, the word size is taken from EEPROM.
   10038  */
   10039 static int
   10040 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10041 {
   10042 	int size;
   10043 	uint32_t reg;
   10044 	uint16_t data;
   10045 
   10046 	reg = CSR_READ(sc, WMREG_EECD);
   10047 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10048 
   10049 	/* Read the size of NVM from EECD by default */
   10050 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10051 	switch (sc->sc_type) {
   10052 	case WM_T_82541:
   10053 	case WM_T_82541_2:
   10054 	case WM_T_82547:
   10055 	case WM_T_82547_2:
   10056 		/* Set dummy value to access EEPROM */
   10057 		sc->sc_nvm_wordsize = 64;
   10058 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10059 		reg = data;
   10060 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10061 		if (size == 0)
   10062 			size = 6; /* 64 word size */
   10063 		else
   10064 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10065 		break;
   10066 	case WM_T_80003:
   10067 	case WM_T_82571:
   10068 	case WM_T_82572:
   10069 	case WM_T_82573: /* SPI case */
   10070 	case WM_T_82574: /* SPI case */
   10071 	case WM_T_82583: /* SPI case */
   10072 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10073 		if (size > 14)
   10074 			size = 14;
   10075 		break;
   10076 	case WM_T_82575:
   10077 	case WM_T_82576:
   10078 	case WM_T_82580:
   10079 	case WM_T_I350:
   10080 	case WM_T_I354:
   10081 	case WM_T_I210:
   10082 	case WM_T_I211:
   10083 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10084 		if (size > 15)
   10085 			size = 15;
   10086 		break;
   10087 	default:
   10088 		aprint_error_dev(sc->sc_dev,
   10089 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10090 		return -1;
   10092 	}
   10093 
   10094 	sc->sc_nvm_wordsize = 1 << size;
   10095 
   10096 	return 0;
   10097 }
   10098 
   10099 /*
   10100  * wm_nvm_ready_spi:
   10101  *
   10102  *	Wait for a SPI EEPROM to be ready for commands.
   10103  */
   10104 static int
   10105 wm_nvm_ready_spi(struct wm_softc *sc)
   10106 {
   10107 	uint32_t val;
   10108 	int usec;
   10109 
   10110 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10111 		device_xname(sc->sc_dev), __func__));
   10112 
   10113 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10114 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10115 		wm_eeprom_recvbits(sc, &val, 8);
   10116 		if ((val & SPI_SR_RDY) == 0)
   10117 			break;
   10118 	}
   10119 	if (usec >= SPI_MAX_RETRIES) {
    10120 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10121 		return 1;
   10122 	}
   10123 	return 0;
   10124 }
   10125 
   10126 /*
   10127  * wm_nvm_read_spi:
   10128  *
    10129  *	Read a word from the EEPROM using the SPI protocol.
   10130  */
   10131 static int
   10132 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10133 {
   10134 	uint32_t reg, val;
   10135 	int i;
   10136 	uint8_t opc;
   10137 
   10138 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10139 		device_xname(sc->sc_dev), __func__));
   10140 
   10141 	/* Clear SK and CS. */
   10142 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10143 	CSR_WRITE(sc, WMREG_EECD, reg);
   10144 	CSR_WRITE_FLUSH(sc);
   10145 	delay(2);
   10146 
   10147 	if (wm_nvm_ready_spi(sc))
   10148 		return 1;
   10149 
   10150 	/* Toggle CS to flush commands. */
   10151 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10152 	CSR_WRITE_FLUSH(sc);
   10153 	delay(2);
   10154 	CSR_WRITE(sc, WMREG_EECD, reg);
   10155 	CSR_WRITE_FLUSH(sc);
   10156 	delay(2);
   10157 
   10158 	opc = SPI_OPC_READ;
   10159 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10160 		opc |= SPI_OPC_A8;
   10161 
   10162 	wm_eeprom_sendbits(sc, opc, 8);
   10163 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10164 
   10165 	for (i = 0; i < wordcnt; i++) {
   10166 		wm_eeprom_recvbits(sc, &val, 16);
   10167 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10168 	}
   10169 
   10170 	/* Raise CS and clear SK. */
   10171 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10172 	CSR_WRITE(sc, WMREG_EECD, reg);
   10173 	CSR_WRITE_FLUSH(sc);
   10174 	delay(2);
   10175 
   10176 	return 0;
   10177 }
   10178 
   10179 /* Using with EERD */
   10180 
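          /*
           * wm_poll_eerd_eewr_done:
           *
           *	Poll the EERD or EEWR register until the DONE bit is set.
           *	Return 0 on success, -1 on timeout.
           */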
   10181 static int
   10182 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10183 {
   10184 	uint32_t attempts = 100000;
   10185 	uint32_t i, reg = 0;
   10186 	int32_t done = -1;
   10187 
   10188 	for (i = 0; i < attempts; i++) {
   10189 		reg = CSR_READ(sc, rw);
   10190 
   10191 		if (reg & EERD_DONE) {
   10192 			done = 0;
   10193 			break;
   10194 		}
   10195 		delay(5);
   10196 	}
   10197 
   10198 	return done;
   10199 }
   10200 
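          /*
           * wm_nvm_read_eerd:
           *
           *	Read words from the NVM using the EERD (EEPROM read) register.
           */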
   10201 static int
   10202 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10203     uint16_t *data)
   10204 {
   10205 	int i, eerd = 0;
   10206 	int error = 0;
   10207 
   10208 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10209 		device_xname(sc->sc_dev), __func__));
   10210 
   10211 	for (i = 0; i < wordcnt; i++) {
   10212 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10213 
   10214 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10215 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10216 		if (error != 0)
   10217 			break;
   10218 
   10219 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10220 	}
   10221 
   10222 	return error;
   10223 }
   10224 
   10225 /* Flash */
   10226 
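          /*
           * wm_nvm_valid_bank_detect_ich8lan:
           *
           *	Detect which flash bank (0 or 1) holds the valid NVM image.
           */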
   10227 static int
   10228 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10229 {
   10230 	uint32_t eecd;
   10231 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10232 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10233 	uint8_t sig_byte = 0;
   10234 
   10235 	switch (sc->sc_type) {
   10236 	case WM_T_PCH_SPT:
   10237 		/*
   10238 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10239 		 * sector valid bits from the NVM.
   10240 		 */
   10241 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10242 		if ((*bank == 0) || (*bank == 1)) {
   10243 			aprint_error_dev(sc->sc_dev,
   10244 			    "%s: no valid NVM bank present (%u)\n", __func__,
    10245 			    *bank);
   10246 			return -1;
   10247 		} else {
   10248 			*bank = *bank - 2;
   10249 			return 0;
   10250 		}
   10251 	case WM_T_ICH8:
   10252 	case WM_T_ICH9:
   10253 		eecd = CSR_READ(sc, WMREG_EECD);
   10254 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10255 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10256 			return 0;
   10257 		}
   10258 		/* FALLTHROUGH */
   10259 	default:
   10260 		/* Default to 0 */
   10261 		*bank = 0;
   10262 
   10263 		/* Check bank 0 */
   10264 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10265 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10266 			*bank = 0;
   10267 			return 0;
   10268 		}
   10269 
   10270 		/* Check bank 1 */
   10271 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10272 		    &sig_byte);
   10273 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10274 			*bank = 1;
   10275 			return 0;
   10276 		}
   10277 	}
   10278 
   10279 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10280 		device_xname(sc->sc_dev)));
   10281 	return -1;
   10282 }
   10283 
   10284 /******************************************************************************
   10285  * This function does initial flash setup so that a new read/write/erase cycle
   10286  * can be started.
   10287  *
   10288  * sc - The pointer to the hw structure
   10289  ****************************************************************************/
   10290 static int32_t
   10291 wm_ich8_cycle_init(struct wm_softc *sc)
   10292 {
   10293 	uint16_t hsfsts;
   10294 	int32_t error = 1;
   10295 	int32_t i     = 0;
   10296 
   10297 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10298 
    10299 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   10300 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10301 		return error;
   10302 	}
   10303 
    10304 	/* Clear FCERR and DAEL in HW status by writing 1s */
   10306 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10307 
   10308 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10309 
    10310 	/*
    10311 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10312 	 * check against in order to start a new cycle, or the FDONE bit
    10313 	 * should be changed in the hardware so that it is 1 after hardware
    10314 	 * reset, which could then be used to tell whether a cycle is in
    10315 	 * progress or has been completed.  We should also have a software
    10316 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    10317 	 * so that two threads' accesses to those bits are serialized, or
    10318 	 * some way to keep two threads from starting a cycle at once.
    10319 	 */
   10320 
   10321 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10322 		/*
   10323 		 * There is no cycle running at present, so we can start a
   10324 		 * cycle
   10325 		 */
   10326 
   10327 		/* Begin by setting Flash Cycle Done. */
   10328 		hsfsts |= HSFSTS_DONE;
   10329 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10330 		error = 0;
   10331 	} else {
   10332 		/*
   10333 		 * otherwise poll for sometime so the current cycle has a
   10334 		 * chance to end before giving up.
   10335 		 */
   10336 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10337 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10338 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10339 				error = 0;
   10340 				break;
   10341 			}
   10342 			delay(1);
   10343 		}
   10344 		if (error == 0) {
   10345 			/*
   10346 			 * Successful in waiting for previous cycle to timeout,
   10347 			 * now set the Flash Cycle Done.
   10348 			 */
   10349 			hsfsts |= HSFSTS_DONE;
   10350 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10351 		}
   10352 	}
   10353 	return error;
   10354 }
   10355 
   10356 /******************************************************************************
   10357  * This function starts a flash cycle and waits for its completion
   10358  *
   10359  * sc - The pointer to the hw structure
   10360  ****************************************************************************/
   10361 static int32_t
   10362 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10363 {
   10364 	uint16_t hsflctl;
   10365 	uint16_t hsfsts;
   10366 	int32_t error = 1;
   10367 	uint32_t i = 0;
   10368 
   10369 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10370 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10371 	hsflctl |= HSFCTL_GO;
   10372 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10373 
   10374 	/* Wait till FDONE bit is set to 1 */
   10375 	do {
   10376 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10377 		if (hsfsts & HSFSTS_DONE)
   10378 			break;
   10379 		delay(1);
   10380 		i++;
   10381 	} while (i < timeout);
    10382 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10383 		error = 0;
   10384 
   10385 	return error;
   10386 }
   10387 
   10388 /******************************************************************************
   10389  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10390  *
   10391  * sc - The pointer to the hw structure
   10392  * index - The index of the byte or word to read.
   10393  * size - Size of data to read, 1=byte 2=word, 4=dword
   10394  * data - Pointer to the word to store the value read.
   10395  *****************************************************************************/
   10396 static int32_t
   10397 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10398     uint32_t size, uint32_t *data)
   10399 {
   10400 	uint16_t hsfsts;
   10401 	uint16_t hsflctl;
   10402 	uint32_t flash_linear_address;
   10403 	uint32_t flash_data = 0;
   10404 	int32_t error = 1;
   10405 	int32_t count = 0;
   10406 
    10407 	if (size < 1 || size > 4 || data == NULL ||
   10408 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10409 		return error;
   10410 
   10411 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10412 	    sc->sc_ich8_flash_base;
   10413 
   10414 	do {
   10415 		delay(1);
   10416 		/* Steps */
   10417 		error = wm_ich8_cycle_init(sc);
   10418 		if (error)
   10419 			break;
   10420 
   10421 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10422 		/* The BCOUNT field holds size - 1; e.g. 0b means 1 byte. */
   10423 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10424 		    & HSFCTL_BCOUNT_MASK;
   10425 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10426 		if (sc->sc_type == WM_T_PCH_SPT) {
   10427 			/*
   10428 			 * In SPT, This register is in Lan memory space, not
   10429 			 * flash. Therefore, only 32 bit access is supported.
   10430 			 */
   10431 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10432 			    (uint32_t)hsflctl);
   10433 		} else
   10434 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10435 
   10436 		/*
   10437 		 * Write the last 24 bits of index into Flash Linear address
   10438 		 * field in Flash Address
   10439 		 */
   10440 		/* TODO: TBD maybe check the index against the size of flash */
   10441 
   10442 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10443 
   10444 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10445 
    10446 		/*
    10447 		 * If FCERR is set to 1, clear it and retry the whole
    10448 		 * sequence a few more times before giving up; otherwise
    10449 		 * read in the Flash Data0 register, least significant
    10450 		 * byte first.
    10451 		 */
   10452 		if (error == 0) {
   10453 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10454 			if (size == 1)
   10455 				*data = (uint8_t)(flash_data & 0x000000FF);
   10456 			else if (size == 2)
   10457 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10458 			else if (size == 4)
   10459 				*data = (uint32_t)flash_data;
   10460 			break;
   10461 		} else {
   10462 			/*
   10463 			 * If we've gotten here, then things are probably
   10464 			 * completely hosed, but if the error condition is
   10465 			 * detected, it won't hurt to give it another try...
   10466 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10467 			 */
   10468 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10469 			if (hsfsts & HSFSTS_ERR) {
   10470 				/* Repeat for some time before giving up. */
   10471 				continue;
   10472 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10473 				break;
   10474 		}
   10475 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10476 
   10477 	return error;
   10478 }
   10479 
   10480 /******************************************************************************
   10481  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10482  *
   10483  * sc - pointer to wm_hw structure
   10484  * index - The index of the byte to read.
   10485  * data - Pointer to a byte to store the value read.
   10486  *****************************************************************************/
   10487 static int32_t
   10488 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10489 {
   10490 	int32_t status;
   10491 	uint32_t word = 0;
   10492 
   10493 	status = wm_read_ich8_data(sc, index, 1, &word);
   10494 	if (status == 0)
   10495 		*data = (uint8_t)word;
   10496 	else
   10497 		*data = 0;
   10498 
   10499 	return status;
   10500 }
   10501 
   10502 /******************************************************************************
   10503  * Reads a word from the NVM using the ICH8 flash access registers.
   10504  *
   10505  * sc - pointer to wm_hw structure
   10506  * index - The starting byte index of the word to read.
   10507  * data - Pointer to a word to store the value read.
   10508  *****************************************************************************/
   10509 static int32_t
   10510 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10511 {
   10512 	int32_t status;
   10513 	uint32_t word = 0;
   10514 
   10515 	status = wm_read_ich8_data(sc, index, 2, &word);
   10516 	if (status == 0)
   10517 		*data = (uint16_t)word;
   10518 	else
   10519 		*data = 0;
   10520 
   10521 	return status;
   10522 }
   10523 
   10524 /******************************************************************************
   10525  * Reads a dword from the NVM using the ICH8 flash access registers.
   10526  *
   10527  * sc - pointer to wm_hw structure
   10528  * index - The starting byte index of the word to read.
   10529  * data - Pointer to a word to store the value read.
   10530  *****************************************************************************/
   10531 static int32_t
   10532 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10533 {
   10534 	int32_t status;
   10535 
   10536 	status = wm_read_ich8_data(sc, index, 4, data);
   10537 	return status;
   10538 }
   10539 
   10540 /******************************************************************************
   10541  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10542  * register.
   10543  *
   10544  * sc - Struct containing variables accessed by shared code
   10545  * offset - offset of word in the EEPROM to read
   10546  * data - word read from the EEPROM
   10547  * words - number of words to read
   10548  *****************************************************************************/
   10549 static int
   10550 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10551 {
   10552 	int32_t  error = 0;
   10553 	uint32_t flash_bank = 0;
   10554 	uint32_t act_offset = 0;
   10555 	uint32_t bank_offset = 0;
   10556 	uint16_t word = 0;
   10557 	uint16_t i = 0;
   10558 
   10559 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10560 		device_xname(sc->sc_dev), __func__));
   10561 
   10562 	/*
   10563 	 * We need to know which is the valid flash bank.  In the event
   10564 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10565 	 * managing flash_bank.  So it cannot be trusted and needs
   10566 	 * to be updated with each read.
   10567 	 */
   10568 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10569 	if (error) {
   10570 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10571 			device_xname(sc->sc_dev)));
   10572 		flash_bank = 0;
   10573 	}
   10574 
   10575 	/*
   10576 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10577 	 * size
   10578 	 */
   10579 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10580 
   10581 	error = wm_get_swfwhw_semaphore(sc);
   10582 	if (error) {
   10583 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10584 		    __func__);
   10585 		return error;
   10586 	}
   10587 
   10588 	for (i = 0; i < words; i++) {
   10589 		/* The NVM part needs a byte offset, hence * 2 */
   10590 		act_offset = bank_offset + ((offset + i) * 2);
   10591 		error = wm_read_ich8_word(sc, act_offset, &word);
   10592 		if (error) {
   10593 			aprint_error_dev(sc->sc_dev,
   10594 			    "%s: failed to read NVM\n", __func__);
   10595 			break;
   10596 		}
   10597 		data[i] = word;
   10598 	}
   10599 
   10600 	wm_put_swfwhw_semaphore(sc);
   10601 	return error;
   10602 }
   10603 
   10604 /******************************************************************************
   10605  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10606  * register.
   10607  *
   10608  * sc - Struct containing variables accessed by shared code
   10609  * offset - offset of word in the EEPROM to read
   10610  * data - word read from the EEPROM
   10611  * words - number of words to read
   10612  *****************************************************************************/
   10613 static int
   10614 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10615 {
   10616 	int32_t  error = 0;
   10617 	uint32_t flash_bank = 0;
   10618 	uint32_t act_offset = 0;
   10619 	uint32_t bank_offset = 0;
   10620 	uint32_t dword = 0;
   10621 	uint16_t i = 0;
   10622 
   10623 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10624 		device_xname(sc->sc_dev), __func__));
   10625 
   10626 	/*
   10627 	 * We need to know which is the valid flash bank.  In the event
   10628 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10629 	 * managing flash_bank.  So it cannot be trusted and needs
   10630 	 * to be updated with each read.
   10631 	 */
   10632 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10633 	if (error) {
   10634 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10635 			device_xname(sc->sc_dev)));
   10636 		flash_bank = 0;
   10637 	}
   10638 
   10639 	/*
   10640 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10641 	 * size
   10642 	 */
   10643 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10644 
   10645 	error = wm_get_swfwhw_semaphore(sc);
   10646 	if (error) {
   10647 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10648 		    __func__);
   10649 		return error;
   10650 	}
   10651 
   10652 	for (i = 0; i < words; i++) {
   10653 		/* The NVM part needs a byte offset, hence * 2 */
   10654 		act_offset = bank_offset + ((offset + i) * 2);
   10655 		/* but we must read dword aligned, so mask ... */
   10656 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10657 		if (error) {
   10658 			aprint_error_dev(sc->sc_dev,
   10659 			    "%s: failed to read NVM\n", __func__);
   10660 			break;
   10661 		}
   10662 		/* ... and pick out low or high word */
   10663 		if ((act_offset & 0x2) == 0)
   10664 			data[i] = (uint16_t)(dword & 0xFFFF);
   10665 		else
   10666 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10667 	}
   10668 
   10669 	wm_put_swfwhw_semaphore(sc);
   10670 	return error;
   10671 }
   10672 
   10673 /* iNVM */
   10674 
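          /*
           * wm_nvm_read_word_invm:
           *
           *	Scan the iNVM records for a word autoload entry matching the
           *	given word address.  Return 0 if found, -1 otherwise.
           */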
   10675 static int
   10676 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10677 {
    10678 	int32_t  rv = -1;
   10679 	uint32_t invm_dword;
   10680 	uint16_t i;
   10681 	uint8_t record_type, word_address;
   10682 
   10683 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10684 		device_xname(sc->sc_dev), __func__));
   10685 
   10686 	for (i = 0; i < INVM_SIZE; i++) {
   10687 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10688 		/* Get record type */
   10689 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10690 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10691 			break;
   10692 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10693 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10694 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10695 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10696 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10697 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10698 			if (word_address == address) {
   10699 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10700 				rv = 0;
   10701 				break;
   10702 			}
   10703 		}
   10704 	}
   10705 
   10706 	return rv;
   10707 }
   10708 
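          /*
           * wm_nvm_read_invm:
           *
           *	Read words from the iNVM, substituting the documented defaults
           *	for words which are not programmed (I210/I211).
           */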
   10709 static int
   10710 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10711 {
   10712 	int rv = 0;
   10713 	int i;
   10714 
   10715 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10716 		device_xname(sc->sc_dev), __func__));
   10717 
   10718 	for (i = 0; i < words; i++) {
   10719 		switch (offset + i) {
   10720 		case NVM_OFF_MACADDR:
   10721 		case NVM_OFF_MACADDR1:
   10722 		case NVM_OFF_MACADDR2:
   10723 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10724 			if (rv != 0) {
   10725 				data[i] = 0xffff;
   10726 				rv = -1;
   10727 			}
   10728 			break;
   10729 		case NVM_OFF_CFG2:
   10730 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10731 			if (rv != 0) {
   10732 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10733 				rv = 0;
   10734 			}
   10735 			break;
   10736 		case NVM_OFF_CFG4:
   10737 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10738 			if (rv != 0) {
   10739 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10740 				rv = 0;
   10741 			}
   10742 			break;
   10743 		case NVM_OFF_LED_1_CFG:
   10744 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10745 			if (rv != 0) {
   10746 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10747 				rv = 0;
   10748 			}
   10749 			break;
   10750 		case NVM_OFF_LED_0_2_CFG:
   10751 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10752 			if (rv != 0) {
   10753 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10754 				rv = 0;
   10755 			}
   10756 			break;
   10757 		case NVM_OFF_ID_LED_SETTINGS:
   10758 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10759 			if (rv != 0) {
   10760 				*data = ID_LED_RESERVED_FFFF;
   10761 				rv = 0;
   10762 			}
   10763 			break;
   10764 		default:
   10765 			DPRINTF(WM_DEBUG_NVM,
   10766 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10767 			*data = NVM_RESERVED_WORD;
   10768 			break;
   10769 		}
   10770 	}
   10771 
   10772 	return rv;
   10773 }
   10774 
   10775 /* Lock, detecting NVM type, validate checksum, version and read */
   10776 
   10777 /*
   10778  * wm_nvm_acquire:
   10779  *
   10780  *	Perform the EEPROM handshake required on some chips.
   10781  */
   10782 static int
   10783 wm_nvm_acquire(struct wm_softc *sc)
   10784 {
   10785 	uint32_t reg;
   10786 	int x;
   10787 	int ret = 0;
   10788 
   10789 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10790 		device_xname(sc->sc_dev), __func__));
   10791 
   10792 	if (sc->sc_type >= WM_T_ICH8) {
   10793 		ret = wm_get_nvm_ich8lan(sc);
   10794 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10795 		ret = wm_get_swfwhw_semaphore(sc);
   10796 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10797 		/* This will also do wm_get_swsm_semaphore() if needed */
   10798 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10799 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10800 		ret = wm_get_swsm_semaphore(sc);
   10801 	}
   10802 
   10803 	if (ret) {
   10804 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10805 			__func__);
   10806 		return 1;
   10807 	}
   10808 
   10809 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10810 		reg = CSR_READ(sc, WMREG_EECD);
   10811 
   10812 		/* Request EEPROM access. */
   10813 		reg |= EECD_EE_REQ;
   10814 		CSR_WRITE(sc, WMREG_EECD, reg);
   10815 
   10816 		/* ..and wait for it to be granted. */
   10817 		for (x = 0; x < 1000; x++) {
   10818 			reg = CSR_READ(sc, WMREG_EECD);
   10819 			if (reg & EECD_EE_GNT)
   10820 				break;
   10821 			delay(5);
   10822 		}
   10823 		if ((reg & EECD_EE_GNT) == 0) {
   10824 			aprint_error_dev(sc->sc_dev,
   10825 			    "could not acquire EEPROM GNT\n");
   10826 			reg &= ~EECD_EE_REQ;
   10827 			CSR_WRITE(sc, WMREG_EECD, reg);
   10828 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10829 				wm_put_swfwhw_semaphore(sc);
   10830 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10831 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10832 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10833 				wm_put_swsm_semaphore(sc);
   10834 			return 1;
   10835 		}
   10836 	}
   10837 
   10838 	return 0;
   10839 }
   10840 
   10841 /*
   10842  * wm_nvm_release:
   10843  *
   10844  *	Release the EEPROM mutex.
   10845  */
   10846 static void
   10847 wm_nvm_release(struct wm_softc *sc)
   10848 {
   10849 	uint32_t reg;
   10850 
   10851 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10852 		device_xname(sc->sc_dev), __func__));
   10853 
   10854 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10855 		reg = CSR_READ(sc, WMREG_EECD);
   10856 		reg &= ~EECD_EE_REQ;
   10857 		CSR_WRITE(sc, WMREG_EECD, reg);
   10858 	}
   10859 
   10860 	if (sc->sc_type >= WM_T_ICH8) {
   10861 		wm_put_nvm_ich8lan(sc);
   10862 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10863 		wm_put_swfwhw_semaphore(sc);
   10864 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10865 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10866 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10867 		wm_put_swsm_semaphore(sc);
   10868 }
   10869 
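          /*
           * wm_nvm_is_onboard_eeprom:
           *
           *	Return 0 if the NVM is flash, 1 if it is an onboard EEPROM
           *	(only 82573/82574/82583 can be flash).
           */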
   10870 static int
   10871 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10872 {
   10873 	uint32_t eecd = 0;
   10874 
   10875 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10876 	    || sc->sc_type == WM_T_82583) {
   10877 		eecd = CSR_READ(sc, WMREG_EECD);
   10878 
   10879 		/* Isolate bits 15 & 16 */
   10880 		eecd = ((eecd >> 15) & 0x03);
   10881 
   10882 		/* If both bits are set, device is Flash type */
   10883 		if (eecd == 0x03)
   10884 			return 0;
   10885 	}
   10886 	return 1;
   10887 }
   10888 
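          /*
           * wm_nvm_get_flash_presence_i210:
           *
           *	Return 1 if the EEC register reports that flash is present,
           *	otherwise 0 (I210/I211).
           */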
   10889 static int
   10890 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10891 {
   10892 	uint32_t eec;
   10893 
   10894 	eec = CSR_READ(sc, WMREG_EEC);
   10895 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10896 		return 1;
   10897 
   10898 	return 0;
   10899 }
   10900 
   10901 /*
   10902  * wm_nvm_validate_checksum
   10903  *
    10904  * The sum of the first 64 16-bit words should equal NVM_CHECKSUM.
   10905  */
   10906 static int
   10907 wm_nvm_validate_checksum(struct wm_softc *sc)
   10908 {
   10909 	uint16_t checksum;
   10910 	uint16_t eeprom_data;
   10911 #ifdef WM_DEBUG
   10912 	uint16_t csum_wordaddr, valid_checksum;
   10913 #endif
   10914 	int i;
   10915 
   10916 	checksum = 0;
   10917 
   10918 	/* Don't check for I211 */
   10919 	if (sc->sc_type == WM_T_I211)
   10920 		return 0;
   10921 
   10922 #ifdef WM_DEBUG
   10923 	if (sc->sc_type == WM_T_PCH_LPT) {
   10924 		csum_wordaddr = NVM_OFF_COMPAT;
   10925 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10926 	} else {
   10927 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10928 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10929 	}
   10930 
   10931 	/* Dump EEPROM image for debug */
   10932 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10933 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10934 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10935 		/* XXX PCH_SPT? */
   10936 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10937 		if ((eeprom_data & valid_checksum) == 0) {
   10938 			DPRINTF(WM_DEBUG_NVM,
   10939 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10940 				device_xname(sc->sc_dev), eeprom_data,
   10941 				    valid_checksum));
   10942 		}
   10943 	}
   10944 
   10945 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10946 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10947 		for (i = 0; i < NVM_SIZE; i++) {
   10948 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10949 				printf("XXXX ");
   10950 			else
   10951 				printf("%04hx ", eeprom_data);
   10952 			if (i % 8 == 7)
   10953 				printf("\n");
   10954 		}
   10955 	}
   10956 
   10957 #endif /* WM_DEBUG */
   10958 
   10959 	for (i = 0; i < NVM_SIZE; i++) {
   10960 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10961 			return 1;
   10962 		checksum += eeprom_data;
   10963 	}
   10964 
   10965 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10966 #ifdef WM_DEBUG
   10967 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10968 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10969 #endif
   10970 	}
   10971 
   10972 	return 0;
   10973 }
   10974 
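          /*
           * wm_nvm_version_invm:
           *
           *	Get the iNVM image version (I210/I211).
           */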
   10975 static void
   10976 wm_nvm_version_invm(struct wm_softc *sc)
   10977 {
   10978 	uint32_t dword;
   10979 
   10980 	/*
    10981 	 * Linux's code to decode the version is very strange, so we don't
    10982 	 * follow that algorithm and just use word 61 as the document
    10983 	 * describes.  Perhaps it's not perfect though...
   10984 	 *
   10985 	 * Example:
   10986 	 *
   10987 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10988 	 */
   10989 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10990 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10991 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10992 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10993 }
   10994 
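          /*
           * wm_nvm_version:
           *
           *	Decode and print the NVM image version and, when present,
           *	the option ROM version and the image unique ID.
           */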
   10995 static void
   10996 wm_nvm_version(struct wm_softc *sc)
   10997 {
   10998 	uint16_t major, minor, build, patch;
   10999 	uint16_t uid0, uid1;
   11000 	uint16_t nvm_data;
   11001 	uint16_t off;
   11002 	bool check_version = false;
   11003 	bool check_optionrom = false;
   11004 	bool have_build = false;
   11005 
   11006 	/*
   11007 	 * Version format:
   11008 	 *
   11009 	 * XYYZ
   11010 	 * X0YZ
   11011 	 * X0YY
   11012 	 *
   11013 	 * Example:
   11014 	 *
   11015 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11016 	 *	82571	0x50a6	5.10.6?
   11017 	 *	82572	0x506a	5.6.10?
   11018 	 *	82572EI	0x5069	5.6.9?
   11019 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11020 	 *		0x2013	2.1.3?
    11021 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11022 	 */
   11023 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11024 	switch (sc->sc_type) {
   11025 	case WM_T_82571:
   11026 	case WM_T_82572:
   11027 	case WM_T_82574:
   11028 	case WM_T_82583:
   11029 		check_version = true;
   11030 		check_optionrom = true;
   11031 		have_build = true;
   11032 		break;
   11033 	case WM_T_82575:
   11034 	case WM_T_82576:
   11035 	case WM_T_82580:
   11036 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11037 			check_version = true;
   11038 		break;
   11039 	case WM_T_I211:
   11040 		wm_nvm_version_invm(sc);
   11041 		goto printver;
   11042 	case WM_T_I210:
   11043 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11044 			wm_nvm_version_invm(sc);
   11045 			goto printver;
   11046 		}
   11047 		/* FALLTHROUGH */
   11048 	case WM_T_I350:
   11049 	case WM_T_I354:
   11050 		check_version = true;
   11051 		check_optionrom = true;
   11052 		break;
   11053 	default:
   11054 		return;
   11055 	}
   11056 	if (check_version) {
   11057 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11058 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11059 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11060 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11061 			build = nvm_data & NVM_BUILD_MASK;
   11062 			have_build = true;
   11063 		} else
   11064 			minor = nvm_data & 0x00ff;
   11065 
    11066 		/* Convert the BCD-encoded minor to decimal */
   11067 		minor = (minor / 16) * 10 + (minor % 16);
   11068 		sc->sc_nvm_ver_major = major;
   11069 		sc->sc_nvm_ver_minor = minor;
   11070 
   11071 printver:
   11072 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11073 		    sc->sc_nvm_ver_minor);
   11074 		if (have_build) {
   11075 			sc->sc_nvm_ver_build = build;
   11076 			aprint_verbose(".%d", build);
   11077 		}
   11078 	}
   11079 	if (check_optionrom) {
   11080 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11081 		/* Option ROM Version */
   11082 		if ((off != 0x0000) && (off != 0xffff)) {
   11083 			off += NVM_COMBO_VER_OFF;
   11084 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11085 			wm_nvm_read(sc, off, 1, &uid0);
   11086 			if ((uid0 != 0) && (uid0 != 0xffff)
   11087 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11088 				/* 16bits */
   11089 				major = uid0 >> 8;
   11090 				build = (uid0 << 8) | (uid1 >> 8);
   11091 				patch = uid1 & 0x00ff;
   11092 				aprint_verbose(", option ROM Version %d.%d.%d",
   11093 				    major, build, patch);
   11094 			}
   11095 		}
   11096 	}
   11097 
   11098 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11099 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11100 }
   11101 
   11102 /*
   11103  * wm_nvm_read:
   11104  *
   11105  *	Read data from the serial EEPROM.
   11106  */
   11107 static int
   11108 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11109 {
   11110 	int rv;
   11111 
   11112 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11113 		device_xname(sc->sc_dev), __func__));
   11114 
   11115 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11116 		return 1;
   11117 
   11118 	if (wm_nvm_acquire(sc))
   11119 		return 1;
   11120 
   11121 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11122 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11123 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11124 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11125 	else if (sc->sc_type == WM_T_PCH_SPT)
   11126 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11127 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11128 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11129 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11130 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11131 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11132 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11133 	else
   11134 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11135 
   11136 	wm_nvm_release(sc);
   11137 	return rv;
   11138 }
   11139 
   11140 /*
   11141  * Hardware semaphores.
    11142  * Very complex...
   11143  */
   11144 
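          /* Null get/put functions for devices which require no locking. */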
   11145 static int
   11146 wm_get_null(struct wm_softc *sc)
   11147 {
   11148 
   11149 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11150 		device_xname(sc->sc_dev), __func__));
   11151 	return 0;
   11152 }
   11153 
   11154 static void
   11155 wm_put_null(struct wm_softc *sc)
   11156 {
   11157 
   11158 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11159 		device_xname(sc->sc_dev), __func__));
   11160 	return;
   11161 }
   11162 
   11163 /*
   11164  * Get hardware semaphore.
   11165  * Same as e1000_get_hw_semaphore_generic()
   11166  */
   11167 static int
   11168 wm_get_swsm_semaphore(struct wm_softc *sc)
   11169 {
   11170 	int32_t timeout;
   11171 	uint32_t swsm;
   11172 
   11173 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11174 		device_xname(sc->sc_dev), __func__));
   11175 	KASSERT(sc->sc_nvm_wordsize > 0);
   11176 
   11177 	/* Get the SW semaphore. */
   11178 	timeout = sc->sc_nvm_wordsize + 1;
   11179 	while (timeout) {
   11180 		swsm = CSR_READ(sc, WMREG_SWSM);
   11181 
   11182 		if ((swsm & SWSM_SMBI) == 0)
   11183 			break;
   11184 
   11185 		delay(50);
   11186 		timeout--;
   11187 	}
   11188 
   11189 	if (timeout == 0) {
   11190 		aprint_error_dev(sc->sc_dev,
   11191 		    "could not acquire SWSM SMBI\n");
   11192 		return 1;
   11193 	}
   11194 
   11195 	/* Get the FW semaphore. */
   11196 	timeout = sc->sc_nvm_wordsize + 1;
   11197 	while (timeout) {
   11198 		swsm = CSR_READ(sc, WMREG_SWSM);
   11199 		swsm |= SWSM_SWESMBI;
   11200 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11201 		/* If we managed to set the bit we got the semaphore. */
   11202 		swsm = CSR_READ(sc, WMREG_SWSM);
   11203 		if (swsm & SWSM_SWESMBI)
   11204 			break;
   11205 
   11206 		delay(50);
   11207 		timeout--;
   11208 	}
   11209 
   11210 	if (timeout == 0) {
   11211 		aprint_error_dev(sc->sc_dev,
   11212 		    "could not acquire SWSM SWESMBI\n");
   11213 		/* Release semaphores */
   11214 		wm_put_swsm_semaphore(sc);
   11215 		return 1;
   11216 	}
   11217 	return 0;
   11218 }
   11219 
   11220 /*
   11221  * Put hardware semaphore.
   11222  * Same as e1000_put_hw_semaphore_generic()
   11223  */
   11224 static void
   11225 wm_put_swsm_semaphore(struct wm_softc *sc)
   11226 {
   11227 	uint32_t swsm;
   11228 
   11229 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11230 		device_xname(sc->sc_dev), __func__));
   11231 
   11232 	swsm = CSR_READ(sc, WMREG_SWSM);
   11233 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11234 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11235 }
   11236 
   11237 /*
   11238  * Get SW/FW semaphore.
   11239  * Same as e1000_acquire_swfw_sync_82575().
   11240  */
   11241 static int
   11242 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11243 {
   11244 	uint32_t swfw_sync;
   11245 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11246 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11247 	int timeout = 200;
   11248 
   11249 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11250 		device_xname(sc->sc_dev), __func__));
   11251 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11252 
   11253 	for (timeout = 0; timeout < 200; timeout++) {
   11254 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11255 			if (wm_get_swsm_semaphore(sc)) {
   11256 				aprint_error_dev(sc->sc_dev,
   11257 				    "%s: failed to get semaphore\n",
   11258 				    __func__);
   11259 				return 1;
   11260 			}
   11261 		}
   11262 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11263 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11264 			swfw_sync |= swmask;
   11265 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11266 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11267 				wm_put_swsm_semaphore(sc);
   11268 			return 0;
   11269 		}
   11270 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11271 			wm_put_swsm_semaphore(sc);
   11272 		delay(5000);
   11273 	}
   11274 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11275 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11276 	return 1;
   11277 }
   11278 
   11279 static void
   11280 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11281 {
   11282 	uint32_t swfw_sync;
   11283 
   11284 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11285 		device_xname(sc->sc_dev), __func__));
   11286 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11287 
   11288 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11289 		while (wm_get_swsm_semaphore(sc) != 0)
   11290 			continue;
   11291 	}
   11292 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11293 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11294 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11295 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11296 		wm_put_swsm_semaphore(sc);
   11297 }
   11298 
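          /* Get/put the SW/FW semaphore bit for this function's PHY. */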
   11299 static int
   11300 wm_get_phy_82575(struct wm_softc *sc)
   11301 {
   11302 
   11303 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11304 		device_xname(sc->sc_dev), __func__));
   11305 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11306 }
   11307 
   11308 static void
   11309 wm_put_phy_82575(struct wm_softc *sc)
   11310 {
   11311 
   11312 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11313 		device_xname(sc->sc_dev), __func__));
   11314 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11315 }
   11316 
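          /*
           * Get the software/firmware semaphore via the EXTCNFCTR MDIO
           * ownership bit (ICH/PCH).
           */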
   11317 static int
   11318 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11319 {
   11320 	uint32_t ext_ctrl;
   11321 	int timeout = 200;
   11322 
   11323 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11324 		device_xname(sc->sc_dev), __func__));
   11325 
   11326 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11327 	for (timeout = 0; timeout < 200; timeout++) {
   11328 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11329 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11330 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11331 
   11332 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11333 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11334 			return 0;
   11335 		delay(5000);
   11336 	}
   11337 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11338 	    device_xname(sc->sc_dev), ext_ctrl);
   11339 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11340 	return 1;
   11341 }
   11342 
   11343 static void
   11344 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11345 {
   11346 	uint32_t ext_ctrl;
   11347 
   11348 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11349 		device_xname(sc->sc_dev), __func__));
   11350 
   11351 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11352 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11353 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11354 
   11355 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11356 }
   11357 
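          /*
           * Get the software flag via the EXTCNFCTR MDIO ownership bit
           * (ICH8 and newer).
           */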
   11358 static int
   11359 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11360 {
   11361 	uint32_t ext_ctrl;
   11362 	int timeout;
   11363 
   11364 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11365 		device_xname(sc->sc_dev), __func__));
   11366 	mutex_enter(sc->sc_ich_phymtx);
   11367 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11368 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11369 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11370 			break;
   11371 		delay(1000);
   11372 	}
   11373 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11374 		printf("%s: SW has already locked the resource\n",
   11375 		    device_xname(sc->sc_dev));
   11376 		goto out;
   11377 	}
   11378 
   11379 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11380 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11381 	for (timeout = 0; timeout < 1000; timeout++) {
   11382 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11383 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11384 			break;
   11385 		delay(1000);
   11386 	}
   11387 	if (timeout >= 1000) {
   11388 		printf("%s: failed to acquire semaphore\n",
   11389 		    device_xname(sc->sc_dev));
   11390 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11391 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11392 		goto out;
   11393 	}
   11394 	return 0;
   11395 
   11396 out:
   11397 	mutex_exit(sc->sc_ich_phymtx);
   11398 	return 1;
   11399 }
   11400 
   11401 static void
   11402 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11403 {
   11404 	uint32_t ext_ctrl;
   11405 
   11406 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11407 		device_xname(sc->sc_dev), __func__));
   11408 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11409 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11410 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11411 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11412 	} else {
   11413 		printf("%s: Semaphore unexpectedly released\n",
   11414 		    device_xname(sc->sc_dev));
   11415 	}
   11416 
   11417 	mutex_exit(sc->sc_ich_phymtx);
   11418 }
   11419 
   11420 static int
   11421 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11422 {
   11423 
   11424 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11425 		device_xname(sc->sc_dev), __func__));
   11426 	mutex_enter(sc->sc_ich_nvmmtx);
   11427 
   11428 	return 0;
   11429 }
   11430 
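         /*
          * wm_put_nvm_ich8lan:
          *
          *	Release the ICH8 NVM mutex taken in wm_get_nvm_ich8lan().
          */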
   11431 static void
   11432 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11433 {
   11434 
   11435 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11436 		device_xname(sc->sc_dev), __func__));
   11437 	mutex_exit(sc->sc_ich_nvmmtx);
   11438 }
   11439 
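         /*
          * wm_get_hw_semaphore_82573:
          *
          *	Get the HW semaphore on 82573 family devices by setting the
          *	EXTCNFCTR MDIO software ownership bit, retrying every 2ms up
          *	to WM_MDIO_OWNERSHIP_TIMEOUT times.  Returns 0 on success or
          *	-1 if the PHY never became accessible.
          */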
   11440 static int
   11441 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11442 {
   11443 	int i = 0;
   11444 	uint32_t reg;
   11445 
   11446 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11447 		device_xname(sc->sc_dev), __func__));
   11448 
   11449 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11450 	do {
   11451 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11452 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11453 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11454 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11455 			break;
   11456 		delay(2*1000);
   11457 		i++;
   11458 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11459 
   11460 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11461 		wm_put_hw_semaphore_82573(sc);
   11462 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11463 		    device_xname(sc->sc_dev));
   11464 		return -1;
   11465 	}
   11466 
   11467 	return 0;
   11468 }
   11469 
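         /*
          * wm_put_hw_semaphore_82573:
          *
          *	Put the HW semaphore on 82573 family devices by clearing the
          *	EXTCNFCTR MDIO software ownership bit.
          */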
   11470 static void
   11471 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11472 {
   11473 	uint32_t reg;
   11474 
   11475 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11476 		device_xname(sc->sc_dev), __func__));
   11477 
   11478 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11479 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11480 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11481 }
   11482 
   11483 /*
   11484  * Management mode and power management related subroutines.
   11485  * BMC, AMT, suspend/resume and EEE.
   11486  */
   11487 
   11488 #ifdef WM_WOL
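         /*
          * wm_check_mng_mode:
          *
          *	Return non-zero if management mode is enabled, dispatching to
          *	the chip-specific check routines below.
          */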
   11489 static int
   11490 wm_check_mng_mode(struct wm_softc *sc)
   11491 {
   11492 	int rv;
   11493 
   11494 	switch (sc->sc_type) {
   11495 	case WM_T_ICH8:
   11496 	case WM_T_ICH9:
   11497 	case WM_T_ICH10:
   11498 	case WM_T_PCH:
   11499 	case WM_T_PCH2:
   11500 	case WM_T_PCH_LPT:
   11501 	case WM_T_PCH_SPT:
   11502 		rv = wm_check_mng_mode_ich8lan(sc);
   11503 		break;
   11504 	case WM_T_82574:
   11505 	case WM_T_82583:
   11506 		rv = wm_check_mng_mode_82574(sc);
   11507 		break;
   11508 	case WM_T_82571:
   11509 	case WM_T_82572:
   11510 	case WM_T_82573:
   11511 	case WM_T_80003:
   11512 		rv = wm_check_mng_mode_generic(sc);
   11513 		break;
   11514 	default:
   11515 		/* nothing to do */
   11516 		rv = 0;
   11517 		break;
   11518 	}
   11519 
   11520 	return rv;
   11521 }
   11522 
   11523 static int
   11524 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11525 {
   11526 	uint32_t fwsm;
   11527 
   11528 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11529 
   11530 	if (((fwsm & FWSM_FW_VALID) != 0)
   11531 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11532 		return 1;
   11533 
   11534 	return 0;
   11535 }
   11536 
   11537 static int
   11538 wm_check_mng_mode_82574(struct wm_softc *sc)
   11539 {
   11540 	uint16_t data;
   11541 
   11542 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11543 
   11544 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11545 		return 1;
   11546 
   11547 	return 0;
   11548 }
   11549 
   11550 static int
   11551 wm_check_mng_mode_generic(struct wm_softc *sc)
   11552 {
   11553 	uint32_t fwsm;
   11554 
   11555 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11556 
   11557 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11558 		return 1;
   11559 
   11560 	return 0;
   11561 }
   11562 #endif /* WM_WOL */
   11563 
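         /*
          * wm_enable_mng_pass_thru:
          *
          *	Check whether management packets should be passed through to
          *	the host.  Returns non-zero only when ASF firmware is
          *	present, TCO reception is enabled and the chip-specific
          *	manageability checks pass; the result is used to set
          *	WM_F_HAS_MANAGE.
          */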
   11564 static int
   11565 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11566 {
   11567 	uint32_t manc, fwsm, factps;
   11568 
   11569 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11570 		return 0;
   11571 
   11572 	manc = CSR_READ(sc, WMREG_MANC);
   11573 
   11574 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11575 		device_xname(sc->sc_dev), manc));
   11576 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11577 		return 0;
   11578 
   11579 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11580 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11581 		factps = CSR_READ(sc, WMREG_FACTPS);
   11582 		if (((factps & FACTPS_MNGCG) == 0)
   11583 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11584 			return 1;
   11585 	} else if ((sc->sc_type == WM_T_82574)
         	    || (sc->sc_type == WM_T_82583)) {
   11586 		uint16_t data;
   11587 
   11588 		factps = CSR_READ(sc, WMREG_FACTPS);
   11589 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11590 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11591 			device_xname(sc->sc_dev), factps, data));
   11592 		if (((factps & FACTPS_MNGCG) == 0)
   11593 		    && ((data & NVM_CFG2_MNGM_MASK)
   11594 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11595 			return 1;
   11596 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11597 	    && ((manc & MANC_ASF_EN) == 0))
   11598 		return 1;
   11599 
   11600 	return 0;
   11601 }
   11602 
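         /*
          * wm_phy_resetisblocked:
          *
          *	Check whether firmware is blocking PHY resets: poll
          *	FWSM_RSPCIPHY for up to ~300ms on ICH/PCH devices, or test
          *	MANC_BLK_PHY_RST_ON_IDE on the 8257x/80003 families.
          */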
   11603 static bool
   11604 wm_phy_resetisblocked(struct wm_softc *sc)
   11605 {
   11606 	bool blocked = false;
   11607 	uint32_t reg;
   11608 	int i = 0;
   11609 
   11610 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11611 		device_xname(sc->sc_dev), __func__));
   11612 
   11613 	switch (sc->sc_type) {
   11614 	case WM_T_ICH8:
   11615 	case WM_T_ICH9:
   11616 	case WM_T_ICH10:
   11617 	case WM_T_PCH:
   11618 	case WM_T_PCH2:
   11619 	case WM_T_PCH_LPT:
   11620 	case WM_T_PCH_SPT:
   11621 		do {
   11622 			reg = CSR_READ(sc, WMREG_FWSM);
   11623 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11624 				blocked = true;
   11625 				delay(10*1000);
   11626 				continue;
   11627 			}
   11628 			blocked = false;
   11629 		} while (blocked && (i++ < 30));
   11630 		return blocked;
   11632 	case WM_T_82571:
   11633 	case WM_T_82572:
   11634 	case WM_T_82573:
   11635 	case WM_T_82574:
   11636 	case WM_T_82583:
   11637 	case WM_T_80003:
   11638 		reg = CSR_READ(sc, WMREG_MANC);
   11639 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11640 			return true;
   11641 		else
   11642 			return false;
   11644 	default:
   11645 		/* no problem */
   11646 		break;
   11647 	}
   11648 
   11649 	return false;
   11650 }
   11651 
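         /*
          * wm_get_hw_control:
          *
          *	Tell the firmware that the driver has loaded, using
          *	SWSM_DRV_LOAD on the 82573 and CTRL_EXT_DRV_LOAD on the
          *	other manageable chips.
          */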
   11652 static void
   11653 wm_get_hw_control(struct wm_softc *sc)
   11654 {
   11655 	uint32_t reg;
   11656 
   11657 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11658 		device_xname(sc->sc_dev), __func__));
   11659 
   11660 	switch (sc->sc_type) {
   11661 	case WM_T_82573:
   11662 		reg = CSR_READ(sc, WMREG_SWSM);
   11663 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11664 		break;
   11665 	case WM_T_82571:
   11666 	case WM_T_82572:
   11667 	case WM_T_82574:
   11668 	case WM_T_82583:
   11669 	case WM_T_80003:
   11670 	case WM_T_ICH8:
   11671 	case WM_T_ICH9:
   11672 	case WM_T_ICH10:
   11673 	case WM_T_PCH:
   11674 	case WM_T_PCH2:
   11675 	case WM_T_PCH_LPT:
   11676 	case WM_T_PCH_SPT:
   11677 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11678 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11679 		break;
   11680 	default:
   11681 		break;
   11682 	}
   11683 }
   11684 
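         /*
          * wm_release_hw_control:
          *
          *	Return control of the hardware to the firmware by clearing
          *	the DRV_LOAD bit; a no-op on devices without manageability.
          */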
   11685 static void
   11686 wm_release_hw_control(struct wm_softc *sc)
   11687 {
   11688 	uint32_t reg;
   11689 
   11690 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11691 		device_xname(sc->sc_dev), __func__));
   11692 
   11693 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11694 		return;
   11695 
   11696 	if (sc->sc_type == WM_T_82573) {
   11697 		reg = CSR_READ(sc, WMREG_SWSM);
   11699 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11700 	} else {
   11701 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11702 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11703 	}
   11704 }
   11705 
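         /*
          * wm_gate_hw_phy_config_ich8lan:
          *
          *	Gate or ungate automatic PHY configuration by hardware; only
          *	meaningful on PCH2 (82579) and newer.
          */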
   11706 static void
   11707 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11708 {
   11709 	uint32_t reg;
   11710 
   11711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11712 		device_xname(sc->sc_dev), __func__));
   11713 
   11714 	if (sc->sc_type < WM_T_PCH2)
   11715 		return;
   11716 
   11717 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11718 
   11719 	if (gate)
   11720 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11721 	else
   11722 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11723 
   11724 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11725 }
   11726 
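         /*
          * wm_smbustopci:
          *
          *	Switch the PHY interface from SMBus back to PCIe when no
          *	valid firmware is present: clear the forced-SMBus mode on
          *	PCH_LPT and newer and toggle the LANPHYPC pin to power-cycle
          *	the PHY.
          */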
   11727 static void
   11728 wm_smbustopci(struct wm_softc *sc)
   11729 {
   11730 	uint32_t fwsm, reg;
   11731 
   11732 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11733 		device_xname(sc->sc_dev), __func__));
   11734 
   11735 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11736 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11737 
   11738 	/* Acquire PHY semaphore */
   11739 	sc->phy.acquire(sc);
   11740 
   11741 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11742 	if (((fwsm & FWSM_FW_VALID) == 0)
   11743 	    && (wm_phy_resetisblocked(sc) == false)) {
   11744 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11745 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11746 			reg |= CTRL_EXT_FORCE_SMBUS;
   11747 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11748 			CSR_WRITE_FLUSH(sc);
   11749 			delay(50*1000);
   11750 		}
   11751 
   11752 		/* Toggle LANPHYPC */
   11753 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11754 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11755 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11756 		CSR_WRITE_FLUSH(sc);
   11757 		delay(1000);
   11758 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11759 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11760 		CSR_WRITE_FLUSH(sc);
   11761 		delay(50*1000);
   11762 
   11763 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11764 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11765 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11766 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11767 		}
   11768 	}
   11769 
   11770 	/* Release semaphore */
   11771 	sc->phy.release(sc);
   11772 
   11773 	/*
   11774 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11775 	 */
   11776 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11777 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11778 }
   11779 
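         /*
          * wm_init_manageability:
          *
          *	Configure manageability filters for normal operation: stop
          *	the hardware from intercepting ARP and, on 82571 and newer,
          *	let management packets on ports 623 and 624 through to the
          *	host.
          */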
   11780 static void
   11781 wm_init_manageability(struct wm_softc *sc)
   11782 {
   11783 
   11784 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11785 		device_xname(sc->sc_dev), __func__));
   11786 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11787 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11788 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11789 
   11790 		/* Disable hardware interception of ARP */
   11791 		manc &= ~MANC_ARP_EN;
   11792 
   11793 		/* Enable receiving management packets to the host */
   11794 		if (sc->sc_type >= WM_T_82571) {
   11795 			manc |= MANC_EN_MNG2HOST;
   11796 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11797 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11798 		}
   11799 
   11800 		CSR_WRITE(sc, WMREG_MANC, manc);
   11801 	}
   11802 }
   11803 
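         /*
          * wm_release_manageability:
          *
          *	Undo wm_init_manageability(): re-enable hardware ARP
          *	interception and stop routing management packets to the host.
          */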
   11804 static void
   11805 wm_release_manageability(struct wm_softc *sc)
   11806 {
   11807 
   11808 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11809 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11810 
   11811 		manc |= MANC_ARP_EN;
   11812 		if (sc->sc_type >= WM_T_82571)
   11813 			manc &= ~MANC_EN_MNG2HOST;
   11814 
   11815 		CSR_WRITE(sc, WMREG_MANC, manc);
   11816 	}
   11817 }
   11818 
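         /*
          * wm_get_wakeup:
          *
          *	Record the wakeup-related capabilities of the chip in
          *	sc_flags: AMT, a valid ARC subsystem, ASF firmware and
          *	manageability.
          */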
   11819 static void
   11820 wm_get_wakeup(struct wm_softc *sc)
   11821 {
   11822 
   11823 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11824 	switch (sc->sc_type) {
   11825 	case WM_T_82573:
   11826 	case WM_T_82583:
   11827 		sc->sc_flags |= WM_F_HAS_AMT;
   11828 		/* FALLTHROUGH */
   11829 	case WM_T_80003:
   11830 	case WM_T_82541:
   11831 	case WM_T_82547:
   11832 	case WM_T_82571:
   11833 	case WM_T_82572:
   11834 	case WM_T_82574:
   11835 	case WM_T_82575:
   11836 	case WM_T_82576:
   11837 	case WM_T_82580:
   11838 	case WM_T_I350:
   11839 	case WM_T_I354:
   11840 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11841 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11842 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11843 		break;
   11844 	case WM_T_ICH8:
   11845 	case WM_T_ICH9:
   11846 	case WM_T_ICH10:
   11847 	case WM_T_PCH:
   11848 	case WM_T_PCH2:
   11849 	case WM_T_PCH_LPT:
   11850 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11851 		sc->sc_flags |= WM_F_HAS_AMT;
   11852 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11853 		break;
   11854 	default:
   11855 		break;
   11856 	}
   11857 
   11858 	/* 1: HAS_MANAGE */
   11859 	if (wm_enable_mng_pass_thru(sc) != 0)
   11860 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11861 
   11862 #ifdef WM_DEBUG
   11863 	printf("\n");
   11864 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11865 		printf("HAS_AMT,");
   11866 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11867 		printf("ARC_SUBSYS_VALID,");
   11868 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11869 		printf("ASF_FIRMWARE_PRES,");
   11870 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11871 		printf("HAS_MANAGE,");
   11872 	printf("\n");
   11873 #endif
   11874 	/*
   11875 	 * Note that the WOL flag is set after the EEPROM settings have
   11876 	 * been reset.
   11877 	 */
   11878 }
   11879 
   11880 #ifdef WM_WOL
   11881 /* WOL in the newer chipset interfaces (pchlan); not yet implemented */
   11882 static void
   11883 wm_enable_phy_wakeup(struct wm_softc *sc)
   11884 {
   11885 #if 0
   11886 	uint16_t preg;
   11887 
   11888 	/* Copy MAC RARs to PHY RARs */
   11889 
   11890 	/* Copy MAC MTA to PHY MTA */
   11891 
   11892 	/* Configure PHY Rx Control register */
   11893 
   11894 	/* Enable PHY wakeup in MAC register */
   11895 
   11896 	/* Configure and enable PHY wakeup in PHY registers */
   11897 
   11898 	/* Activate PHY wakeup */
   11899 
   11900 	/* XXX */
   11901 #endif
   11902 }
   11903 
   11904 /*
          * Power down workaround on D3: force the IGP3 PHY's voltage
          * regulator into shutdown, retrying once with a PHY reset.
          */
   11905 static void
   11906 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11907 {
   11908 	uint32_t reg;
   11909 	int i;
   11910 
   11911 	for (i = 0; i < 2; i++) {
   11912 		/* Disable link */
   11913 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11914 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11915 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11916 
   11917 		/*
   11918 		 * Call gig speed drop workaround on Gig disable before
   11919 		 * accessing any PHY registers
   11920 		 */
   11921 		if (sc->sc_type == WM_T_ICH8)
   11922 			wm_gig_downshift_workaround_ich8lan(sc);
   11923 
   11924 		/* Write VR power-down enable */
   11925 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11926 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11927 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11928 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11929 
   11930 		/* Read it back and test */
   11931 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11932 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11933 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11934 			break;
   11935 
   11936 		/* Issue PHY reset and repeat at most one more time */
   11937 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11938 	}
   11939 }
   11940 
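         /*
          * wm_enable_wakeup:
          *
          *	Arm the device for Wake-on-LAN: advertise the wakeup
          *	capability, apply the ICH/PCH workarounds, enable the magic
          *	packet filter and request PME assertion in the PCI power
          *	management registers.
          */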
   11941 static void
   11942 wm_enable_wakeup(struct wm_softc *sc)
   11943 {
   11944 	uint32_t reg;
         	int pmreg;
   11945 	pcireg_t pmode;
   11946 
   11947 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11948 		&pmreg, NULL) == 0)
   11949 		return;
   11950 
   11951 	/* Advertise the wakeup capability */
   11952 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11953 	    | CTRL_SWDPIN(3));
   11954 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11955 
   11956 	/* ICH workaround */
   11957 	switch (sc->sc_type) {
   11958 	case WM_T_ICH8:
   11959 	case WM_T_ICH9:
   11960 	case WM_T_ICH10:
   11961 	case WM_T_PCH:
   11962 	case WM_T_PCH2:
   11963 	case WM_T_PCH_LPT:
   11964 	case WM_T_PCH_SPT:
   11965 		/* Disable gig during WOL */
   11966 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11967 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11968 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11969 		if (sc->sc_type == WM_T_PCH)
   11970 			wm_gmii_reset(sc);
   11971 
   11972 		/* Power down workaround */
   11973 		if (sc->sc_phytype == WMPHY_82577) {
   11974 			struct mii_softc *child;
   11975 
   11976 			/* Assume that the PHY is copper */
   11977 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11978 			if (child->mii_mpd_rev <= 2)
   11979 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11980 				    (768 << 5) | 25, 0x0444); /* magic num */
   11981 		}
   11982 		break;
   11983 	default:
   11984 		break;
   11985 	}
   11986 
   11987 	/* Keep the laser running on fiber adapters */
   11988 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11989 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11990 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11991 		reg |= CTRL_EXT_SWDPIN(3);
   11992 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11993 	}
   11994 
   11995 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11996 #if 0	/* for the multicast packet */
   11997 	reg |= WUFC_MC;
   11998 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11999 #endif
   12000 
   12001 	if (sc->sc_type == WM_T_PCH) {
   12002 		wm_enable_phy_wakeup(sc);
   12003 	} else {
   12004 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   12005 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12006 	}
   12007 
   12008 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12009 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12010 	    || (sc->sc_type == WM_T_PCH2))
   12011 	    && (sc->sc_phytype == WMPHY_IGP_3))
   12012 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12013 
   12014 	/* Request PME */
   12015 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12016 #if 0
   12017 	/* Disable WOL */
   12018 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12019 #else
   12020 	/* For WOL */
   12021 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12022 #endif
   12023 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12024 }
   12025 #endif /* WM_WOL */
   12026 
   12027 /* LPLU */
   12028 
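         /*
          * wm_lplu_d0_disable:
          *
          *	Disable D0 Low Power Link Up (LPLU) through the PHY_CTRL
          *	register.
          */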
   12029 static void
   12030 wm_lplu_d0_disable(struct wm_softc *sc)
   12031 {
   12032 	uint32_t reg;
   12033 
   12034 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12035 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12036 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12037 }
   12038 
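         /*
          * wm_lplu_d0_disable_pch:
          *
          *	Disable D0 LPLU on PCH variants through the HV_OEM_BITS PHY
          *	register, restarting auto-negotiation so the change takes
          *	effect.
          */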
   12039 static void
   12040 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12041 {
   12042 	uint32_t reg;
   12043 
   12044 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12045 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12046 	reg |= HV_OEM_BITS_ANEGNOW;
   12047 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12048 }
   12049 
   12050 /* EEE */
   12051 
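         /*
          * wm_set_eee_i350:
          *
          *	Enable or disable Energy Efficient Ethernet according to the
          *	WM_F_EEE flag: advertise EEE at 100Mbps and 1Gbps and enable
          *	low-power-idle in both directions, or turn it all off.
          */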
   12052 static void
   12053 wm_set_eee_i350(struct wm_softc *sc)
   12054 {
   12055 	uint32_t ipcnfg, eeer;
   12056 
   12057 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12058 	eeer = CSR_READ(sc, WMREG_EEER);
   12059 
   12060 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12061 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12062 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12063 		    | EEER_LPI_FC);
   12064 	} else {
   12065 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12066 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12067 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12068 		    | EEER_LPI_FC);
   12069 	}
   12070 
   12071 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12072 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12073 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12074 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12075 }
   12076 
   12077 /*
   12078  * Workarounds (mainly PHY related).
   12079  * Basically, the PHY workarounds are in the PHY drivers.
   12080  */
   12081 
   12082 /* Work-around for 82566 Kumeran PCS lock loss */
   12083 static void
   12084 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12085 {
   12086 #if 0
   12087 	int miistatus, active, i;
   12088 	int reg;
   12089 
   12090 	miistatus = sc->sc_mii.mii_media_status;
   12091 
   12092 	/* If the link is not up, do nothing */
   12093 	if ((miistatus & IFM_ACTIVE) == 0)
   12094 		return;
   12095 
   12096 	active = sc->sc_mii.mii_media_active;
   12097 
   12098 	/* Nothing to do if the link is other than 1Gbps */
   12099 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12100 		return;
   12101 
   12102 	for (i = 0; i < 10; i++) {
   12103 		/* read twice */
   12104 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12105 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12106 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12107 			goto out;	/* GOOD! */
   12108 
   12109 		/* Reset the PHY */
   12110 		wm_gmii_reset(sc);
   12111 		delay(5*1000);
   12112 	}
   12113 
   12114 	/* Disable GigE link negotiation */
   12115 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12116 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12117 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12118 
   12119 	/*
   12120 	 * Call gig speed drop workaround on Gig disable before accessing
   12121 	 * any PHY registers.
   12122 	 */
   12123 	wm_gig_downshift_workaround_ich8lan(sc);
   12124 
   12125 out:
   12126 	return;
   12127 #endif
   12128 }
   12129 
   12130 /*
          * Workaround for the "WOL from S5 stops working" issue: briefly
          * enable and then disable Kumeran near-end loopback on IGP3 PHYs.
          */
   12131 static void
   12132 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12133 {
   12134 	uint16_t kmrn_reg;
   12135 
   12136 	/* Only for igp3 */
   12137 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12138 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12139 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12140 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12141 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12142 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12143 	}
   12144 }
   12145 
   12146 /*
   12147  * Workarounds for the PCH (82577/82578) and PCH2 (82579) PHYs.
   12148  * XXX should be moved to new PHY driver?
   12149  */
   12150 static void
   12151 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12152 {
   12153 
   12154 	KASSERT(sc->sc_type == WM_T_PCH);
   12155 
   12156 	if (sc->sc_phytype == WMPHY_82577)
   12157 		wm_set_mdio_slow_mode_hv(sc);
   12158 
   12159 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12160 
   12161 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12162 
   12163 	/* 82578 */
   12164 	if (sc->sc_phytype == WMPHY_82578) {
   12165 		/* PCH rev. < 3 */
   12166 		if (sc->sc_rev < 3) {
   12167 			/* XXX 6 bit shift? Why? Is it page2? */
   12168 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   12169 			    0x66c0);
   12170 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   12171 			    0xffff);
   12172 		}
   12173 
   12174 		/* XXX phy rev. < 2 */
   12175 	}
   12176 
   12177 	/* Select page 0 */
   12178 
   12179 	sc->phy.acquire(sc);
   12180 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12181 	sc->phy.release(sc);
   12182 
   12183 	/*
   12184 	 * Configure the K1 Si workaround during PHY reset, assuming there
   12185 	 * is link, so that K1 is disabled if the link is up at 1Gbps.
   12186 	 */
   12187 	wm_k1_gig_workaround_hv(sc, 1);
   12188 }
   12189 
   12190 static void
   12191 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12192 {
   12193 
   12194 	KASSERT(sc->sc_type == WM_T_PCH2);
   12195 
   12196 	wm_set_mdio_slow_mode_hv(sc);
   12197 }
   12198 
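         /*
          * wm_k1_gig_workaround_hv:
          *
          *	K1/gig link stall workaround: K1 (a Kumeran power-saving
          *	state) must not be enabled while the link is up at 1Gbps, so
          *	apply the NVM K1 setting only when there is no link, and
          *	write the matching link-up/link-down fixup to the KMRN
          *	diagnostic register.
          */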
   12199 static int
   12200 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12201 {
   12202 	int k1_enable = sc->sc_nvm_k1_enabled;
   12203 
   12204 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12205 		device_xname(sc->sc_dev), __func__));
   12206 
   12207 	if (sc->phy.acquire(sc) != 0)
   12208 		return -1;
   12209 
   12210 	if (link) {
   12211 		k1_enable = 0;
   12212 
   12213 		/* Link stall fix for link up */
   12214 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
         		    0x0100);
   12215 	} else {
   12216 		/* Link stall fix for link down */
   12217 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
         		    0x4100);
   12218 	}
   12219 
   12220 	wm_configure_k1_ich8lan(sc, k1_enable);
   12221 	sc->phy.release(sc);
   12222 
   12223 	return 0;
   12224 }
   12225 
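         /*
          * wm_set_mdio_slow_mode_hv:
          *
          *	Put the PHY's MDIO interface into slow mode via the
          *	HV_KMRN_MODE_CTRL register.
          */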
   12226 static void
   12227 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12228 {
   12229 	uint32_t reg;
   12230 
   12231 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12232 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12233 	    reg | HV_KMRN_MDIO_SLOW);
   12234 }
   12235 
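         /*
          * wm_configure_k1_ich8lan:
          *
          *	Write the K1 enable bit to the Kumeran K1_CONFIG register,
          *	then briefly force and bypass the MAC speed setting so the
          *	new value is latched.  Must be called with the PHY semaphore
          *	held.
          */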
   12236 static void
   12237 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12238 {
   12239 	uint32_t ctrl, ctrl_ext, tmp;
   12240 	uint16_t kmrn_reg;
   12241 
   12242 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12243 
   12244 	if (k1_enable)
   12245 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12246 	else
   12247 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12248 
   12249 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12250 
   12251 	delay(20);
   12252 
   12253 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12254 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12255 
   12256 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12257 	tmp |= CTRL_FRCSPD;
   12258 
   12259 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12260 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12261 	CSR_WRITE_FLUSH(sc);
   12262 	delay(20);
   12263 
   12264 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12265 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12266 	CSR_WRITE_FLUSH(sc);
   12267 	delay(20);
   12268 }
   12269 
   12270 /* special case - for 82575 - need to do manual init ... */
   12271 static void
   12272 wm_reset_init_script_82575(struct wm_softc *sc)
   12273 {
   12274 	/*
   12275 	 * Remark: this is untested code - we have no board without EEPROM;
   12276 	 * it is the same setup as in the FreeBSD driver for the i82575.
   12277 	 */
   12278 
   12279 	/* SerDes configuration via SERDESCTRL */
   12280 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12281 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12282 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12283 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12284 
   12285 	/* CCM configuration via CCMCTL register */
   12286 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12287 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12288 
   12289 	/* PCIe lanes configuration */
   12290 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12291 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12292 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12293 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12294 
   12295 	/* PCIe PLL Configuration */
   12296 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12297 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12298 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12299 }
   12300 
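         /*
          * wm_reset_mdicnfg_82580:
          *
          *	Restore the MDICNFG destination and shared-MDIO bits from the
          *	CFG3 NVM word after a reset; only needed when the port is in
          *	SGMII mode.
          */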
   12301 static void
   12302 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12303 {
   12304 	uint32_t reg;
   12305 	uint16_t nvmword;
   12306 	int rv;
   12307 
   12308 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12309 		return;
   12310 
   12311 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12312 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12313 	if (rv != 0) {
   12314 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12315 		    __func__);
   12316 		return;
   12317 	}
   12318 
   12319 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12320 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12321 		reg |= MDICNFG_DEST;
   12322 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12323 		reg |= MDICNFG_COM_MDIO;
   12324 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12325 }
   12326 
   12327 /*
   12328  * I210 Errata 25 and I211 Errata 10
   12329  * Slow System Clock.
          *
          * If the internal PHY's PLL comes up unconfigured, reset the PHY and
          * cycle the device through D3/D0 with a patched iNVM autoload word
          * until the PLL reads back as configured.
          */
   12331 static void
   12332 wm_pll_workaround_i210(struct wm_softc *sc)
   12333 {
   12334 	uint32_t mdicnfg, wuc;
   12335 	uint32_t reg;
   12336 	pcireg_t pcireg;
   12337 	int pmreg;
   12338 	uint16_t nvmword, tmp_nvmword;
   12339 	int phyval;
   12340 	bool wa_done = false;
   12341 	int i;
   12342 
   12343 	/* Save WUC and MDICNFG registers */
   12344 	wuc = CSR_READ(sc, WMREG_WUC);
   12345 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12346 
   12347 	reg = mdicnfg & ~MDICNFG_DEST;
   12348 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12349 
   12350 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12351 		nvmword = INVM_DEFAULT_AL;
   12352 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12353 
   12354 	/* Get Power Management cap offset */
   12355 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12356 		&pmreg, NULL) == 0)
   12357 		return;
   12358 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12359 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12360 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12361 
   12362 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
   12363 			break; /* OK */
   12365 
   12366 		wa_done = true;
   12367 		/* Directly reset the internal PHY */
   12368 		reg = CSR_READ(sc, WMREG_CTRL);
   12369 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12370 
   12371 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12372 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12373 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12374 
   12375 		CSR_WRITE(sc, WMREG_WUC, 0);
   12376 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12377 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12378 
   12379 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12380 		    pmreg + PCI_PMCSR);
   12381 		pcireg |= PCI_PMCSR_STATE_D3;
   12382 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12383 		    pmreg + PCI_PMCSR, pcireg);
   12384 		delay(1000);
   12385 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12386 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12387 		    pmreg + PCI_PMCSR, pcireg);
   12388 
   12389 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12390 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12391 
   12392 		/* Restore WUC register */
   12393 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12394 	}
   12395 
   12396 	/* Restore MDICNFG setting */
   12397 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12398 	if (wa_done)
   12399 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12400 }
   12401