      1 /*	$NetBSD: if_wm.c,v 1.454 2016/12/01 02:36:50 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.454 2016/12/01 02:36:50 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
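
/*
 * Illustrative usage (hypothetical call): DPRINTF()'s second argument
 * must be a fully parenthesized printf() argument list, because the
 * macro expands to "printf y":
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */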
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this driver uses: one per queue, plus one for link.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
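
/*
 * Worked example (illustrative): because WM_NTXDESC() is a power of
 * two, ring indices wrap with a single AND instead of a modulo.
 * With WM_NTXDESC(txq) == 4096 (mask 4095):
 *
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 *	WM_NEXTTX(txq,  100) == ( 100 + 1) & 4095 == 101
 */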
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
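
/*
 * Worked example (illustrative, assuming a 9000 byte jumbo frame):
 * each Rx buffer holds MCLBYTES (2048) bytes, so one such frame spans
 * howmany(9000, 2048) == 5 descriptors; a 256-entry ring therefore
 * holds roughly 256 / 5 == 51 of them, i.e. the "50 jumbo packets"
 * mentioned above.
 */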
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
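
/*
 * Illustrative note: one wm_txsoft tracks one packet (one "Tx job"),
 * which may occupy several hardware descriptors; txs_firstdesc and
 * txs_lastdesc bracket that range so the Tx completion path can
 * recycle all of a job's descriptors once the last one is done.
 */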
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
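
/*
 * Illustrative expansion: WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() then snprintf()s a name such as "txq00txdw"
 * into the buffer before handing it to evcnt_attach_dynamic().  The
 * odd-looking string literal merely reserves enough room for
 * "<qname>NN<evname>"; "##" is not pasted inside a string.
 */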
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* size of a Tx descriptor */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segments */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > the number of Tx queues, a Tx queue is shared by
    316 	 * multiple CPUs. This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
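
/*
 * A minimal sketch (disabled, illustrative only) of how txq_interq is
 * meant to be used: a sending CPU that does not own the queue parks
 * its mbuf in the lockless pcq(9), to be drained later under txq_lock.
 */
#if 0
static int
wm_example_enqueue(struct wm_txqueue *txq, struct mbuf *m)
{

	if (!pcq_put(txq->txq_interq, m)) {
		m_freem(m);	/* queue full: drop the packet */
		return ENOBUFS;
	}
	return 0;
}
#endif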
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    365 	int rxq_desc_rseg;		/* real number of control segments */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
    390 	int wmq_intr_idx;		/* index of MSI-X tables */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 	int reset_delay_us;
    400 };
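
/*
 * Hypothetical illustration: attach-time code points this ops vector
 * at the chip-appropriate semaphore routines declared later in this
 * file, e.g.
 *
 *	sc->phy.acquire = wm_get_phy_82575;
 *	sc->phy.release = wm_put_phy_82575;
 *
 * so PHY accessors can serialize without switching on sc_type.
 */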
    401 
    402 /*
    403  * Software state per device.
    404  */
    405 struct wm_softc {
    406 	device_t sc_dev;		/* generic device information */
    407 	bus_space_tag_t sc_st;		/* bus space tag */
    408 	bus_space_handle_t sc_sh;	/* bus space handle */
    409 	bus_size_t sc_ss;		/* bus space size */
    410 	bus_space_tag_t sc_iot;		/* I/O space tag */
    411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    412 	bus_size_t sc_ios;		/* I/O space size */
    413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    415 	bus_size_t sc_flashs;		/* flash registers space size */
    416 	off_t sc_flashreg_offset;	/*
    417 					 * offset to flash registers from
    418 					 * start of BAR
    419 					 */
    420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    421 
    422 	struct ethercom sc_ethercom;	/* ethernet common data */
    423 	struct mii_data sc_mii;		/* MII/media information */
    424 
    425 	pci_chipset_tag_t sc_pc;
    426 	pcitag_t sc_pcitag;
    427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    429 
    430 	uint16_t sc_pcidevid;		/* PCI device ID */
    431 	wm_chip_type sc_type;		/* MAC type */
    432 	int sc_rev;			/* MAC revision */
    433 	wm_phy_type sc_phytype;		/* PHY type */
    434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    435 #define	WM_MEDIATYPE_UNKNOWN		0x00
    436 #define	WM_MEDIATYPE_FIBER		0x01
    437 #define	WM_MEDIATYPE_COPPER		0x02
    438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    440 	int sc_flags;			/* flags; see below */
    441 	int sc_if_flags;		/* last if_flags */
    442 	int sc_flowflags;		/* 802.3x flow control flags */
    443 	int sc_align_tweak;
    444 
    445 	void *sc_ihs[WM_MAX_NINTR];	/*
    446 					 * interrupt cookie.
    447 					 * legacy and msi use sc_ihs[0].
    448 					 */
    449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    450 	int sc_nintrs;			/* number of interrupts */
    451 
    452 	int sc_link_intr_idx;		/* index of MSI-X tables */
    453 
    454 	callout_t sc_tick_ch;		/* tick callout */
    455 	bool sc_core_stopping;
    456 
    457 	int sc_nvm_ver_major;
    458 	int sc_nvm_ver_minor;
    459 	int sc_nvm_ver_build;
    460 	int sc_nvm_addrbits;		/* NVM address bits */
    461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    462 	int sc_ich8_flash_base;
    463 	int sc_ich8_flash_bank_size;
    464 	int sc_nvm_k1_enabled;
    465 
    466 	int sc_nqueues;
    467 	struct wm_queue *sc_queue;
    468 
    469 	int sc_affinity_offset;
    470 
    471 #ifdef WM_EVENT_COUNTERS
    472 	/* Event counters. */
    473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    474 
    475 	/* WM_T_82542_2_1 only */
    476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    481 #endif /* WM_EVENT_COUNTERS */
    482 
    483 	/* This variable is used only on the 82547. */
    484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    485 
    486 	uint32_t sc_ctrl;		/* prototype CTRL register */
    487 #if 0
    488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    489 #endif
    490 	uint32_t sc_icr;		/* prototype interrupt bits */
    491 	uint32_t sc_itr;		/* prototype intr throttling reg */
    492 	uint32_t sc_tctl;		/* prototype TCTL register */
    493 	uint32_t sc_rctl;		/* prototype RCTL register */
    494 	uint32_t sc_txcw;		/* prototype TXCW register */
    495 	uint32_t sc_tipg;		/* prototype TIPG register */
    496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    497 	uint32_t sc_pba;		/* prototype PBA register */
    498 
    499 	int sc_tbi_linkup;		/* TBI link status */
    500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    502 
    503 	int sc_mchash_type;		/* multicast filter offset */
    504 
    505 	krndsource_t rnd_source;	/* random source */
    506 
    507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    508 
    509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    510 	kmutex_t *sc_ich_phymtx;	/*
    511 					 * 82574/82583/ICH/PCH specific PHY
    512 					 * mutex. For 82574/82583, the mutex
    513 					 * is used for both PHY and NVM.
    514 					 */
    515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    516 
    517 	struct wm_phyop phy;
    518 };
    519 
    520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    523 
    524 #ifdef WM_MPSAFE
    525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    526 #else
    527 #define CALLOUT_FLAGS	0
    528 #endif
    529 
    530 #define	WM_RXCHAIN_RESET(rxq)						\
    531 do {									\
    532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    533 	*(rxq)->rxq_tailp = NULL;					\
    534 	(rxq)->rxq_len = 0;						\
    535 } while (/*CONSTCOND*/0)
    536 
    537 #define	WM_RXCHAIN_LINK(rxq, m)						\
    538 do {									\
    539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    540 	(rxq)->rxq_tailp = &(m)->m_next;				\
    541 } while (/*CONSTCOND*/0)
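
/*
 * Worked example (illustrative): rxq_tailp always points at the m_next
 * slot that will take the next fragment, so appending is O(1) with no
 * list walk:
 *
 *	WM_RXCHAIN_RESET(rxq);		head = NULL, tailp = &head
 *	WM_RXCHAIN_LINK(rxq, m0);	head = m0,   tailp = &m0->m_next
 *	WM_RXCHAIN_LINK(rxq, m1);	m0->m_next = m1, tailp = &m1->m_next
 */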
    542 
    543 #ifdef WM_EVENT_COUNTERS
    544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    546 
    547 #define WM_Q_EVCNT_INCR(qname, evname)			\
    548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    551 #else /* !WM_EVENT_COUNTERS */
    552 #define	WM_EVCNT_INCR(ev)	/* nothing */
    553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    554 
    555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    557 #endif /* !WM_EVENT_COUNTERS */
    558 
    559 #define	CSR_READ(sc, reg)						\
    560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    561 #define	CSR_WRITE(sc, reg, val)						\
    562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    563 #define	CSR_WRITE_FLUSH(sc)						\
    564 	(void) CSR_READ((sc), WMREG_STATUS)
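
/*
 * Illustrative only: PCI writes are posted, so reading back any chip
 * register (STATUS here) forces earlier writes to reach the device
 * before the driver continues.  A hypothetical reset sequence (the
 * 10us settle time is illustrative):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10);
 */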
    565 
    566 #define ICH8_FLASH_READ32(sc, reg)					\
    567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    568 	    (reg) + sc->sc_flashreg_offset)
    569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    571 	    (reg) + sc->sc_flashreg_offset, (data))
    572 
    573 #define ICH8_FLASH_READ16(sc, reg)					\
    574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    575 	    (reg) + sc->sc_flashreg_offset)
    576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset, (data))
    579 
    580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    582 
    583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    584 #define	WM_CDTXADDR_HI(txq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    587 
    588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    589 #define	WM_CDRXADDR_HI(rxq, x)						\
    590 	(sizeof(bus_addr_t) == 8 ?					\
    591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
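
/*
 * Worked example (illustrative, 64-bit bus_addr_t): descriptor ring
 * base addresses are programmed as two 32-bit register halves, so for
 * WM_CDTXADDR() == 0x0000000123456000:
 *
 *	WM_CDTXADDR_LO() == 0x23456000
 *	WM_CDTXADDR_HI() == 0x00000001
 *
 * With a 32-bit bus_addr_t the high half is always 0.
 */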
    592 
    593 /*
    594  * Register read/write functions.
    595  * Other than CSR_{READ|WRITE}().
    596  */
    597 #if 0
    598 static inline uint32_t wm_io_read(struct wm_softc *, int);
    599 #endif
    600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    602 	uint32_t, uint32_t);
    603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    604 
    605 /*
    606  * Descriptor sync/init functions.
    607  */
    608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    611 
    612 /*
    613  * Device driver interface functions and commonly used functions.
    614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    615  */
    616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    617 static int	wm_match(device_t, cfdata_t, void *);
    618 static void	wm_attach(device_t, device_t, void *);
    619 static int	wm_detach(device_t, int);
    620 static bool	wm_suspend(device_t, const pmf_qual_t *);
    621 static bool	wm_resume(device_t, const pmf_qual_t *);
    622 static void	wm_watchdog(struct ifnet *);
    623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    624 static void	wm_tick(void *);
    625 static int	wm_ifflags_cb(struct ethercom *);
    626 static int	wm_ioctl(struct ifnet *, u_long, void *);
    627 /* MAC address related */
    628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    632 static void	wm_set_filter(struct wm_softc *);
    633 /* Reset and init related */
    634 static void	wm_set_vlan(struct wm_softc *);
    635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    636 static void	wm_get_auto_rd_done(struct wm_softc *);
    637 static void	wm_lan_init_done(struct wm_softc *);
    638 static void	wm_get_cfg_done(struct wm_softc *);
    639 static void	wm_initialize_hardware_bits(struct wm_softc *);
    640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    641 static void	wm_reset_phy(struct wm_softc *);
    642 static void	wm_flush_desc_rings(struct wm_softc *);
    643 static void	wm_reset(struct wm_softc *);
    644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    645 static void	wm_rxdrain(struct wm_rxqueue *);
    646 static void	wm_rss_getkey(uint8_t *);
    647 static void	wm_init_rss(struct wm_softc *);
    648 static void	wm_adjust_qnum(struct wm_softc *, int);
    649 static int	wm_setup_legacy(struct wm_softc *);
    650 static int	wm_setup_msix(struct wm_softc *);
    651 static int	wm_init(struct ifnet *);
    652 static int	wm_init_locked(struct ifnet *);
    653 static void	wm_turnon(struct wm_softc *);
    654 static void	wm_turnoff(struct wm_softc *);
    655 static void	wm_stop(struct ifnet *, int);
    656 static void	wm_stop_locked(struct ifnet *, int);
    657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    658 static void	wm_82547_txfifo_stall(void *);
    659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    660 /* DMA related */
    661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    665     struct wm_txqueue *);
    666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    669     struct wm_rxqueue *);
    670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    677     struct wm_txqueue *);
    678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_txrx_queues(struct wm_softc *);
    681 static void	wm_free_txrx_queues(struct wm_softc *);
    682 static int	wm_init_txrx_queues(struct wm_softc *);
    683 /* Start */
    684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    685     uint32_t *, uint8_t *);
    686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    687 static void	wm_start(struct ifnet *);
    688 static void	wm_start_locked(struct ifnet *);
    689 static int	wm_transmit(struct ifnet *, struct mbuf *);
    690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    694 static void	wm_nq_start(struct ifnet *);
    695 static void	wm_nq_start_locked(struct ifnet *);
    696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    699 /* Interrupt */
    700 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    701 static void	wm_rxeof(struct wm_rxqueue *);
    702 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    703 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    704 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    705 static void	wm_linkintr(struct wm_softc *, uint32_t);
    706 static int	wm_intr_legacy(void *);
    707 static int	wm_txrxintr_msix(void *);
    708 static int	wm_linkintr_msix(void *);
    709 
    710 /*
    711  * Media related.
    712  * GMII, SGMII, TBI, SERDES and SFP.
    713  */
    714 /* Common */
    715 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    716 /* GMII related */
    717 static void	wm_gmii_reset(struct wm_softc *);
    718 static int	wm_get_phy_id_82575(struct wm_softc *);
    719 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    720 static int	wm_gmii_mediachange(struct ifnet *);
    721 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    722 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    723 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    724 static int	wm_gmii_i82543_readreg(device_t, int, int);
    725 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    726 static int	wm_gmii_mdic_readreg(device_t, int, int);
    727 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    728 static int	wm_gmii_i82544_readreg(device_t, int, int);
    729 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    730 static int	wm_gmii_i80003_readreg(device_t, int, int);
    731 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    732 static int	wm_gmii_bm_readreg(device_t, int, int);
    733 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    734 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    735 static int	wm_gmii_hv_readreg(device_t, int, int);
    736 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    737 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    738 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    739 static int	wm_gmii_82580_readreg(device_t, int, int);
    740 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    741 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    742 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    743 static void	wm_gmii_statchg(struct ifnet *);
    744 /*
    745  * kumeran related (80003, ICH* and PCH*).
    746  * These functions are not for accessing MII registers but for accessing
    747  * kumeran specific registers.
    748  */
    749 static int	wm_kmrn_readreg(struct wm_softc *, int);
    750 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    751 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    752 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    753 /* SGMII */
    754 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    755 static int	wm_sgmii_readreg(device_t, int, int);
    756 static void	wm_sgmii_writereg(device_t, int, int, int);
    757 /* TBI related */
    758 static void	wm_tbi_mediainit(struct wm_softc *);
    759 static int	wm_tbi_mediachange(struct ifnet *);
    760 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    761 static int	wm_check_for_link(struct wm_softc *);
    762 static void	wm_tbi_tick(struct wm_softc *);
    763 /* SERDES related */
    764 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    765 static int	wm_serdes_mediachange(struct ifnet *);
    766 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    767 static void	wm_serdes_tick(struct wm_softc *);
    768 /* SFP related */
    769 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    770 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    771 
    772 /*
    773  * NVM related.
    774  * Microwire, SPI (w/wo EERD) and Flash.
    775  */
    776 /* Misc functions */
    777 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    778 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    779 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    780 /* Microwire */
    781 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    782 /* SPI */
    783 static int	wm_nvm_ready_spi(struct wm_softc *);
    784 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    785 /* Using with EERD */
    786 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    787 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    788 /* Flash */
    789 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    790     unsigned int *);
    791 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    792 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    793 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    794 	uint32_t *);
    795 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    796 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    797 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    798 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    799 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    800 /* iNVM */
    801 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    802 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    803 /* Lock, detect NVM type, validate checksum, and read */
    804 static int	wm_nvm_acquire(struct wm_softc *);
    805 static void	wm_nvm_release(struct wm_softc *);
    806 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    807 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    808 static int	wm_nvm_validate_checksum(struct wm_softc *);
    809 static void	wm_nvm_version_invm(struct wm_softc *);
    810 static void	wm_nvm_version(struct wm_softc *);
    811 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    812 
    813 /*
    814  * Hardware semaphores.
    815  * Very complex...
    816  */
    817 static int	wm_get_null(struct wm_softc *);
    818 static void	wm_put_null(struct wm_softc *);
    819 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    820 static void	wm_put_swsm_semaphore(struct wm_softc *);
    821 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    822 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    823 static int	wm_get_phy_82575(struct wm_softc *);
    824 static void	wm_put_phy_82575(struct wm_softc *);
    825 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    826 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    827 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    828 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    829 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    830 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    831 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    832 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    833 
    834 /*
    835  * Management mode and power management related subroutines.
    836  * BMC, AMT, suspend/resume and EEE.
    837  */
    838 #if 0
    839 static int	wm_check_mng_mode(struct wm_softc *);
    840 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    841 static int	wm_check_mng_mode_82574(struct wm_softc *);
    842 static int	wm_check_mng_mode_generic(struct wm_softc *);
    843 #endif
    844 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    845 static bool	wm_phy_resetisblocked(struct wm_softc *);
    846 static void	wm_get_hw_control(struct wm_softc *);
    847 static void	wm_release_hw_control(struct wm_softc *);
    848 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    849 static void	wm_smbustopci(struct wm_softc *);
    850 static void	wm_init_manageability(struct wm_softc *);
    851 static void	wm_release_manageability(struct wm_softc *);
    852 static void	wm_get_wakeup(struct wm_softc *);
    853 static void	wm_ulp_disable(struct wm_softc *);
    854 static void	wm_enable_phy_wakeup(struct wm_softc *);
    855 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    856 static void	wm_enable_wakeup(struct wm_softc *);
    857 /* LPLU (Low Power Link Up) */
    858 static void	wm_lplu_d0_disable(struct wm_softc *);
    859 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    860 /* EEE */
    861 static void	wm_set_eee_i350(struct wm_softc *);
    862 
    863 /*
    864  * Workarounds (mainly PHY related).
    865  * Basically, PHY's workarounds are in the PHY drivers.
    866  */
    867 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    868 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    869 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    870 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    871 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    872 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    873 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    874 static void	wm_reset_init_script_82575(struct wm_softc *);
    875 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    876 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    877 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    878 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    879 static void	wm_pll_workaround_i210(struct wm_softc *);
    880 
    881 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    882     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    883 
    884 /*
    885  * Devices supported by this driver.
    886  */
    887 static const struct wm_product {
    888 	pci_vendor_id_t		wmp_vendor;
    889 	pci_product_id_t	wmp_product;
    890 	const char		*wmp_name;
    891 	wm_chip_type		wmp_type;
    892 	uint32_t		wmp_flags;
    893 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    894 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    895 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    896 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    897 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    898 } wm_products[] = {
    899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    900 	  "Intel i82542 1000BASE-X Ethernet",
    901 	  WM_T_82542_2_1,	WMP_F_FIBER },
    902 
    903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    904 	  "Intel i82543GC 1000BASE-X Ethernet",
    905 	  WM_T_82543,		WMP_F_FIBER },
    906 
    907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    908 	  "Intel i82543GC 1000BASE-T Ethernet",
    909 	  WM_T_82543,		WMP_F_COPPER },
    910 
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    912 	  "Intel i82544EI 1000BASE-T Ethernet",
    913 	  WM_T_82544,		WMP_F_COPPER },
    914 
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    916 	  "Intel i82544EI 1000BASE-X Ethernet",
    917 	  WM_T_82544,		WMP_F_FIBER },
    918 
    919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    920 	  "Intel i82544GC 1000BASE-T Ethernet",
    921 	  WM_T_82544,		WMP_F_COPPER },
    922 
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    924 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    925 	  WM_T_82544,		WMP_F_COPPER },
    926 
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    928 	  "Intel i82540EM 1000BASE-T Ethernet",
    929 	  WM_T_82540,		WMP_F_COPPER },
    930 
    931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    932 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    933 	  WM_T_82540,		WMP_F_COPPER },
    934 
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    936 	  "Intel i82540EP 1000BASE-T Ethernet",
    937 	  WM_T_82540,		WMP_F_COPPER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    940 	  "Intel i82540EP 1000BASE-T Ethernet",
    941 	  WM_T_82540,		WMP_F_COPPER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    944 	  "Intel i82540EP 1000BASE-T Ethernet",
    945 	  WM_T_82540,		WMP_F_COPPER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    948 	  "Intel i82545EM 1000BASE-T Ethernet",
    949 	  WM_T_82545,		WMP_F_COPPER },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    952 	  "Intel i82545GM 1000BASE-T Ethernet",
    953 	  WM_T_82545_3,		WMP_F_COPPER },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    956 	  "Intel i82545GM 1000BASE-X Ethernet",
    957 	  WM_T_82545_3,		WMP_F_FIBER },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    960 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    961 	  WM_T_82545_3,		WMP_F_SERDES },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    964 	  "Intel i82546EB 1000BASE-T Ethernet",
    965 	  WM_T_82546,		WMP_F_COPPER },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    968 	  "Intel i82546EB 1000BASE-T Ethernet",
    969 	  WM_T_82546,		WMP_F_COPPER },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    972 	  "Intel i82545EM 1000BASE-X Ethernet",
    973 	  WM_T_82545,		WMP_F_FIBER },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    976 	  "Intel i82546EB 1000BASE-X Ethernet",
    977 	  WM_T_82546,		WMP_F_FIBER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    980 	  "Intel i82546GB 1000BASE-T Ethernet",
    981 	  WM_T_82546_3,		WMP_F_COPPER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    984 	  "Intel i82546GB 1000BASE-X Ethernet",
    985 	  WM_T_82546_3,		WMP_F_FIBER },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    988 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    989 	  WM_T_82546_3,		WMP_F_SERDES },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    992 	  "i82546GB quad-port Gigabit Ethernet",
    993 	  WM_T_82546_3,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    996 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    997 	  WM_T_82546_3,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1000 	  "Intel PRO/1000MT (82546GB)",
   1001 	  WM_T_82546_3,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1004 	  "Intel i82541EI 1000BASE-T Ethernet",
   1005 	  WM_T_82541,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1008 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1009 	  WM_T_82541,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1012 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1013 	  WM_T_82541,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1016 	  "Intel i82541ER 1000BASE-T Ethernet",
   1017 	  WM_T_82541_2,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1020 	  "Intel i82541GI 1000BASE-T Ethernet",
   1021 	  WM_T_82541_2,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1024 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1025 	  WM_T_82541_2,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1028 	  "Intel i82541PI 1000BASE-T Ethernet",
   1029 	  WM_T_82541_2,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1032 	  "Intel i82547EI 1000BASE-T Ethernet",
   1033 	  WM_T_82547,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1036 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1037 	  WM_T_82547,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1040 	  "Intel i82547GI 1000BASE-T Ethernet",
   1041 	  WM_T_82547_2,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1044 	  "Intel PRO/1000 PT (82571EB)",
   1045 	  WM_T_82571,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1048 	  "Intel PRO/1000 PF (82571EB)",
   1049 	  WM_T_82571,		WMP_F_FIBER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1052 	  "Intel PRO/1000 PB (82571EB)",
   1053 	  WM_T_82571,		WMP_F_SERDES },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1056 	  "Intel PRO/1000 QT (82571EB)",
   1057 	  WM_T_82571,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1060 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1061 	  WM_T_82571,		WMP_F_COPPER, },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1064 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1065 	  WM_T_82571,		WMP_F_COPPER, },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1068 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1069 	  WM_T_82571,		WMP_F_SERDES, },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1072 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1073 	  WM_T_82571,		WMP_F_SERDES, },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1076 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1077 	  WM_T_82571,		WMP_F_FIBER, },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1080 	  "Intel i82572EI 1000baseT Ethernet",
   1081 	  WM_T_82572,		WMP_F_COPPER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1084 	  "Intel i82572EI 1000baseX Ethernet",
   1085 	  WM_T_82572,		WMP_F_FIBER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1088 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1089 	  WM_T_82572,		WMP_F_SERDES },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1092 	  "Intel i82572EI 1000baseT Ethernet",
   1093 	  WM_T_82572,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1096 	  "Intel i82573E",
   1097 	  WM_T_82573,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1100 	  "Intel i82573E IAMT",
   1101 	  WM_T_82573,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1104 	  "Intel i82573L Gigabit Ethernet",
   1105 	  WM_T_82573,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1108 	  "Intel i82574L",
   1109 	  WM_T_82574,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1112 	  "Intel i82574L",
   1113 	  WM_T_82574,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1116 	  "Intel i82583V",
   1117 	  WM_T_82583,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1120 	  "i80003 dual 1000baseT Ethernet",
   1121 	  WM_T_80003,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1124 	  "i80003 dual 1000baseX Ethernet",
   1125 	  WM_T_80003,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1128 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1129 	  WM_T_80003,		WMP_F_SERDES },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1132 	  "Intel i80003 1000baseT Ethernet",
   1133 	  WM_T_80003,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1136 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1137 	  WM_T_80003,		WMP_F_SERDES },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1140 	  "Intel i82801H (M_AMT) LAN Controller",
   1141 	  WM_T_ICH8,		WMP_F_COPPER },
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1143 	  "Intel i82801H (AMT) LAN Controller",
   1144 	  WM_T_ICH8,		WMP_F_COPPER },
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1146 	  "Intel i82801H LAN Controller",
   1147 	  WM_T_ICH8,		WMP_F_COPPER },
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1149 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1150 	  WM_T_ICH8,		WMP_F_COPPER },
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1152 	  "Intel i82801H (M) LAN Controller",
   1153 	  WM_T_ICH8,		WMP_F_COPPER },
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1155 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1156 	  WM_T_ICH8,		WMP_F_COPPER },
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1158 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1159 	  WM_T_ICH8,		WMP_F_COPPER },
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1161 	  "82567V-3 LAN Controller",
   1162 	  WM_T_ICH8,		WMP_F_COPPER },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1164 	  "82801I (AMT) LAN Controller",
   1165 	  WM_T_ICH9,		WMP_F_COPPER },
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1167 	  "82801I 10/100 LAN Controller",
   1168 	  WM_T_ICH9,		WMP_F_COPPER },
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1170 	  "82801I (G) 10/100 LAN Controller",
   1171 	  WM_T_ICH9,		WMP_F_COPPER },
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1173 	  "82801I (GT) 10/100 LAN Controller",
   1174 	  WM_T_ICH9,		WMP_F_COPPER },
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1176 	  "82801I (C) LAN Controller",
   1177 	  WM_T_ICH9,		WMP_F_COPPER },
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1179 	  "82801I mobile LAN Controller",
   1180 	  WM_T_ICH9,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1182 	  "82801I mobile (V) LAN Controller",
   1183 	  WM_T_ICH9,		WMP_F_COPPER },
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1185 	  "82801I mobile (AMT) LAN Controller",
   1186 	  WM_T_ICH9,		WMP_F_COPPER },
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1188 	  "82567LM-4 LAN Controller",
   1189 	  WM_T_ICH9,		WMP_F_COPPER },
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1191 	  "82567LM-2 LAN Controller",
   1192 	  WM_T_ICH10,		WMP_F_COPPER },
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1194 	  "82567LF-2 LAN Controller",
   1195 	  WM_T_ICH10,		WMP_F_COPPER },
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1197 	  "82567LM-3 LAN Controller",
   1198 	  WM_T_ICH10,		WMP_F_COPPER },
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1200 	  "82567LF-3 LAN Controller",
   1201 	  WM_T_ICH10,		WMP_F_COPPER },
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1203 	  "82567V-2 LAN Controller",
   1204 	  WM_T_ICH10,		WMP_F_COPPER },
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1206 	  "82567V-3? LAN Controller",
   1207 	  WM_T_ICH10,		WMP_F_COPPER },
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1209 	  "HANKSVILLE LAN Controller",
   1210 	  WM_T_ICH10,		WMP_F_COPPER },
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1212 	  "PCH LAN (82577LM) Controller",
   1213 	  WM_T_PCH,		WMP_F_COPPER },
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1215 	  "PCH LAN (82577LC) Controller",
   1216 	  WM_T_PCH,		WMP_F_COPPER },
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1218 	  "PCH LAN (82578DM) Controller",
   1219 	  WM_T_PCH,		WMP_F_COPPER },
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1221 	  "PCH LAN (82578DC) Controller",
   1222 	  WM_T_PCH,		WMP_F_COPPER },
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1224 	  "PCH2 LAN (82579LM) Controller",
   1225 	  WM_T_PCH2,		WMP_F_COPPER },
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1227 	  "PCH2 LAN (82579V) Controller",
   1228 	  WM_T_PCH2,		WMP_F_COPPER },
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1230 	  "82575EB dual-1000baseT Ethernet",
   1231 	  WM_T_82575,		WMP_F_COPPER },
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1233 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1234 	  WM_T_82575,		WMP_F_SERDES },
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1236 	  "82575GB quad-1000baseT Ethernet",
   1237 	  WM_T_82575,		WMP_F_COPPER },
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1239 	  "82575GB quad-1000baseT Ethernet (PM)",
   1240 	  WM_T_82575,		WMP_F_COPPER },
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1242 	  "82576 1000BaseT Ethernet",
   1243 	  WM_T_82576,		WMP_F_COPPER },
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1245 	  "82576 1000BaseX Ethernet",
   1246 	  WM_T_82576,		WMP_F_FIBER },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1249 	  "82576 gigabit Ethernet (SERDES)",
   1250 	  WM_T_82576,		WMP_F_SERDES },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1253 	  "82576 quad-1000BaseT Ethernet",
   1254 	  WM_T_82576,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1257 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1258 	  WM_T_82576,		WMP_F_COPPER },
   1259 
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1261 	  "82576 gigabit Ethernet",
   1262 	  WM_T_82576,		WMP_F_COPPER },
   1263 
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1265 	  "82576 gigabit Ethernet (SERDES)",
   1266 	  WM_T_82576,		WMP_F_SERDES },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1268 	  "82576 quad-gigabit Ethernet (SERDES)",
   1269 	  WM_T_82576,		WMP_F_SERDES },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1272 	  "82580 1000BaseT Ethernet",
   1273 	  WM_T_82580,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1275 	  "82580 1000BaseX Ethernet",
   1276 	  WM_T_82580,		WMP_F_FIBER },
   1277 
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1279 	  "82580 1000BaseT Ethernet (SERDES)",
   1280 	  WM_T_82580,		WMP_F_SERDES },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1283 	  "82580 gigabit Ethernet (SGMII)",
   1284 	  WM_T_82580,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1286 	  "82580 dual-1000BaseT Ethernet",
   1287 	  WM_T_82580,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1290 	  "82580 quad-1000BaseX Ethernet",
   1291 	  WM_T_82580,		WMP_F_FIBER },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1294 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1295 	  WM_T_82580,		WMP_F_COPPER },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1298 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1299 	  WM_T_82580,		WMP_F_SERDES },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1302 	  "DH89XXCC 1000BASE-KX Ethernet",
   1303 	  WM_T_82580,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1306 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1307 	  WM_T_82580,		WMP_F_SERDES },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1310 	  "I350 Gigabit Network Connection",
   1311 	  WM_T_I350,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1314 	  "I350 Gigabit Fiber Network Connection",
   1315 	  WM_T_I350,		WMP_F_FIBER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1318 	  "I350 Gigabit Backplane Connection",
   1319 	  WM_T_I350,		WMP_F_SERDES },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1322 	  "I350 Quad Port Gigabit Ethernet",
   1323 	  WM_T_I350,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1326 	  "I350 Gigabit Connection",
   1327 	  WM_T_I350,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1330 	  "I354 Gigabit Ethernet (KX)",
   1331 	  WM_T_I354,		WMP_F_SERDES },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1334 	  "I354 Gigabit Ethernet (SGMII)",
   1335 	  WM_T_I354,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1338 	  "I354 Gigabit Ethernet (2.5G)",
   1339 	  WM_T_I354,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1342 	  "I210-T1 Ethernet Server Adapter",
   1343 	  WM_T_I210,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1346 	  "I210 Ethernet (Copper OEM)",
   1347 	  WM_T_I210,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1350 	  "I210 Ethernet (Copper IT)",
   1351 	  WM_T_I210,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1354 	  "I210 Ethernet (FLASH less)",
   1355 	  WM_T_I210,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1358 	  "I210 Gigabit Ethernet (Fiber)",
   1359 	  WM_T_I210,		WMP_F_FIBER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1362 	  "I210 Gigabit Ethernet (SERDES)",
   1363 	  WM_T_I210,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1366 	  "I210 Gigabit Ethernet (FLASH less)",
   1367 	  WM_T_I210,		WMP_F_SERDES },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1370 	  "I210 Gigabit Ethernet (SGMII)",
   1371 	  WM_T_I210,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1374 	  "I211 Ethernet (COPPER)",
   1375 	  WM_T_I211,		WMP_F_COPPER },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1377 	  "I217 V Ethernet Connection",
   1378 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1380 	  "I217 LM Ethernet Connection",
   1381 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1383 	  "I218 V Ethernet Connection",
   1384 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1386 	  "I218 V Ethernet Connection",
   1387 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1389 	  "I218 V Ethernet Connection",
   1390 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1392 	  "I218 LM Ethernet Connection",
   1393 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1395 	  "I218 LM Ethernet Connection",
   1396 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1398 	  "I218 LM Ethernet Connection",
   1399 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1400 #if 0
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1402 	  "I219 V Ethernet Connection",
   1403 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1405 	  "I219 V Ethernet Connection",
   1406 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1408 	  "I219 V Ethernet Connection",
   1409 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1411 	  "I219 V Ethernet Connection",
   1412 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1414 	  "I219 LM Ethernet Connection",
   1415 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1417 	  "I219 LM Ethernet Connection",
   1418 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1420 	  "I219 LM Ethernet Connection",
   1421 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1423 	  "I219 LM Ethernet Connection",
   1424 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1426 	  "I219 LM Ethernet Connection",
   1427 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1428 #endif
   1429 	{ 0,			0,
   1430 	  NULL,
   1431 	  0,			0 },
   1432 };
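
         /*
          * Note: wm_lookup() below walks this table, matching on the PCI
          * vendor and product ID, and relies on the terminating entry
          * whose wmp_name is NULL.
          */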
   1433 
   1434 /*
    1435  * Register read/write functions other than CSR_{READ|WRITE}().
   1437  */
   1438 
   1439 #if 0 /* Not currently used */
   1440 static inline uint32_t
   1441 wm_io_read(struct wm_softc *sc, int reg)
   1442 {
   1443 
   1444 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1445 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1446 }
   1447 #endif
   1448 
   1449 static inline void
   1450 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1451 {
   1452 
   1453 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1455 }
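
         /*
          * The pair of bus_space accesses above (and in the currently
          * compiled-out wm_io_read()) implement the indirect I/O window
          * of the i8254x: the CSR offset is first written to the IOADDR
          * register at I/O BAR offset 0, and the data is then transferred
          * through the IODATA register at offset 4.  For example,
          * wm_io_write(sc, WMREG_CTRL, val) has the same effect as
          * CSR_WRITE(sc, WMREG_CTRL, val), just via I/O space.
          */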
   1456 
   1457 static inline void
   1458 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1459     uint32_t data)
   1460 {
   1461 	uint32_t regval;
   1462 	int i;
   1463 
   1464 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1465 
   1466 	CSR_WRITE(sc, reg, regval);
   1467 
   1468 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1469 		delay(5);
   1470 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1471 			break;
   1472 	}
   1473 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1474 		aprint_error("%s: WARNING:"
   1475 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1476 		    device_xname(sc->sc_dev), reg);
   1477 	}
   1478 }
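
         /*
          * The polling loop above gives the hardware at most
          * SCTL_CTL_POLL_TIMEOUT iterations of 5us each (i.e.
          * SCTL_CTL_POLL_TIMEOUT * 5 microseconds in total) to assert
          * SCTL_CTL_READY before the warning is printed.
          */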
   1479 
   1480 static inline void
   1481 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1482 {
   1483 	wa->wa_low = htole32(v & 0xffffffffU);
   1484 	if (sizeof(bus_addr_t) == 8)
   1485 		wa->wa_high = htole32((uint64_t) v >> 32);
   1486 	else
   1487 		wa->wa_high = 0;
   1488 }
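
         /*
          * For example, with a 64-bit bus_addr_t, v = 0x123456789 stores
          * wa_low = htole32(0x23456789) and wa_high = htole32(0x1); with
          * a 32-bit bus_addr_t the high word is simply forced to zero.
          */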
   1489 
   1490 /*
   1491  * Descriptor sync/init functions.
   1492  */
   1493 static inline void
   1494 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1495 {
   1496 	struct wm_softc *sc = txq->txq_sc;
   1497 
   1498 	/* If it will wrap around, sync to the end of the ring. */
   1499 	if ((start + num) > WM_NTXDESC(txq)) {
   1500 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1501 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1502 		    (WM_NTXDESC(txq) - start), ops);
   1503 		num -= (WM_NTXDESC(txq) - start);
   1504 		start = 0;
   1505 	}
   1506 
   1507 	/* Now sync whatever is left. */
   1508 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1509 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1510 }
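
         /*
          * A worked example of the wrap-around case above: on a
          * 256-descriptor ring (a hypothetical size), wm_cdtxsync(txq,
          * 250, 10, ops) syncs descriptors 250-255 in the first
          * bus_dmamap_sync() call, then descriptors 0-3 in the second.
          */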
   1511 
   1512 static inline void
   1513 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1514 {
   1515 	struct wm_softc *sc = rxq->rxq_sc;
   1516 
   1517 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1518 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1519 }
   1520 
   1521 static inline void
   1522 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1523 {
   1524 	struct wm_softc *sc = rxq->rxq_sc;
   1525 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1526 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1527 	struct mbuf *m = rxs->rxs_mbuf;
   1528 
   1529 	/*
   1530 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1531 	 * so that the payload after the Ethernet header is aligned
   1532 	 * to a 4-byte boundary.
    1533 	 *
   1534 	 * XXX BRAINDAMAGE ALERT!
   1535 	 * The stupid chip uses the same size for every buffer, which
   1536 	 * is set in the Receive Control register.  We are using the 2K
   1537 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1538 	 * reason, we can't "scoot" packets longer than the standard
   1539 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1540 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1541 	 * the upper layer copy the headers.
   1542 	 */
   1543 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1544 
   1545 	wm_set_dma_addr(&rxd->wrx_addr,
   1546 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1547 	rxd->wrx_len = 0;
   1548 	rxd->wrx_cksum = 0;
   1549 	rxd->wrx_status = 0;
   1550 	rxd->wrx_errors = 0;
   1551 	rxd->wrx_special = 0;
   1552 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1553 
   1554 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1555 }
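
         /*
          * The 2-byte "scoot" above works because the Ethernet header is
          * 14 bytes long: starting the frame at offset 2 places the IP
          * header at offset 16, a 4-byte boundary, which is what
          * strict-alignment platforms need.
          */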
   1556 
   1557 /*
   1558  * Device driver interface functions and commonly used functions.
   1559  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1560  */
   1561 
   1562 /* Lookup supported device table */
   1563 static const struct wm_product *
   1564 wm_lookup(const struct pci_attach_args *pa)
   1565 {
   1566 	const struct wm_product *wmp;
   1567 
   1568 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1569 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1570 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1571 			return wmp;
   1572 	}
   1573 	return NULL;
   1574 }
   1575 
   1576 /* The match function (ca_match) */
   1577 static int
   1578 wm_match(device_t parent, cfdata_t cf, void *aux)
   1579 {
   1580 	struct pci_attach_args *pa = aux;
   1581 
   1582 	if (wm_lookup(pa) != NULL)
   1583 		return 1;
   1584 
   1585 	return 0;
   1586 }
   1587 
   1588 /* The attach function (ca_attach) */
   1589 static void
   1590 wm_attach(device_t parent, device_t self, void *aux)
   1591 {
   1592 	struct wm_softc *sc = device_private(self);
   1593 	struct pci_attach_args *pa = aux;
   1594 	prop_dictionary_t dict;
   1595 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1596 	pci_chipset_tag_t pc = pa->pa_pc;
   1597 	int counts[PCI_INTR_TYPE_SIZE];
   1598 	pci_intr_type_t max_type;
   1599 	const char *eetype, *xname;
   1600 	bus_space_tag_t memt;
   1601 	bus_space_handle_t memh;
   1602 	bus_size_t memsize;
   1603 	int memh_valid;
   1604 	int i, error;
   1605 	const struct wm_product *wmp;
   1606 	prop_data_t ea;
   1607 	prop_number_t pn;
   1608 	uint8_t enaddr[ETHER_ADDR_LEN];
   1609 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1610 	pcireg_t preg, memtype;
   1611 	uint16_t eeprom_data, apme_mask;
   1612 	bool force_clear_smbi;
   1613 	uint32_t link_mode;
   1614 	uint32_t reg;
   1615 
   1616 	sc->sc_dev = self;
   1617 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1618 	sc->sc_core_stopping = false;
   1619 
   1620 	wmp = wm_lookup(pa);
   1621 #ifdef DIAGNOSTIC
   1622 	if (wmp == NULL) {
   1623 		printf("\n");
   1624 		panic("wm_attach: impossible");
   1625 	}
   1626 #endif
   1627 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1628 
   1629 	sc->sc_pc = pa->pa_pc;
   1630 	sc->sc_pcitag = pa->pa_tag;
   1631 
   1632 	if (pci_dma64_available(pa))
   1633 		sc->sc_dmat = pa->pa_dmat64;
   1634 	else
   1635 		sc->sc_dmat = pa->pa_dmat;
   1636 
   1637 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1638 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1639 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1640 
   1641 	sc->sc_type = wmp->wmp_type;
   1642 
   1643 	/* Set default function pointers */
   1644 	sc->phy.acquire = wm_get_null;
   1645 	sc->phy.release = wm_put_null;
   1646 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1647 
   1648 	if (sc->sc_type < WM_T_82543) {
   1649 		if (sc->sc_rev < 2) {
   1650 			aprint_error_dev(sc->sc_dev,
   1651 			    "i82542 must be at least rev. 2\n");
   1652 			return;
   1653 		}
   1654 		if (sc->sc_rev < 3)
   1655 			sc->sc_type = WM_T_82542_2_0;
   1656 	}
   1657 
   1658 	/*
   1659 	 * Disable MSI for Errata:
   1660 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1661 	 *
   1662 	 *  82544: Errata 25
   1663 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1664 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1665 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1666 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1667 	 *
   1668 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1669 	 *
   1670 	 *  82571 & 82572: Errata 63
   1671 	 */
   1672 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1673 	    || (sc->sc_type == WM_T_82572))
   1674 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1675 
   1676 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1677 	    || (sc->sc_type == WM_T_82580)
   1678 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1679 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1680 		sc->sc_flags |= WM_F_NEWQUEUE;
   1681 
   1682 	/* Set device properties (mactype) */
   1683 	dict = device_properties(sc->sc_dev);
   1684 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1685 
   1686 	/*
    1687 	 * Map the device.  All devices support memory-mapped access,
   1688 	 * and it is really required for normal operation.
   1689 	 */
   1690 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1691 	switch (memtype) {
   1692 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1693 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1694 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1695 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1696 		break;
   1697 	default:
   1698 		memh_valid = 0;
   1699 		break;
   1700 	}
   1701 
   1702 	if (memh_valid) {
   1703 		sc->sc_st = memt;
   1704 		sc->sc_sh = memh;
   1705 		sc->sc_ss = memsize;
   1706 	} else {
   1707 		aprint_error_dev(sc->sc_dev,
   1708 		    "unable to map device registers\n");
   1709 		return;
   1710 	}
   1711 
   1712 	/*
   1713 	 * In addition, i82544 and later support I/O mapped indirect
   1714 	 * register access.  It is not desirable (nor supported in
   1715 	 * this driver) to use it for normal operation, though it is
   1716 	 * required to work around bugs in some chip versions.
   1717 	 */
   1718 	if (sc->sc_type >= WM_T_82544) {
   1719 		/* First we have to find the I/O BAR. */
   1720 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1721 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1722 			if (memtype == PCI_MAPREG_TYPE_IO)
   1723 				break;
   1724 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1725 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1726 				i += 4;	/* skip high bits, too */
   1727 		}
   1728 		if (i < PCI_MAPREG_END) {
    1729 			/*
    1730 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1731 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    1732 			 * That's no problem, because those newer chips don't
    1733 			 * have the bugs that require indirect I/O access.
    1734 			 *
    1735 			 * The i8254x apparently doesn't respond when the
    1736 			 * I/O BAR is 0, which looks as if it has never
    1737 			 * been configured.
    1738 			 */
   1739 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1740 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1741 				aprint_error_dev(sc->sc_dev,
   1742 				    "WARNING: I/O BAR at zero.\n");
   1743 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1744 					0, &sc->sc_iot, &sc->sc_ioh,
   1745 					NULL, &sc->sc_ios) == 0) {
   1746 				sc->sc_flags |= WM_F_IOH_VALID;
   1747 			} else {
   1748 				aprint_error_dev(sc->sc_dev,
   1749 				    "WARNING: unable to map I/O space\n");
   1750 			}
   1751 		}
   1752 
   1753 	}
   1754 
   1755 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1756 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1757 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1758 	if (sc->sc_type < WM_T_82542_2_1)
   1759 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1760 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1761 
   1762 	/* power up chip */
   1763 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1764 	    NULL)) && error != EOPNOTSUPP) {
   1765 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1766 		return;
   1767 	}
   1768 
   1769 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1770 
   1771 	/* Allocation settings */
   1772 	max_type = PCI_INTR_TYPE_MSIX;
   1773 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1774 	counts[PCI_INTR_TYPE_MSI] = 1;
   1775 	counts[PCI_INTR_TYPE_INTX] = 1;
   1776 
   1777 alloc_retry:
   1778 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1779 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1780 		return;
   1781 	}
   1782 
   1783 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1784 		error = wm_setup_msix(sc);
   1785 		if (error) {
   1786 			pci_intr_release(pc, sc->sc_intrs,
   1787 			    counts[PCI_INTR_TYPE_MSIX]);
   1788 
   1789 			/* Setup for MSI: Disable MSI-X */
   1790 			max_type = PCI_INTR_TYPE_MSI;
   1791 			counts[PCI_INTR_TYPE_MSI] = 1;
   1792 			counts[PCI_INTR_TYPE_INTX] = 1;
   1793 			goto alloc_retry;
   1794 		}
    1795 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1796 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1797 		error = wm_setup_legacy(sc);
   1798 		if (error) {
   1799 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1800 			    counts[PCI_INTR_TYPE_MSI]);
   1801 
   1802 			/* The next try is for INTx: Disable MSI */
   1803 			max_type = PCI_INTR_TYPE_INTX;
   1804 			counts[PCI_INTR_TYPE_INTX] = 1;
   1805 			goto alloc_retry;
   1806 		}
   1807 	} else {
   1808 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1809 		error = wm_setup_legacy(sc);
   1810 		if (error) {
   1811 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1812 			    counts[PCI_INTR_TYPE_INTX]);
   1813 			return;
   1814 		}
   1815 	}
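
         	/*
          	 * To recap the fallback chain above: first try MSI-X with
          	 * one vector per queue plus one more (used for events such
          	 * as link state changes); on failure, release the vectors
          	 * and retry with a single MSI, and finally fall back to a
          	 * legacy INTx interrupt.
          	 */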
   1816 
   1817 	/*
   1818 	 * Check the function ID (unit number of the chip).
   1819 	 */
   1820 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1821 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1822 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1823 	    || (sc->sc_type == WM_T_82580)
   1824 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1825 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1826 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1827 	else
   1828 		sc->sc_funcid = 0;
   1829 
   1830 	/*
   1831 	 * Determine a few things about the bus we're connected to.
   1832 	 */
   1833 	if (sc->sc_type < WM_T_82543) {
   1834 		/* We don't really know the bus characteristics here. */
   1835 		sc->sc_bus_speed = 33;
   1836 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1837 		/*
    1838 		 * CSA (Communication Streaming Architecture) is about as
    1839 		 * fast as a 32-bit 66MHz PCI bus.
   1840 		 */
   1841 		sc->sc_flags |= WM_F_CSA;
   1842 		sc->sc_bus_speed = 66;
   1843 		aprint_verbose_dev(sc->sc_dev,
   1844 		    "Communication Streaming Architecture\n");
   1845 		if (sc->sc_type == WM_T_82547) {
   1846 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1847 			callout_setfunc(&sc->sc_txfifo_ch,
   1848 					wm_82547_txfifo_stall, sc);
   1849 			aprint_verbose_dev(sc->sc_dev,
   1850 			    "using 82547 Tx FIFO stall work-around\n");
   1851 		}
   1852 	} else if (sc->sc_type >= WM_T_82571) {
   1853 		sc->sc_flags |= WM_F_PCIE;
   1854 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1855 		    && (sc->sc_type != WM_T_ICH10)
   1856 		    && (sc->sc_type != WM_T_PCH)
   1857 		    && (sc->sc_type != WM_T_PCH2)
   1858 		    && (sc->sc_type != WM_T_PCH_LPT)
   1859 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1860 			/* ICH* and PCH* have no PCIe capability registers */
   1861 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1862 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1863 				NULL) == 0)
   1864 				aprint_error_dev(sc->sc_dev,
   1865 				    "unable to find PCIe capability\n");
   1866 		}
   1867 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1868 	} else {
   1869 		reg = CSR_READ(sc, WMREG_STATUS);
   1870 		if (reg & STATUS_BUS64)
   1871 			sc->sc_flags |= WM_F_BUS64;
   1872 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1873 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1874 
   1875 			sc->sc_flags |= WM_F_PCIX;
   1876 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1877 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1878 				aprint_error_dev(sc->sc_dev,
   1879 				    "unable to find PCIX capability\n");
   1880 			else if (sc->sc_type != WM_T_82545_3 &&
   1881 				 sc->sc_type != WM_T_82546_3) {
   1882 				/*
   1883 				 * Work around a problem caused by the BIOS
   1884 				 * setting the max memory read byte count
   1885 				 * incorrectly.
   1886 				 */
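         				/*
         				 * MMRBC is encoded as 512 << n bytes
         				 * (cf. the aprint below), so e.g. a
         				 * bytecnt of 2 (2048 bytes) is clamped
         				 * to a maxb of 1 (1024 bytes).
         				 */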
   1887 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1888 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1889 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1890 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1891 
   1892 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1893 				    PCIX_CMD_BYTECNT_SHIFT;
   1894 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1895 				    PCIX_STATUS_MAXB_SHIFT;
   1896 				if (bytecnt > maxb) {
   1897 					aprint_verbose_dev(sc->sc_dev,
   1898 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1899 					    512 << bytecnt, 512 << maxb);
   1900 					pcix_cmd = (pcix_cmd &
   1901 					    ~PCIX_CMD_BYTECNT_MASK) |
   1902 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1903 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1904 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1905 					    pcix_cmd);
   1906 				}
   1907 			}
   1908 		}
   1909 		/*
   1910 		 * The quad port adapter is special; it has a PCIX-PCIX
   1911 		 * bridge on the board, and can run the secondary bus at
   1912 		 * a higher speed.
   1913 		 */
   1914 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1915 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1916 								      : 66;
   1917 		} else if (sc->sc_flags & WM_F_PCIX) {
   1918 			switch (reg & STATUS_PCIXSPD_MASK) {
   1919 			case STATUS_PCIXSPD_50_66:
   1920 				sc->sc_bus_speed = 66;
   1921 				break;
   1922 			case STATUS_PCIXSPD_66_100:
   1923 				sc->sc_bus_speed = 100;
   1924 				break;
   1925 			case STATUS_PCIXSPD_100_133:
   1926 				sc->sc_bus_speed = 133;
   1927 				break;
   1928 			default:
   1929 				aprint_error_dev(sc->sc_dev,
   1930 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1931 				    reg & STATUS_PCIXSPD_MASK);
   1932 				sc->sc_bus_speed = 66;
   1933 				break;
   1934 			}
   1935 		} else
   1936 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1937 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1938 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1939 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1940 	}
   1941 
   1942 	/* clear interesting stat counters */
   1943 	CSR_READ(sc, WMREG_COLC);
   1944 	CSR_READ(sc, WMREG_RXERRC);
   1945 
   1946 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1947 	    || (sc->sc_type >= WM_T_ICH8))
   1948 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1949 	if (sc->sc_type >= WM_T_ICH8)
   1950 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1951 
   1952 	/* Set PHY, NVM mutex related stuff */
   1953 	switch (sc->sc_type) {
   1954 	case WM_T_82542_2_0:
   1955 	case WM_T_82542_2_1:
   1956 	case WM_T_82543:
   1957 	case WM_T_82544:
   1958 		/* Microwire */
   1959 		sc->sc_nvm_wordsize = 64;
   1960 		sc->sc_nvm_addrbits = 6;
   1961 		break;
   1962 	case WM_T_82540:
   1963 	case WM_T_82545:
   1964 	case WM_T_82545_3:
   1965 	case WM_T_82546:
   1966 	case WM_T_82546_3:
   1967 		/* Microwire */
   1968 		reg = CSR_READ(sc, WMREG_EECD);
   1969 		if (reg & EECD_EE_SIZE) {
   1970 			sc->sc_nvm_wordsize = 256;
   1971 			sc->sc_nvm_addrbits = 8;
   1972 		} else {
   1973 			sc->sc_nvm_wordsize = 64;
   1974 			sc->sc_nvm_addrbits = 6;
   1975 		}
   1976 		sc->sc_flags |= WM_F_LOCK_EECD;
   1977 		break;
   1978 	case WM_T_82541:
   1979 	case WM_T_82541_2:
   1980 	case WM_T_82547:
   1981 	case WM_T_82547_2:
   1982 		sc->sc_flags |= WM_F_LOCK_EECD;
   1983 		reg = CSR_READ(sc, WMREG_EECD);
   1984 		if (reg & EECD_EE_TYPE) {
   1985 			/* SPI */
   1986 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1987 			wm_nvm_set_addrbits_size_eecd(sc);
   1988 		} else {
   1989 			/* Microwire */
   1990 			if ((reg & EECD_EE_ABITS) != 0) {
   1991 				sc->sc_nvm_wordsize = 256;
   1992 				sc->sc_nvm_addrbits = 8;
   1993 			} else {
   1994 				sc->sc_nvm_wordsize = 64;
   1995 				sc->sc_nvm_addrbits = 6;
   1996 			}
   1997 		}
   1998 		break;
   1999 	case WM_T_82571:
   2000 	case WM_T_82572:
   2001 		/* SPI */
   2002 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2003 		wm_nvm_set_addrbits_size_eecd(sc);
   2004 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2005 		sc->phy.acquire = wm_get_swsm_semaphore;
   2006 		sc->phy.release = wm_put_swsm_semaphore;
   2007 		break;
   2008 	case WM_T_82573:
   2009 	case WM_T_82574:
   2010 	case WM_T_82583:
   2011 		if (sc->sc_type == WM_T_82573) {
   2012 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2013 			sc->phy.acquire = wm_get_swsm_semaphore;
   2014 			sc->phy.release = wm_put_swsm_semaphore;
   2015 		} else {
   2016 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2017 			/* Both PHY and NVM use the same semaphore. */
    2018 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2019 			sc->phy.release = wm_put_swfwhw_semaphore;
   2022 		}
   2023 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2024 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2025 			sc->sc_nvm_wordsize = 2048;
   2026 		} else {
   2027 			/* SPI */
   2028 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2029 			wm_nvm_set_addrbits_size_eecd(sc);
   2030 		}
   2031 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2032 		break;
   2033 	case WM_T_82575:
   2034 	case WM_T_82576:
   2035 	case WM_T_82580:
   2036 	case WM_T_I350:
   2037 	case WM_T_I354:
   2038 	case WM_T_80003:
   2039 		/* SPI */
   2040 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2041 		wm_nvm_set_addrbits_size_eecd(sc);
   2042 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2043 		    | WM_F_LOCK_SWSM;
   2044 		sc->phy.acquire = wm_get_phy_82575;
   2045 		sc->phy.release = wm_put_phy_82575;
   2046 		break;
   2047 	case WM_T_ICH8:
   2048 	case WM_T_ICH9:
   2049 	case WM_T_ICH10:
   2050 	case WM_T_PCH:
   2051 	case WM_T_PCH2:
   2052 	case WM_T_PCH_LPT:
   2053 		/* FLASH */
   2054 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2055 		sc->sc_nvm_wordsize = 2048;
   2056 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2057 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2058 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2059 			aprint_error_dev(sc->sc_dev,
   2060 			    "can't map FLASH registers\n");
   2061 			goto out;
   2062 		}
   2063 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2064 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2065 		    ICH_FLASH_SECTOR_SIZE;
   2066 		sc->sc_ich8_flash_bank_size =
   2067 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2068 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2069 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2070 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
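         		/*
         		 * A sketch of the arithmetic above, assuming 4 KB
         		 * flash sectors: GFPREG holds the first and last
         		 * sectors of the NVM region, so with base 0 and
         		 * limit 1 the two banks together span 2 sectors
         		 * (8 KB), and each bank then holds 8192 / 2 / 2 =
         		 * 2048 16-bit words.
         		 */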
   2071 		sc->sc_flashreg_offset = 0;
   2072 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2073 		sc->phy.release = wm_put_swflag_ich8lan;
   2074 		break;
   2075 	case WM_T_PCH_SPT:
   2076 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2077 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2078 		sc->sc_flasht = sc->sc_st;
   2079 		sc->sc_flashh = sc->sc_sh;
   2080 		sc->sc_ich8_flash_base = 0;
   2081 		sc->sc_nvm_wordsize =
   2082 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2083 			* NVM_SIZE_MULTIPLIER;
    2084 		/* It is the size in bytes; we want it in words */
   2085 		sc->sc_nvm_wordsize /= 2;
   2086 		/* assume 2 banks */
   2087 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
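         		/*
         		 * For example, assuming NVM_SIZE_MULTIPLIER is 4096:
         		 * a STRAP size field of 7 yields (7 + 1) * 4096 =
         		 * 32768 bytes, i.e. 16384 words, split into two
         		 * 8192-word banks.
         		 */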
   2088 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2089 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2090 		sc->phy.release = wm_put_swflag_ich8lan;
   2091 		break;
   2092 	case WM_T_I210:
   2093 	case WM_T_I211:
   2094 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2095 			wm_nvm_set_addrbits_size_eecd(sc);
   2096 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2097 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2098 		} else {
   2099 			sc->sc_nvm_wordsize = INVM_SIZE;
   2100 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2101 		}
   2102 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2103 		sc->phy.acquire = wm_get_phy_82575;
   2104 		sc->phy.release = wm_put_phy_82575;
   2105 		break;
   2106 	default:
   2107 		break;
   2108 	}
   2109 
   2110 	/* Reset the chip to a known state. */
   2111 	wm_reset(sc);
   2112 
   2113 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2114 	switch (sc->sc_type) {
   2115 	case WM_T_82571:
   2116 	case WM_T_82572:
   2117 		reg = CSR_READ(sc, WMREG_SWSM2);
   2118 		if ((reg & SWSM2_LOCK) == 0) {
   2119 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2120 			force_clear_smbi = true;
   2121 		} else
   2122 			force_clear_smbi = false;
   2123 		break;
   2124 	case WM_T_82573:
   2125 	case WM_T_82574:
   2126 	case WM_T_82583:
   2127 		force_clear_smbi = true;
   2128 		break;
   2129 	default:
   2130 		force_clear_smbi = false;
   2131 		break;
   2132 	}
   2133 	if (force_clear_smbi) {
   2134 		reg = CSR_READ(sc, WMREG_SWSM);
   2135 		if ((reg & SWSM_SMBI) != 0)
   2136 			aprint_error_dev(sc->sc_dev,
   2137 			    "Please update the Bootagent\n");
   2138 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2139 	}
   2140 
   2141 	/*
    2142 	 * Defer printing the EEPROM type until after verifying the checksum.
    2143 	 * This allows the EEPROM type to be printed correctly in the case
   2144 	 * that no EEPROM is attached.
   2145 	 */
   2146 	/*
   2147 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2148 	 * this for later, so we can fail future reads from the EEPROM.
   2149 	 */
   2150 	if (wm_nvm_validate_checksum(sc)) {
   2151 		/*
    2152 		 * Validate it a second time, because some PCIe parts fail
    2153 		 * the first check due to the link being in a sleep state.
   2154 		 */
   2155 		if (wm_nvm_validate_checksum(sc))
   2156 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2157 	}
   2158 
   2159 	/* Set device properties (macflags) */
   2160 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2161 
   2162 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2163 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2164 	else {
   2165 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2166 		    sc->sc_nvm_wordsize);
   2167 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2168 			aprint_verbose("iNVM");
   2169 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2170 			aprint_verbose("FLASH(HW)");
   2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2172 			aprint_verbose("FLASH");
   2173 		else {
   2174 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2175 				eetype = "SPI";
   2176 			else
   2177 				eetype = "MicroWire";
   2178 			aprint_verbose("(%d address bits) %s EEPROM",
   2179 			    sc->sc_nvm_addrbits, eetype);
   2180 		}
   2181 	}
   2182 	wm_nvm_version(sc);
   2183 	aprint_verbose("\n");
   2184 
   2185 	/* Check for I21[01] PLL workaround */
   2186 	if (sc->sc_type == WM_T_I210)
   2187 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2188 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2189 		/* NVM image release 3.25 has a workaround */
   2190 		if ((sc->sc_nvm_ver_major < 3)
   2191 		    || ((sc->sc_nvm_ver_major == 3)
   2192 			&& (sc->sc_nvm_ver_minor < 25))) {
   2193 			aprint_verbose_dev(sc->sc_dev,
   2194 			    "ROM image version %d.%d is older than 3.25\n",
   2195 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2196 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2197 		}
   2198 	}
   2199 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2200 		wm_pll_workaround_i210(sc);
   2201 
   2202 	wm_get_wakeup(sc);
   2203 
   2204 	/* Non-AMT based hardware can now take control from firmware */
   2205 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2206 		wm_get_hw_control(sc);
   2207 
   2208 	/*
    2209 	 * Read the Ethernet address from the EEPROM, unless it was
    2210 	 * already found in the device properties.
   2211 	 */
   2212 	ea = prop_dictionary_get(dict, "mac-address");
   2213 	if (ea != NULL) {
   2214 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2215 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2216 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2217 	} else {
   2218 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2219 			aprint_error_dev(sc->sc_dev,
   2220 			    "unable to read Ethernet address\n");
   2221 			goto out;
   2222 		}
   2223 	}
   2224 
   2225 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2226 	    ether_sprintf(enaddr));
   2227 
   2228 	/*
   2229 	 * Read the config info from the EEPROM, and set up various
   2230 	 * bits in the control registers based on their contents.
   2231 	 */
   2232 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2233 	if (pn != NULL) {
   2234 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2235 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2236 	} else {
   2237 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2238 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2239 			goto out;
   2240 		}
   2241 	}
   2242 
   2243 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2244 	if (pn != NULL) {
   2245 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2246 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2247 	} else {
   2248 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2249 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2250 			goto out;
   2251 		}
   2252 	}
   2253 
   2254 	/* check for WM_F_WOL */
   2255 	switch (sc->sc_type) {
   2256 	case WM_T_82542_2_0:
   2257 	case WM_T_82542_2_1:
   2258 	case WM_T_82543:
   2259 		/* dummy? */
   2260 		eeprom_data = 0;
   2261 		apme_mask = NVM_CFG3_APME;
   2262 		break;
   2263 	case WM_T_82544:
   2264 		apme_mask = NVM_CFG2_82544_APM_EN;
   2265 		eeprom_data = cfg2;
   2266 		break;
   2267 	case WM_T_82546:
   2268 	case WM_T_82546_3:
   2269 	case WM_T_82571:
   2270 	case WM_T_82572:
   2271 	case WM_T_82573:
   2272 	case WM_T_82574:
   2273 	case WM_T_82583:
   2274 	case WM_T_80003:
   2275 	default:
   2276 		apme_mask = NVM_CFG3_APME;
   2277 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2278 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2279 		break;
   2280 	case WM_T_82575:
   2281 	case WM_T_82576:
   2282 	case WM_T_82580:
   2283 	case WM_T_I350:
   2284 	case WM_T_I354: /* XXX ok? */
   2285 	case WM_T_ICH8:
   2286 	case WM_T_ICH9:
   2287 	case WM_T_ICH10:
   2288 	case WM_T_PCH:
   2289 	case WM_T_PCH2:
   2290 	case WM_T_PCH_LPT:
   2291 	case WM_T_PCH_SPT:
   2292 		/* XXX The funcid should be checked on some devices */
   2293 		apme_mask = WUC_APME;
   2294 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2295 		break;
   2296 	}
   2297 
    2298 	/* Set the WM_F_WOL flag based on the EEPROM/WUC value read above */
   2299 	if ((eeprom_data & apme_mask) != 0)
   2300 		sc->sc_flags |= WM_F_WOL;
   2301 #ifdef WM_DEBUG
   2302 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2303 		printf("WOL\n");
   2304 #endif
   2305 
   2306 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2307 		/* Check NVM for autonegotiation */
   2308 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2309 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2310 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2311 		}
   2312 	}
   2313 
   2314 	/*
   2315 	 * XXX need special handling for some multiple port cards
    2316 	 * to disable a particular port.
   2317 	 */
   2318 
   2319 	if (sc->sc_type >= WM_T_82544) {
   2320 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2321 		if (pn != NULL) {
   2322 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2323 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2324 		} else {
   2325 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2326 				aprint_error_dev(sc->sc_dev,
   2327 				    "unable to read SWDPIN\n");
   2328 				goto out;
   2329 			}
   2330 		}
   2331 	}
   2332 
   2333 	if (cfg1 & NVM_CFG1_ILOS)
   2334 		sc->sc_ctrl |= CTRL_ILOS;
   2335 
   2336 	/*
   2337 	 * XXX
    2338 	 * This code isn't correct because pins 2 and 3 are located
    2339 	 * at different positions on newer chips.  Check all datasheets.
    2340 	 *
    2341 	 * Until this problem is resolved, only do this for chips <= 82580.
   2342 	 */
   2343 	if (sc->sc_type <= WM_T_82580) {
   2344 		if (sc->sc_type >= WM_T_82544) {
   2345 			sc->sc_ctrl |=
   2346 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2347 			    CTRL_SWDPIO_SHIFT;
   2348 			sc->sc_ctrl |=
   2349 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2350 			    CTRL_SWDPINS_SHIFT;
   2351 		} else {
   2352 			sc->sc_ctrl |=
   2353 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2354 			    CTRL_SWDPIO_SHIFT;
   2355 		}
   2356 	}
   2357 
   2358 	/* XXX For other than 82580? */
   2359 	if (sc->sc_type == WM_T_82580) {
   2360 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2361 		if (nvmword & __BIT(13))
   2362 			sc->sc_ctrl |= CTRL_ILOS;
   2363 	}
   2364 
   2365 #if 0
   2366 	if (sc->sc_type >= WM_T_82544) {
   2367 		if (cfg1 & NVM_CFG1_IPS0)
   2368 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2369 		if (cfg1 & NVM_CFG1_IPS1)
   2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2371 		sc->sc_ctrl_ext |=
   2372 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2373 		    CTRL_EXT_SWDPIO_SHIFT;
   2374 		sc->sc_ctrl_ext |=
   2375 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2376 		    CTRL_EXT_SWDPINS_SHIFT;
   2377 	} else {
   2378 		sc->sc_ctrl_ext |=
   2379 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2380 		    CTRL_EXT_SWDPIO_SHIFT;
   2381 	}
   2382 #endif
   2383 
   2384 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2385 #if 0
   2386 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2387 #endif
   2388 
   2389 	if (sc->sc_type == WM_T_PCH) {
   2390 		uint16_t val;
   2391 
   2392 		/* Save the NVM K1 bit setting */
   2393 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2394 
   2395 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2396 			sc->sc_nvm_k1_enabled = 1;
   2397 		else
   2398 			sc->sc_nvm_k1_enabled = 0;
   2399 	}
   2400 
   2401 	/*
    2402 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2403 	 * media structures accordingly.
   2404 	 */
   2405 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2406 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2407 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2408 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2409 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2410 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2411 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2412 	} else if (sc->sc_type < WM_T_82543 ||
   2413 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2414 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2415 			aprint_error_dev(sc->sc_dev,
   2416 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2417 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2418 		}
   2419 		wm_tbi_mediainit(sc);
   2420 	} else {
   2421 		switch (sc->sc_type) {
   2422 		case WM_T_82575:
   2423 		case WM_T_82576:
   2424 		case WM_T_82580:
   2425 		case WM_T_I350:
   2426 		case WM_T_I354:
   2427 		case WM_T_I210:
   2428 		case WM_T_I211:
   2429 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2430 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2431 			switch (link_mode) {
   2432 			case CTRL_EXT_LINK_MODE_1000KX:
   2433 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2434 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2435 				break;
   2436 			case CTRL_EXT_LINK_MODE_SGMII:
   2437 				if (wm_sgmii_uses_mdio(sc)) {
   2438 					aprint_verbose_dev(sc->sc_dev,
   2439 					    "SGMII(MDIO)\n");
   2440 					sc->sc_flags |= WM_F_SGMII;
   2441 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2442 					break;
   2443 				}
   2444 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2445 				/*FALLTHROUGH*/
   2446 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2447 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2448 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2449 					if (link_mode
   2450 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2451 						sc->sc_mediatype
   2452 						    = WM_MEDIATYPE_COPPER;
   2453 						sc->sc_flags |= WM_F_SGMII;
   2454 					} else {
   2455 						sc->sc_mediatype
   2456 						    = WM_MEDIATYPE_SERDES;
   2457 						aprint_verbose_dev(sc->sc_dev,
   2458 						    "SERDES\n");
   2459 					}
   2460 					break;
   2461 				}
   2462 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2463 					aprint_verbose_dev(sc->sc_dev,
   2464 					    "SERDES\n");
   2465 
   2466 				/* Change current link mode setting */
   2467 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2468 				switch (sc->sc_mediatype) {
   2469 				case WM_MEDIATYPE_COPPER:
   2470 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2471 					break;
   2472 				case WM_MEDIATYPE_SERDES:
   2473 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2474 					break;
   2475 				default:
   2476 					break;
   2477 				}
   2478 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2479 				break;
   2480 			case CTRL_EXT_LINK_MODE_GMII:
   2481 			default:
   2482 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2483 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2484 				break;
   2485 			}
   2486 
    2487 			/* Enable I2C access only in SGMII mode */
    2488 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2489 				reg |= CTRL_EXT_I2C_ENA;
    2490 			else
    2491 				reg &= ~CTRL_EXT_I2C_ENA;
   2492 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2493 
   2494 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2495 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2496 			else
   2497 				wm_tbi_mediainit(sc);
   2498 			break;
   2499 		default:
   2500 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2501 				aprint_error_dev(sc->sc_dev,
   2502 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2503 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2504 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2505 		}
   2506 	}
   2507 
   2508 	ifp = &sc->sc_ethercom.ec_if;
   2509 	xname = device_xname(sc->sc_dev);
   2510 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2511 	ifp->if_softc = sc;
   2512 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2513 	ifp->if_extflags = IFEF_START_MPSAFE;
   2514 	ifp->if_ioctl = wm_ioctl;
   2515 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2516 		ifp->if_start = wm_nq_start;
   2517 		if (sc->sc_nqueues > 1)
   2518 			ifp->if_transmit = wm_nq_transmit;
   2519 	} else {
   2520 		ifp->if_start = wm_start;
   2521 		if (sc->sc_nqueues > 1)
   2522 			ifp->if_transmit = wm_transmit;
   2523 	}
   2524 	ifp->if_watchdog = wm_watchdog;
   2525 	ifp->if_init = wm_init;
   2526 	ifp->if_stop = wm_stop;
   2527 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2528 	IFQ_SET_READY(&ifp->if_snd);
   2529 
   2530 	/* Check for jumbo frame */
   2531 	switch (sc->sc_type) {
   2532 	case WM_T_82573:
   2533 		/* XXX limited to 9234 if ASPM is disabled */
   2534 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2535 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2536 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2537 		break;
   2538 	case WM_T_82571:
   2539 	case WM_T_82572:
   2540 	case WM_T_82574:
   2541 	case WM_T_82575:
   2542 	case WM_T_82576:
   2543 	case WM_T_82580:
   2544 	case WM_T_I350:
   2545 	case WM_T_I354: /* XXXX ok? */
   2546 	case WM_T_I210:
   2547 	case WM_T_I211:
   2548 	case WM_T_80003:
   2549 	case WM_T_ICH9:
   2550 	case WM_T_ICH10:
   2551 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2552 	case WM_T_PCH_LPT:
   2553 	case WM_T_PCH_SPT:
   2554 		/* XXX limited to 9234 */
   2555 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2556 		break;
   2557 	case WM_T_PCH:
   2558 		/* XXX limited to 4096 */
   2559 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2560 		break;
   2561 	case WM_T_82542_2_0:
   2562 	case WM_T_82542_2_1:
   2563 	case WM_T_82583:
   2564 	case WM_T_ICH8:
   2565 		/* No support for jumbo frame */
   2566 		break;
   2567 	default:
   2568 		/* ETHER_MAX_LEN_JUMBO */
   2569 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2570 		break;
   2571 	}
   2572 
    2573 	/* If we're an i82543 or greater, we can support VLANs. */
   2574 	if (sc->sc_type >= WM_T_82543)
   2575 		sc->sc_ethercom.ec_capabilities |=
   2576 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2577 
   2578 	/*
    2579 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2580 	 * on i82543 and later.
   2581 	 */
   2582 	if (sc->sc_type >= WM_T_82543) {
   2583 		ifp->if_capabilities |=
   2584 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2585 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2586 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2587 		    IFCAP_CSUM_TCPv6_Tx |
   2588 		    IFCAP_CSUM_UDPv6_Tx;
   2589 	}
   2590 
   2591 	/*
   2592 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2593 	 *
   2594 	 *	82541GI (8086:1076) ... no
   2595 	 *	82572EI (8086:10b9) ... yes
   2596 	 */
   2597 	if (sc->sc_type >= WM_T_82571) {
   2598 		ifp->if_capabilities |=
   2599 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2600 	}
   2601 
   2602 	/*
    2603 	 * If we're an i82544 or greater (except i82547), we can do
   2604 	 * TCP segmentation offload.
   2605 	 */
   2606 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2607 		ifp->if_capabilities |= IFCAP_TSOv4;
   2608 	}
   2609 
   2610 	if (sc->sc_type >= WM_T_82571) {
   2611 		ifp->if_capabilities |= IFCAP_TSOv6;
   2612 	}
   2613 
   2614 #ifdef WM_MPSAFE
   2615 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2616 #else
   2617 	sc->sc_core_lock = NULL;
   2618 #endif
   2619 
   2620 	/* Attach the interface. */
   2621 	if_initialize(ifp);
   2622 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2623 	ether_ifattach(ifp, enaddr);
   2624 	if_register(ifp);
   2625 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2626 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2627 			  RND_FLAG_DEFAULT);
   2628 
   2629 #ifdef WM_EVENT_COUNTERS
   2630 	/* Attach event counters. */
   2631 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2632 	    NULL, xname, "linkintr");
   2633 
   2634 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2635 	    NULL, xname, "tx_xoff");
   2636 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2637 	    NULL, xname, "tx_xon");
   2638 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2639 	    NULL, xname, "rx_xoff");
   2640 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2641 	    NULL, xname, "rx_xon");
   2642 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2643 	    NULL, xname, "rx_macctl");
   2644 #endif /* WM_EVENT_COUNTERS */
   2645 
   2646 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2647 		pmf_class_network_register(self, ifp);
   2648 	else
   2649 		aprint_error_dev(self, "couldn't establish power handler\n");
   2650 
   2651 	sc->sc_flags |= WM_F_ATTACHED;
   2652  out:
   2653 	return;
   2654 }
   2655 
   2656 /* The detach function (ca_detach) */
   2657 static int
   2658 wm_detach(device_t self, int flags __unused)
   2659 {
   2660 	struct wm_softc *sc = device_private(self);
   2661 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2662 	int i;
   2663 
   2664 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2665 		return 0;
   2666 
   2667 	/* Stop the interface. Callouts are stopped in it. */
   2668 	wm_stop(ifp, 1);
   2669 
   2670 	pmf_device_deregister(self);
   2671 
   2672 	/* Tell the firmware about the release */
   2673 	WM_CORE_LOCK(sc);
   2674 	wm_release_manageability(sc);
   2675 	wm_release_hw_control(sc);
   2676 	wm_enable_wakeup(sc);
   2677 	WM_CORE_UNLOCK(sc);
   2678 
   2679 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2680 
   2681 	/* Delete all remaining media. */
   2682 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2683 
   2684 	ether_ifdetach(ifp);
   2685 	if_detach(ifp);
   2686 	if_percpuq_destroy(sc->sc_ipq);
   2687 
   2688 	/* Unload RX dmamaps and free mbufs */
   2689 	for (i = 0; i < sc->sc_nqueues; i++) {
   2690 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2691 		mutex_enter(rxq->rxq_lock);
   2692 		wm_rxdrain(rxq);
   2693 		mutex_exit(rxq->rxq_lock);
   2694 	}
    2695 	/* rxq_lock must be released before the queues are freed below */
   2696 
   2697 	/* Disestablish the interrupt handler */
   2698 	for (i = 0; i < sc->sc_nintrs; i++) {
   2699 		if (sc->sc_ihs[i] != NULL) {
   2700 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2701 			sc->sc_ihs[i] = NULL;
   2702 		}
   2703 	}
   2704 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2705 
   2706 	wm_free_txrx_queues(sc);
   2707 
   2708 	/* Unmap the registers */
   2709 	if (sc->sc_ss) {
   2710 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2711 		sc->sc_ss = 0;
   2712 	}
   2713 	if (sc->sc_ios) {
   2714 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2715 		sc->sc_ios = 0;
   2716 	}
   2717 	if (sc->sc_flashs) {
   2718 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2719 		sc->sc_flashs = 0;
   2720 	}
   2721 
   2722 	if (sc->sc_core_lock)
   2723 		mutex_obj_free(sc->sc_core_lock);
   2724 	if (sc->sc_ich_phymtx)
   2725 		mutex_obj_free(sc->sc_ich_phymtx);
   2726 	if (sc->sc_ich_nvmmtx)
   2727 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2728 
   2729 	return 0;
   2730 }
   2731 
   2732 static bool
   2733 wm_suspend(device_t self, const pmf_qual_t *qual)
   2734 {
   2735 	struct wm_softc *sc = device_private(self);
   2736 
   2737 	wm_release_manageability(sc);
   2738 	wm_release_hw_control(sc);
   2739 	wm_enable_wakeup(sc);
   2740 
   2741 	return true;
   2742 }
   2743 
   2744 static bool
   2745 wm_resume(device_t self, const pmf_qual_t *qual)
   2746 {
   2747 	struct wm_softc *sc = device_private(self);
   2748 
   2749 	wm_init_manageability(sc);
   2750 
   2751 	return true;
   2752 }
   2753 
   2754 /*
   2755  * wm_watchdog:		[ifnet interface function]
   2756  *
   2757  *	Watchdog timer handler.
   2758  */
   2759 static void
   2760 wm_watchdog(struct ifnet *ifp)
   2761 {
   2762 	int qid;
   2763 	struct wm_softc *sc = ifp->if_softc;
   2764 
   2765 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2766 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2767 
   2768 		wm_watchdog_txq(ifp, txq);
   2769 	}
   2770 
   2771 	/* Reset the interface. */
   2772 	(void) wm_init(ifp);
   2773 
   2774 	/*
    2775 	 * There is still some upper-layer processing that calls
    2776 	 * ifp->if_start(), e.g. ALTQ.
   2777 	 */
   2778 	/* Try to get more packets going. */
   2779 	ifp->if_start(ifp);
   2780 }
   2781 
   2782 static void
   2783 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2784 {
   2785 	struct wm_softc *sc = ifp->if_softc;
   2786 
   2787 	/*
   2788 	 * Since we're using delayed interrupts, sweep up
   2789 	 * before we report an error.
   2790 	 */
   2791 	mutex_enter(txq->txq_lock);
   2792 	wm_txeof(sc, txq);
   2793 	mutex_exit(txq->txq_lock);
   2794 
   2795 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2796 #ifdef WM_DEBUG
   2797 		int i, j;
   2798 		struct wm_txsoft *txs;
   2799 #endif
   2800 		log(LOG_ERR,
   2801 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2802 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2803 		    txq->txq_next);
   2804 		ifp->if_oerrors++;
   2805 #ifdef WM_DEBUG
    2806 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2807 		     i = WM_NEXTTXS(txq, i)) {
    2808 			txs = &txq->txq_soft[i];
    2809 			printf("txs %d tx %d -> %d\n",
    2810 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2811 			for (j = txs->txs_firstdesc; ;
    2812 			    j = WM_NEXTTX(txq, j)) {
    2813 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2814 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2815 				printf("\t %#08x%08x\n",
    2816 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2817 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2818 				if (j == txs->txs_lastdesc)
    2819 					break;
    2820 			}
    2821 		}
   2822 #endif
   2823 	}
   2824 }
   2825 
   2826 /*
   2827  * wm_tick:
   2828  *
   2829  *	One second timer, used to check link status, sweep up
   2830  *	completed transmit jobs, etc.
   2831  */
   2832 static void
   2833 wm_tick(void *arg)
   2834 {
   2835 	struct wm_softc *sc = arg;
   2836 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2837 #ifndef WM_MPSAFE
   2838 	int s = splnet();
   2839 #endif
   2840 
   2841 	WM_CORE_LOCK(sc);
   2842 
   2843 	if (sc->sc_core_stopping)
   2844 		goto out;
   2845 
   2846 	if (sc->sc_type >= WM_T_82542_2_1) {
   2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2850 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2851 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2852 	}
   2853 
   2854 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2855 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2856 	    + CSR_READ(sc, WMREG_CRCERRS)
   2857 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2858 	    + CSR_READ(sc, WMREG_SYMERRC)
   2859 	    + CSR_READ(sc, WMREG_RXERRC)
   2860 	    + CSR_READ(sc, WMREG_SEC)
   2861 	    + CSR_READ(sc, WMREG_CEXTERR)
   2862 	    + CSR_READ(sc, WMREG_RLEC);
   2863 	/*
    2864 	 * WMREG_RNBC is incremented when no receive buffers are available
    2865 	 * in host memory. It is not a count of dropped packets, because the
    2866 	 * ethernet controller can still receive packets in that case as
    2867 	 * long as there is space in the PHY's FIFO.
    2868 	 *
    2869 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    2870 	 * instead of if_iqdrops.
   2871 	 */
   2872 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2873 
   2874 	if (sc->sc_flags & WM_F_HAS_MII)
   2875 		mii_tick(&sc->sc_mii);
   2876 	else if ((sc->sc_type >= WM_T_82575)
   2877 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2878 		wm_serdes_tick(sc);
   2879 	else
   2880 		wm_tbi_tick(sc);
   2881 
   2882 out:
   2883 	WM_CORE_UNLOCK(sc);
   2884 #ifndef WM_MPSAFE
   2885 	splx(s);
   2886 #endif
   2887 
   2888 	if (!sc->sc_core_stopping)
   2889 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2890 }
   2891 
   2892 static int
   2893 wm_ifflags_cb(struct ethercom *ec)
   2894 {
   2895 	struct ifnet *ifp = &ec->ec_if;
   2896 	struct wm_softc *sc = ifp->if_softc;
   2897 	int rc = 0;
   2898 
   2899 	WM_CORE_LOCK(sc);
   2900 
   2901 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2902 	sc->sc_if_flags = ifp->if_flags;
   2903 
   2904 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2905 		rc = ENETRESET;
   2906 		goto out;
   2907 	}
   2908 
   2909 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2910 		wm_set_filter(sc);
   2911 
   2912 	wm_set_vlan(sc);
   2913 
   2914 out:
   2915 	WM_CORE_UNLOCK(sc);
   2916 
   2917 	return rc;
   2918 }
   2919 
   2920 /*
   2921  * wm_ioctl:		[ifnet interface function]
   2922  *
   2923  *	Handle control requests from the operator.
   2924  */
   2925 static int
   2926 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2927 {
   2928 	struct wm_softc *sc = ifp->if_softc;
   2929 	struct ifreq *ifr = (struct ifreq *) data;
   2930 	struct ifaddr *ifa = (struct ifaddr *)data;
   2931 	struct sockaddr_dl *sdl;
   2932 	int s, error;
   2933 
   2934 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2935 		device_xname(sc->sc_dev), __func__));
   2936 
   2937 #ifndef WM_MPSAFE
   2938 	s = splnet();
   2939 #endif
   2940 	switch (cmd) {
   2941 	case SIOCSIFMEDIA:
   2942 	case SIOCGIFMEDIA:
   2943 		WM_CORE_LOCK(sc);
   2944 		/* Flow control requires full-duplex mode. */
   2945 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2946 		    (ifr->ifr_media & IFM_FDX) == 0)
   2947 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2948 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2949 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2950 				/* We can do both TXPAUSE and RXPAUSE. */
   2951 				ifr->ifr_media |=
   2952 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2953 			}
   2954 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2955 		}
   2956 		WM_CORE_UNLOCK(sc);
   2957 #ifdef WM_MPSAFE
   2958 		s = splnet();
   2959 #endif
   2960 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2961 #ifdef WM_MPSAFE
   2962 		splx(s);
   2963 #endif
   2964 		break;
   2965 	case SIOCINITIFADDR:
   2966 		WM_CORE_LOCK(sc);
   2967 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2968 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2969 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2970 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2971 			/* unicast address is first multicast entry */
   2972 			wm_set_filter(sc);
   2973 			error = 0;
   2974 			WM_CORE_UNLOCK(sc);
   2975 			break;
   2976 		}
   2977 		WM_CORE_UNLOCK(sc);
   2978 		/*FALLTHROUGH*/
   2979 	default:
   2980 #ifdef WM_MPSAFE
   2981 		s = splnet();
   2982 #endif
   2983 		/* It may call wm_start, so unlock here */
   2984 		error = ether_ioctl(ifp, cmd, data);
   2985 #ifdef WM_MPSAFE
   2986 		splx(s);
   2987 #endif
   2988 		if (error != ENETRESET)
   2989 			break;
   2990 
   2991 		error = 0;
   2992 
   2993 		if (cmd == SIOCSIFCAP) {
   2994 			error = (*ifp->if_init)(ifp);
   2995 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2996 			;
   2997 		else if (ifp->if_flags & IFF_RUNNING) {
   2998 			/*
   2999 			 * Multicast list has changed; set the hardware filter
   3000 			 * accordingly.
   3001 			 */
   3002 			WM_CORE_LOCK(sc);
   3003 			wm_set_filter(sc);
   3004 			WM_CORE_UNLOCK(sc);
   3005 		}
   3006 		break;
   3007 	}
   3008 
   3009 #ifndef WM_MPSAFE
   3010 	splx(s);
   3011 #endif
   3012 	return error;
   3013 }
   3014 
   3015 /* MAC address related */
   3016 
   3017 /*
    3018  * Get the offset of the MAC address and return it.
    3019  * On error, return 0 so the default offset is used.
   3020  */
   3021 static uint16_t
   3022 wm_check_alt_mac_addr(struct wm_softc *sc)
   3023 {
   3024 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3025 	uint16_t offset = NVM_OFF_MACADDR;
   3026 
   3027 	/* Try to read alternative MAC address pointer */
   3028 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3029 		return 0;
   3030 
    3031 	/* Check whether the pointer is valid or not. */
   3032 	if ((offset == 0x0000) || (offset == 0xffff))
   3033 		return 0;
   3034 
   3035 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3036 	/*
    3037 	 * Check whether the alternative MAC address is valid.
    3038 	 * Some cards have a non-0xffff pointer but don't actually use an
    3039 	 * alternative MAC address.
    3040 	 *
    3041 	 * Check that the group (multicast) bit of the first byte is clear.
   3042 	 */
   3043 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3044 		if (((myea[0] & 0xff) & 0x01) == 0)
   3045 			return offset; /* Found */
   3046 
   3047 	/* Not found */
   3048 	return 0;
   3049 }
   3050 
   3051 static int
   3052 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3053 {
   3054 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3055 	uint16_t offset = NVM_OFF_MACADDR;
   3056 	int do_invert = 0;
   3057 
   3058 	switch (sc->sc_type) {
   3059 	case WM_T_82580:
   3060 	case WM_T_I350:
   3061 	case WM_T_I354:
   3062 		/* EEPROM Top Level Partitioning */
   3063 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3064 		break;
   3065 	case WM_T_82571:
   3066 	case WM_T_82575:
   3067 	case WM_T_82576:
   3068 	case WM_T_80003:
   3069 	case WM_T_I210:
   3070 	case WM_T_I211:
   3071 		offset = wm_check_alt_mac_addr(sc);
   3072 		if (offset == 0)
   3073 			if ((sc->sc_funcid & 0x01) == 1)
   3074 				do_invert = 1;
   3075 		break;
   3076 	default:
   3077 		if ((sc->sc_funcid & 0x01) == 1)
   3078 			do_invert = 1;
   3079 		break;
   3080 	}
   3081 
   3082 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3083 		goto bad;
   3084 
   3085 	enaddr[0] = myea[0] & 0xff;
   3086 	enaddr[1] = myea[0] >> 8;
   3087 	enaddr[2] = myea[1] & 0xff;
   3088 	enaddr[3] = myea[1] >> 8;
   3089 	enaddr[4] = myea[2] & 0xff;
   3090 	enaddr[5] = myea[2] >> 8;
   3091 
   3092 	/*
   3093 	 * Toggle the LSB of the MAC address on the second port
   3094 	 * of some dual port cards.
   3095 	 */
   3096 	if (do_invert != 0)
   3097 		enaddr[5] ^= 1;
   3098 
   3099 	return 0;
   3100 
   3101  bad:
   3102 	return -1;
   3103 }
   3104 
   3105 /*
   3106  * wm_set_ral:
   3107  *
    3108  *	Set an entry in the receive address list.
   3109  */
   3110 static void
   3111 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3112 {
   3113 	uint32_t ral_lo, ral_hi;
   3114 
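         	/*
         	 * Pack the 6-byte Ethernet address into the low/high register
         	 * pair; RAL_AV in the high word marks the entry as valid.
         	 */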
   3115 	if (enaddr != NULL) {
   3116 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3117 		    (enaddr[3] << 24);
   3118 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3119 		ral_hi |= RAL_AV;
   3120 	} else {
   3121 		ral_lo = 0;
   3122 		ral_hi = 0;
   3123 	}
   3124 
   3125 	if (sc->sc_type >= WM_T_82544) {
   3126 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3127 		    ral_lo);
   3128 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3129 		    ral_hi);
   3130 	} else {
   3131 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3132 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3133 	}
   3134 }
   3135 
   3136 /*
   3137  * wm_mchash:
   3138  *
   3139  *	Compute the hash of the multicast address for the 4096-bit
   3140  *	multicast filter.
   3141  */
   3142 static uint32_t
   3143 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3144 {
   3145 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3146 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3147 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3148 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
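         	/*
         	 * The hash is built from the upper bits of the last two bytes
         	 * of the address; sc_mchash_type selects which bit window is
         	 * used (12 bits normally, 10 bits on ICH/PCH variants).
         	 */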
   3149 	uint32_t hash;
   3150 
   3151 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3152 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3153 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3154 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3155 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3156 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3157 		return (hash & 0x3ff);
   3158 	}
   3159 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3160 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3161 
   3162 	return (hash & 0xfff);
   3163 }
   3164 
   3165 /*
   3166  * wm_set_filter:
   3167  *
   3168  *	Set up the receive filter.
   3169  */
   3170 static void
   3171 wm_set_filter(struct wm_softc *sc)
   3172 {
   3173 	struct ethercom *ec = &sc->sc_ethercom;
   3174 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3175 	struct ether_multi *enm;
   3176 	struct ether_multistep step;
   3177 	bus_addr_t mta_reg;
   3178 	uint32_t hash, reg, bit;
   3179 	int i, size, ralmax;
   3180 
   3181 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3182 		device_xname(sc->sc_dev), __func__));
   3183 
   3184 	if (sc->sc_type >= WM_T_82544)
   3185 		mta_reg = WMREG_CORDOVA_MTA;
   3186 	else
   3187 		mta_reg = WMREG_MTA;
   3188 
   3189 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3190 
   3191 	if (ifp->if_flags & IFF_BROADCAST)
   3192 		sc->sc_rctl |= RCTL_BAM;
   3193 	if (ifp->if_flags & IFF_PROMISC) {
   3194 		sc->sc_rctl |= RCTL_UPE;
   3195 		goto allmulti;
   3196 	}
   3197 
   3198 	/*
   3199 	 * Set the station address in the first RAL slot, and
   3200 	 * clear the remaining slots.
   3201 	 */
   3202 	if (sc->sc_type == WM_T_ICH8)
    3203 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3204 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3205 	    || (sc->sc_type == WM_T_PCH))
   3206 		size = WM_RAL_TABSIZE_ICH8;
   3207 	else if (sc->sc_type == WM_T_PCH2)
   3208 		size = WM_RAL_TABSIZE_PCH2;
    3209 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3210 		size = WM_RAL_TABSIZE_PCH_LPT;
   3211 	else if (sc->sc_type == WM_T_82575)
   3212 		size = WM_RAL_TABSIZE_82575;
   3213 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3214 		size = WM_RAL_TABSIZE_82576;
   3215 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3216 		size = WM_RAL_TABSIZE_I350;
   3217 	else
   3218 		size = WM_RAL_TABSIZE;
   3219 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3220 
   3221 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3222 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3223 		switch (i) {
   3224 		case 0:
   3225 			/* We can use all entries */
   3226 			ralmax = size;
   3227 			break;
   3228 		case 1:
   3229 			/* Only RAR[0] */
   3230 			ralmax = 1;
   3231 			break;
   3232 		default:
   3233 			/* available SHRA + RAR[0] */
   3234 			ralmax = i + 1;
   3235 		}
   3236 	} else
   3237 		ralmax = size;
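         	/*
         	 * Clear the remaining writable slots; entries at or beyond
         	 * ralmax are locked by firmware and are left untouched.
         	 */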
   3238 	for (i = 1; i < size; i++) {
   3239 		if (i < ralmax)
   3240 			wm_set_ral(sc, NULL, i);
   3241 	}
   3242 
   3243 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3244 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3245 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3246 	    || (sc->sc_type == WM_T_PCH_SPT))
   3247 		size = WM_ICH8_MC_TABSIZE;
   3248 	else
   3249 		size = WM_MC_TABSIZE;
   3250 	/* Clear out the multicast table. */
   3251 	for (i = 0; i < size; i++)
   3252 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3253 
   3254 	ETHER_FIRST_MULTI(step, ec, enm);
   3255 	while (enm != NULL) {
   3256 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3257 			/*
   3258 			 * We must listen to a range of multicast addresses.
   3259 			 * For now, just accept all multicasts, rather than
   3260 			 * trying to set only those filter bits needed to match
   3261 			 * the range.  (At this time, the only use of address
   3262 			 * ranges is for IP multicast routing, for which the
   3263 			 * range is big enough to require all bits set.)
   3264 			 */
   3265 			goto allmulti;
   3266 		}
   3267 
   3268 		hash = wm_mchash(sc, enm->enm_addrlo);
   3269 
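         		/*
         		 * The upper bits of the hash select one of the 32-bit
         		 * MTA registers; the low 5 bits select the bit within it.
         		 */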
   3270 		reg = (hash >> 5);
   3271 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3272 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3273 		    || (sc->sc_type == WM_T_PCH2)
   3274 		    || (sc->sc_type == WM_T_PCH_LPT)
   3275 		    || (sc->sc_type == WM_T_PCH_SPT))
   3276 			reg &= 0x1f;
   3277 		else
   3278 			reg &= 0x7f;
   3279 		bit = hash & 0x1f;
   3280 
   3281 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3282 		hash |= 1U << bit;
   3283 
   3284 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3285 			/*
   3286 			 * 82544 Errata 9: Certain register cannot be written
   3287 			 * with particular alignments in PCI-X bus operation
   3288 			 * (FCAH, MTA and VFTA).
   3289 			 */
   3290 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3291 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3292 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3293 		} else
   3294 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3295 
   3296 		ETHER_NEXT_MULTI(step, enm);
   3297 	}
   3298 
   3299 	ifp->if_flags &= ~IFF_ALLMULTI;
   3300 	goto setit;
   3301 
   3302  allmulti:
   3303 	ifp->if_flags |= IFF_ALLMULTI;
   3304 	sc->sc_rctl |= RCTL_MPE;
   3305 
   3306  setit:
   3307 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3308 }
   3309 
   3310 /* Reset and init related */
   3311 
   3312 static void
   3313 wm_set_vlan(struct wm_softc *sc)
   3314 {
   3315 
   3316 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3317 		device_xname(sc->sc_dev), __func__));
   3318 
   3319 	/* Deal with VLAN enables. */
   3320 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3321 		sc->sc_ctrl |= CTRL_VME;
   3322 	else
   3323 		sc->sc_ctrl &= ~CTRL_VME;
   3324 
   3325 	/* Write the control registers. */
   3326 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3327 }
   3328 
   3329 static void
   3330 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3331 {
   3332 	uint32_t gcr;
   3333 	pcireg_t ctrl2;
   3334 
   3335 	gcr = CSR_READ(sc, WMREG_GCR);
   3336 
   3337 	/* Only take action if timeout value is defaulted to 0 */
   3338 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3339 		goto out;
   3340 
   3341 	if ((gcr & GCR_CAP_VER2) == 0) {
   3342 		gcr |= GCR_CMPL_TMOUT_10MS;
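         	/*
         	 * Devices without the v2 PCIe capability set the timeout via
         	 * the GCR register; v2 devices use PCIe DCSR2 instead.
         	 */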
   3343 		goto out;
   3344 	}
   3345 
   3346 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3347 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3348 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3349 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3350 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3351 
   3352 out:
   3353 	/* Disable completion timeout resend */
   3354 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3355 
   3356 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3357 }
   3358 
   3359 void
   3360 wm_get_auto_rd_done(struct wm_softc *sc)
   3361 {
   3362 	int i;
   3363 
   3364 	/* wait for eeprom to reload */
   3365 	switch (sc->sc_type) {
   3366 	case WM_T_82571:
   3367 	case WM_T_82572:
   3368 	case WM_T_82573:
   3369 	case WM_T_82574:
   3370 	case WM_T_82583:
   3371 	case WM_T_82575:
   3372 	case WM_T_82576:
   3373 	case WM_T_82580:
   3374 	case WM_T_I350:
   3375 	case WM_T_I354:
   3376 	case WM_T_I210:
   3377 	case WM_T_I211:
   3378 	case WM_T_80003:
   3379 	case WM_T_ICH8:
   3380 	case WM_T_ICH9:
   3381 		for (i = 0; i < 10; i++) {
   3382 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3383 				break;
   3384 			delay(1000);
   3385 		}
   3386 		if (i == 10) {
   3387 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3388 			    "complete\n", device_xname(sc->sc_dev));
   3389 		}
   3390 		break;
   3391 	default:
   3392 		break;
   3393 	}
   3394 }
   3395 
   3396 void
   3397 wm_lan_init_done(struct wm_softc *sc)
   3398 {
   3399 	uint32_t reg = 0;
   3400 	int i;
   3401 
   3402 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3403 		device_xname(sc->sc_dev), __func__));
   3404 
   3405 	/* Wait for eeprom to reload */
   3406 	switch (sc->sc_type) {
   3407 	case WM_T_ICH10:
   3408 	case WM_T_PCH:
   3409 	case WM_T_PCH2:
   3410 	case WM_T_PCH_LPT:
   3411 	case WM_T_PCH_SPT:
   3412 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3413 			reg = CSR_READ(sc, WMREG_STATUS);
   3414 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3415 				break;
   3416 			delay(100);
   3417 		}
   3418 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3419 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3420 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3421 		}
   3422 		break;
   3423 	default:
   3424 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3425 		    __func__);
   3426 		break;
   3427 	}
   3428 
   3429 	reg &= ~STATUS_LAN_INIT_DONE;
   3430 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3431 }
   3432 
   3433 void
   3434 wm_get_cfg_done(struct wm_softc *sc)
   3435 {
   3436 	int mask;
   3437 	uint32_t reg;
   3438 	int i;
   3439 
   3440 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3441 		device_xname(sc->sc_dev), __func__));
   3442 
   3443 	/* Wait for eeprom to reload */
   3444 	switch (sc->sc_type) {
   3445 	case WM_T_82542_2_0:
   3446 	case WM_T_82542_2_1:
   3447 		/* null */
   3448 		break;
   3449 	case WM_T_82543:
   3450 	case WM_T_82544:
   3451 	case WM_T_82540:
   3452 	case WM_T_82545:
   3453 	case WM_T_82545_3:
   3454 	case WM_T_82546:
   3455 	case WM_T_82546_3:
   3456 	case WM_T_82541:
   3457 	case WM_T_82541_2:
   3458 	case WM_T_82547:
   3459 	case WM_T_82547_2:
   3460 	case WM_T_82573:
   3461 	case WM_T_82574:
   3462 	case WM_T_82583:
   3463 		/* generic */
   3464 		delay(10*1000);
   3465 		break;
   3466 	case WM_T_80003:
   3467 	case WM_T_82571:
   3468 	case WM_T_82572:
   3469 	case WM_T_82575:
   3470 	case WM_T_82576:
   3471 	case WM_T_82580:
   3472 	case WM_T_I350:
   3473 	case WM_T_I354:
   3474 	case WM_T_I210:
   3475 	case WM_T_I211:
   3476 		if (sc->sc_type == WM_T_82571) {
   3477 			/* Only 82571 shares port 0 */
   3478 			mask = EEMNGCTL_CFGDONE_0;
   3479 		} else
   3480 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3481 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3482 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3483 				break;
   3484 			delay(1000);
   3485 		}
   3486 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3487 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3488 				device_xname(sc->sc_dev), __func__));
   3489 		}
   3490 		break;
   3491 	case WM_T_ICH8:
   3492 	case WM_T_ICH9:
   3493 	case WM_T_ICH10:
   3494 	case WM_T_PCH:
   3495 	case WM_T_PCH2:
   3496 	case WM_T_PCH_LPT:
   3497 	case WM_T_PCH_SPT:
   3498 		delay(10*1000);
   3499 		if (sc->sc_type >= WM_T_ICH10)
   3500 			wm_lan_init_done(sc);
   3501 		else
   3502 			wm_get_auto_rd_done(sc);
   3503 
   3504 		reg = CSR_READ(sc, WMREG_STATUS);
   3505 		if ((reg & STATUS_PHYRA) != 0)
   3506 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3507 		break;
   3508 	default:
   3509 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3510 		    __func__);
   3511 		break;
   3512 	}
   3513 }
   3514 
   3515 /* Init hardware bits */
   3516 void
   3517 wm_initialize_hardware_bits(struct wm_softc *sc)
   3518 {
   3519 	uint32_t tarc0, tarc1, reg;
   3520 
   3521 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3522 		device_xname(sc->sc_dev), __func__));
   3523 
   3524 	/* For 82571 variant, 80003 and ICHs */
   3525 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3526 	    || (sc->sc_type >= WM_T_80003)) {
   3527 
   3528 		/* Transmit Descriptor Control 0 */
   3529 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3530 		reg |= TXDCTL_COUNT_DESC;
   3531 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3532 
   3533 		/* Transmit Descriptor Control 1 */
   3534 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3535 		reg |= TXDCTL_COUNT_DESC;
   3536 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3537 
   3538 		/* TARC0 */
   3539 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3540 		switch (sc->sc_type) {
   3541 		case WM_T_82571:
   3542 		case WM_T_82572:
   3543 		case WM_T_82573:
   3544 		case WM_T_82574:
   3545 		case WM_T_82583:
   3546 		case WM_T_80003:
   3547 			/* Clear bits 30..27 */
   3548 			tarc0 &= ~__BITS(30, 27);
   3549 			break;
   3550 		default:
   3551 			break;
   3552 		}
   3553 
   3554 		switch (sc->sc_type) {
   3555 		case WM_T_82571:
   3556 		case WM_T_82572:
   3557 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3558 
   3559 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3560 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3561 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3562 			/* 8257[12] Errata No.7 */
    3563 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3564 
   3565 			/* TARC1 bit 28 */
   3566 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3567 				tarc1 &= ~__BIT(28);
   3568 			else
   3569 				tarc1 |= __BIT(28);
   3570 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3571 
   3572 			/*
   3573 			 * 8257[12] Errata No.13
    3574 			 * Disable Dynamic Clock Gating.
   3575 			 */
   3576 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3577 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3578 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3579 			break;
   3580 		case WM_T_82573:
   3581 		case WM_T_82574:
   3582 		case WM_T_82583:
   3583 			if ((sc->sc_type == WM_T_82574)
   3584 			    || (sc->sc_type == WM_T_82583))
   3585 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3586 
   3587 			/* Extended Device Control */
   3588 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3589 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3590 			reg |= __BIT(22);	/* Set bit 22 */
   3591 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3592 
   3593 			/* Device Control */
   3594 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3595 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3596 
   3597 			/* PCIe Control Register */
   3598 			/*
   3599 			 * 82573 Errata (unknown).
   3600 			 *
   3601 			 * 82574 Errata 25 and 82583 Errata 12
   3602 			 * "Dropped Rx Packets":
    3603 			 *   Fixed in NVM image version 2.1.4 and newer.
   3604 			 */
   3605 			reg = CSR_READ(sc, WMREG_GCR);
   3606 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3607 			CSR_WRITE(sc, WMREG_GCR, reg);
   3608 
   3609 			if ((sc->sc_type == WM_T_82574)
   3610 			    || (sc->sc_type == WM_T_82583)) {
   3611 				/*
   3612 				 * Document says this bit must be set for
   3613 				 * proper operation.
   3614 				 */
   3615 				reg = CSR_READ(sc, WMREG_GCR);
   3616 				reg |= __BIT(22);
   3617 				CSR_WRITE(sc, WMREG_GCR, reg);
   3618 
   3619 				/*
    3620 				 * Apply a workaround for a hardware erratum
    3621 				 * documented in the errata docs. It fixes an
    3622 				 * issue where some error-prone or unreliable
    3623 				 * PCIe completions occur, particularly with
    3624 				 * ASPM enabled. Without the fix, the issue
    3625 				 * can cause Tx timeouts.
   3626 				 */
   3627 				reg = CSR_READ(sc, WMREG_GCR2);
   3628 				reg |= __BIT(0);
   3629 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3630 			}
   3631 			break;
   3632 		case WM_T_80003:
   3633 			/* TARC0 */
   3634 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3635 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3636 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3637 
   3638 			/* TARC1 bit 28 */
   3639 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3640 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3641 				tarc1 &= ~__BIT(28);
   3642 			else
   3643 				tarc1 |= __BIT(28);
   3644 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3645 			break;
   3646 		case WM_T_ICH8:
   3647 		case WM_T_ICH9:
   3648 		case WM_T_ICH10:
   3649 		case WM_T_PCH:
   3650 		case WM_T_PCH2:
   3651 		case WM_T_PCH_LPT:
   3652 		case WM_T_PCH_SPT:
   3653 			/* TARC0 */
   3654 			if ((sc->sc_type == WM_T_ICH8)
   3655 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3656 				/* Set TARC0 bits 29 and 28 */
   3657 				tarc0 |= __BITS(29, 28);
   3658 			}
   3659 			/* Set TARC0 bits 23,24,26,27 */
   3660 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3661 
   3662 			/* CTRL_EXT */
   3663 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3664 			reg |= __BIT(22);	/* Set bit 22 */
   3665 			/*
   3666 			 * Enable PHY low-power state when MAC is at D3
   3667 			 * w/o WoL
   3668 			 */
   3669 			if (sc->sc_type >= WM_T_PCH)
   3670 				reg |= CTRL_EXT_PHYPDEN;
   3671 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3672 
   3673 			/* TARC1 */
   3674 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3675 			/* bit 28 */
   3676 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3677 				tarc1 &= ~__BIT(28);
   3678 			else
   3679 				tarc1 |= __BIT(28);
   3680 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3681 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3682 
   3683 			/* Device Status */
   3684 			if (sc->sc_type == WM_T_ICH8) {
   3685 				reg = CSR_READ(sc, WMREG_STATUS);
   3686 				reg &= ~__BIT(31);
   3687 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3688 
   3689 			}
   3690 
   3691 			/* IOSFPC */
   3692 			if (sc->sc_type == WM_T_PCH_SPT) {
   3693 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3694 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3695 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3696 			}
   3697 			/*
    3698 			 * To work around a descriptor data corruption issue
    3699 			 * seen with NFS v2 UDP traffic, just disable the NFS
    3700 			 * filtering capability.
   3701 			 */
   3702 			reg = CSR_READ(sc, WMREG_RFCTL);
   3703 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3704 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3705 			break;
   3706 		default:
   3707 			break;
   3708 		}
   3709 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3710 
   3711 		/*
   3712 		 * 8257[12] Errata No.52 and some others.
   3713 		 * Avoid RSS Hash Value bug.
   3714 		 */
   3715 		switch (sc->sc_type) {
   3716 		case WM_T_82571:
   3717 		case WM_T_82572:
   3718 		case WM_T_82573:
   3719 		case WM_T_80003:
   3720 		case WM_T_ICH8:
   3721 			reg = CSR_READ(sc, WMREG_RFCTL);
    3722 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3723 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3724 			break;
   3725 		default:
   3726 			break;
   3727 		}
   3728 	}
   3729 }
   3730 
   3731 static uint32_t
   3732 wm_rxpbs_adjust_82580(uint32_t val)
   3733 {
   3734 	uint32_t rv = 0;
   3735 
   3736 	if (val < __arraycount(wm_82580_rxpbs_table))
   3737 		rv = wm_82580_rxpbs_table[val];
   3738 
   3739 	return rv;
   3740 }
   3741 
   3742 /*
   3743  * wm_reset_phy:
   3744  *
   3745  *	generic PHY reset function.
   3746  *	Same as e1000_phy_hw_reset_generic()
   3747  */
   3748 static void
   3749 wm_reset_phy(struct wm_softc *sc)
   3750 {
   3751 	uint32_t reg;
   3752 
   3753 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3754 		device_xname(sc->sc_dev), __func__));
   3755 	if (wm_phy_resetisblocked(sc))
   3756 		return;
   3757 
   3758 	sc->phy.acquire(sc);
   3759 
   3760 	reg = CSR_READ(sc, WMREG_CTRL);
   3761 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3762 	CSR_WRITE_FLUSH(sc);
   3763 
   3764 	delay(sc->phy.reset_delay_us);
   3765 
   3766 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3767 	CSR_WRITE_FLUSH(sc);
   3768 
   3769 	delay(150);
   3770 
   3771 	sc->phy.release(sc);
   3772 
   3773 	wm_get_cfg_done(sc);
   3774 }
   3775 
   3776 static void
   3777 wm_flush_desc_rings(struct wm_softc *sc)
   3778 {
   3779 	pcireg_t preg;
   3780 	uint32_t reg;
   3781 	int nexttx;
   3782 
   3783 	/* First, disable MULR fix in FEXTNVM11 */
   3784 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3785 	reg |= FEXTNVM11_DIS_MULRFIX;
   3786 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3787 
   3788 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3789 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3790 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3791 		struct wm_txqueue *txq;
   3792 		wiseman_txdesc_t *txd;
   3793 
   3794 		/* TX */
   3795 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3796 		    device_xname(sc->sc_dev), preg, reg);
   3797 		reg = CSR_READ(sc, WMREG_TCTL);
   3798 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3799 
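         		/*
         		 * Queue a single dummy 512-byte descriptor and advance
         		 * TDT so the hardware flushes its internal Tx descriptor
         		 * cache.
         		 */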
   3800 		txq = &sc->sc_queue[0].wmq_txq;
   3801 		nexttx = txq->txq_next;
   3802 		txd = &txq->txq_descs[nexttx];
   3803 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3804 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3805 		txd->wtx_fields.wtxu_status = 0;
   3806 		txd->wtx_fields.wtxu_options = 0;
   3807 		txd->wtx_fields.wtxu_vlan = 0;
   3808 
   3809 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3810 			BUS_SPACE_BARRIER_WRITE);
   3811 
   3812 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3813 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3814 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3815 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3816 		delay(250);
   3817 	}
   3818 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3819 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3820 		uint32_t rctl;
   3821 
   3822 		/* RX */
   3823 		printf("%s: Need RX flush (reg = %08x)\n",
   3824 		    device_xname(sc->sc_dev), preg);
   3825 		rctl = CSR_READ(sc, WMREG_RCTL);
   3826 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3827 		CSR_WRITE_FLUSH(sc);
   3828 		delay(150);
   3829 
   3830 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3831 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3832 		reg &= 0xffffc000;
   3833 		/*
   3834 		 * update thresholds: prefetch threshold to 31, host threshold
   3835 		 * to 1 and make sure the granularity is "descriptors" and not
   3836 		 * "cache lines"
   3837 		 */
   3838 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3839 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3840 
   3841 		/*
   3842 		 * momentarily enable the RX ring for the changes to take
   3843 		 * effect
   3844 		 */
   3845 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3846 		CSR_WRITE_FLUSH(sc);
   3847 		delay(150);
   3848 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3849 	}
   3850 }
   3851 
   3852 /*
   3853  * wm_reset:
   3854  *
   3855  *	Reset the i82542 chip.
   3856  */
   3857 static void
   3858 wm_reset(struct wm_softc *sc)
   3859 {
   3860 	int phy_reset = 0;
   3861 	int i, error = 0;
   3862 	uint32_t reg;
   3863 
   3864 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3865 		device_xname(sc->sc_dev), __func__));
   3866 	KASSERT(sc->sc_type != 0);
   3867 
   3868 	/*
   3869 	 * Allocate on-chip memory according to the MTU size.
   3870 	 * The Packet Buffer Allocation register must be written
   3871 	 * before the chip is reset.
   3872 	 */
   3873 	switch (sc->sc_type) {
   3874 	case WM_T_82547:
   3875 	case WM_T_82547_2:
   3876 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3877 		    PBA_22K : PBA_30K;
   3878 		for (i = 0; i < sc->sc_nqueues; i++) {
   3879 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3880 			txq->txq_fifo_head = 0;
   3881 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3882 			txq->txq_fifo_size =
   3883 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3884 			txq->txq_fifo_stall = 0;
   3885 		}
   3886 		break;
   3887 	case WM_T_82571:
   3888 	case WM_T_82572:
    3889 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3890 	case WM_T_80003:
   3891 		sc->sc_pba = PBA_32K;
   3892 		break;
   3893 	case WM_T_82573:
   3894 		sc->sc_pba = PBA_12K;
   3895 		break;
   3896 	case WM_T_82574:
   3897 	case WM_T_82583:
   3898 		sc->sc_pba = PBA_20K;
   3899 		break;
   3900 	case WM_T_82576:
   3901 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3902 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3903 		break;
   3904 	case WM_T_82580:
   3905 	case WM_T_I350:
   3906 	case WM_T_I354:
   3907 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3908 		break;
   3909 	case WM_T_I210:
   3910 	case WM_T_I211:
   3911 		sc->sc_pba = PBA_34K;
   3912 		break;
   3913 	case WM_T_ICH8:
   3914 		/* Workaround for a bit corruption issue in FIFO memory */
   3915 		sc->sc_pba = PBA_8K;
   3916 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3917 		break;
   3918 	case WM_T_ICH9:
   3919 	case WM_T_ICH10:
   3920 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3921 		    PBA_14K : PBA_10K;
   3922 		break;
   3923 	case WM_T_PCH:
   3924 	case WM_T_PCH2:
   3925 	case WM_T_PCH_LPT:
   3926 	case WM_T_PCH_SPT:
   3927 		sc->sc_pba = PBA_26K;
   3928 		break;
   3929 	default:
   3930 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3931 		    PBA_40K : PBA_48K;
   3932 		break;
   3933 	}
   3934 	/*
   3935 	 * Only old or non-multiqueue devices have the PBA register
   3936 	 * XXX Need special handling for 82575.
   3937 	 */
   3938 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3939 	    || (sc->sc_type == WM_T_82575))
   3940 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3941 
   3942 	/* Prevent the PCI-E bus from sticking */
   3943 	if (sc->sc_flags & WM_F_PCIE) {
   3944 		int timeout = 800;
   3945 
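         		/*
         		 * Ask the device to stop issuing new master transactions,
         		 * then wait up to 80 ms (800 * 100 us) for outstanding
         		 * ones to drain.
         		 */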
   3946 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3947 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3948 
   3949 		while (timeout--) {
   3950 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3951 			    == 0)
   3952 				break;
   3953 			delay(100);
   3954 		}
   3955 	}
   3956 
   3957 	/* Set the completion timeout for interface */
   3958 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3959 	    || (sc->sc_type == WM_T_82580)
   3960 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3961 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3962 		wm_set_pcie_completion_timeout(sc);
   3963 
   3964 	/* Clear interrupt */
   3965 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3966 	if (sc->sc_nintrs > 1) {
   3967 		if (sc->sc_type != WM_T_82574) {
   3968 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3969 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3970 		} else {
   3971 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3972 		}
   3973 	}
   3974 
   3975 	/* Stop the transmit and receive processes. */
   3976 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3977 	sc->sc_rctl &= ~RCTL_EN;
   3978 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3979 	CSR_WRITE_FLUSH(sc);
   3980 
   3981 	/* XXX set_tbi_sbp_82543() */
   3982 
   3983 	delay(10*1000);
   3984 
   3985 	/* Must acquire the MDIO ownership before MAC reset */
   3986 	switch (sc->sc_type) {
   3987 	case WM_T_82573:
   3988 	case WM_T_82574:
   3989 	case WM_T_82583:
   3990 		error = wm_get_hw_semaphore_82573(sc);
   3991 		break;
   3992 	default:
   3993 		break;
   3994 	}
   3995 
   3996 	/*
   3997 	 * 82541 Errata 29? & 82547 Errata 28?
   3998 	 * See also the description about PHY_RST bit in CTRL register
   3999 	 * in 8254x_GBe_SDM.pdf.
   4000 	 */
   4001 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4002 		CSR_WRITE(sc, WMREG_CTRL,
   4003 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4004 		CSR_WRITE_FLUSH(sc);
   4005 		delay(5000);
   4006 	}
   4007 
   4008 	switch (sc->sc_type) {
   4009 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4010 	case WM_T_82541:
   4011 	case WM_T_82541_2:
   4012 	case WM_T_82547:
   4013 	case WM_T_82547_2:
   4014 		/*
   4015 		 * On some chipsets, a reset through a memory-mapped write
   4016 		 * cycle can cause the chip to reset before completing the
   4017 		 * write cycle.  This causes major headache that can be
   4018 		 * avoided by issuing the reset via indirect register writes
   4019 		 * through I/O space.
   4020 		 *
   4021 		 * So, if we successfully mapped the I/O BAR at attach time,
   4022 		 * use that.  Otherwise, try our luck with a memory-mapped
   4023 		 * reset.
   4024 		 */
   4025 		if (sc->sc_flags & WM_F_IOH_VALID)
   4026 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4027 		else
   4028 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4029 		break;
   4030 	case WM_T_82545_3:
   4031 	case WM_T_82546_3:
   4032 		/* Use the shadow control register on these chips. */
   4033 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4034 		break;
   4035 	case WM_T_80003:
   4036 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4037 		sc->phy.acquire(sc);
   4038 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4039 		sc->phy.release(sc);
   4040 		break;
   4041 	case WM_T_ICH8:
   4042 	case WM_T_ICH9:
   4043 	case WM_T_ICH10:
   4044 	case WM_T_PCH:
   4045 	case WM_T_PCH2:
   4046 	case WM_T_PCH_LPT:
   4047 	case WM_T_PCH_SPT:
   4048 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4049 		if (wm_phy_resetisblocked(sc) == false) {
   4050 			/*
   4051 			 * Gate automatic PHY configuration by hardware on
   4052 			 * non-managed 82579
   4053 			 */
   4054 			if ((sc->sc_type == WM_T_PCH2)
   4055 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4056 				== 0))
   4057 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4058 
   4059 			reg |= CTRL_PHY_RESET;
   4060 			phy_reset = 1;
   4061 		} else
   4062 			printf("XXX reset is blocked!!!\n");
   4063 		sc->phy.acquire(sc);
   4064 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4065 		/* Don't insert a completion barrier when reset */
   4066 		delay(20*1000);
   4067 		mutex_exit(sc->sc_ich_phymtx);
   4068 		break;
   4069 	case WM_T_82580:
   4070 	case WM_T_I350:
   4071 	case WM_T_I354:
   4072 	case WM_T_I210:
   4073 	case WM_T_I211:
   4074 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4075 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4076 			CSR_WRITE_FLUSH(sc);
   4077 		delay(5000);
   4078 		break;
   4079 	case WM_T_82542_2_0:
   4080 	case WM_T_82542_2_1:
   4081 	case WM_T_82543:
   4082 	case WM_T_82540:
   4083 	case WM_T_82545:
   4084 	case WM_T_82546:
   4085 	case WM_T_82571:
   4086 	case WM_T_82572:
   4087 	case WM_T_82573:
   4088 	case WM_T_82574:
   4089 	case WM_T_82575:
   4090 	case WM_T_82576:
   4091 	case WM_T_82583:
   4092 	default:
   4093 		/* Everything else can safely use the documented method. */
   4094 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4095 		break;
   4096 	}
   4097 
   4098 	/* Must release the MDIO ownership after MAC reset */
   4099 	switch (sc->sc_type) {
   4100 	case WM_T_82573:
   4101 	case WM_T_82574:
   4102 	case WM_T_82583:
   4103 		if (error == 0)
   4104 			wm_put_hw_semaphore_82573(sc);
   4105 		break;
   4106 	default:
   4107 		break;
   4108 	}
   4109 
   4110 	if (phy_reset != 0)
   4111 		wm_get_cfg_done(sc);
   4112 
   4113 	/* reload EEPROM */
   4114 	switch (sc->sc_type) {
   4115 	case WM_T_82542_2_0:
   4116 	case WM_T_82542_2_1:
   4117 	case WM_T_82543:
   4118 	case WM_T_82544:
   4119 		delay(10);
   4120 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4121 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4122 		CSR_WRITE_FLUSH(sc);
   4123 		delay(2000);
   4124 		break;
   4125 	case WM_T_82540:
   4126 	case WM_T_82545:
   4127 	case WM_T_82545_3:
   4128 	case WM_T_82546:
   4129 	case WM_T_82546_3:
   4130 		delay(5*1000);
   4131 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4132 		break;
   4133 	case WM_T_82541:
   4134 	case WM_T_82541_2:
   4135 	case WM_T_82547:
   4136 	case WM_T_82547_2:
   4137 		delay(20000);
   4138 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4139 		break;
   4140 	case WM_T_82571:
   4141 	case WM_T_82572:
   4142 	case WM_T_82573:
   4143 	case WM_T_82574:
   4144 	case WM_T_82583:
   4145 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4146 			delay(10);
   4147 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4148 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4149 			CSR_WRITE_FLUSH(sc);
   4150 		}
   4151 		/* check EECD_EE_AUTORD */
   4152 		wm_get_auto_rd_done(sc);
   4153 		/*
   4154 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4155 		 * is set.
   4156 		 */
   4157 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4158 		    || (sc->sc_type == WM_T_82583))
   4159 			delay(25*1000);
   4160 		break;
   4161 	case WM_T_82575:
   4162 	case WM_T_82576:
   4163 	case WM_T_82580:
   4164 	case WM_T_I350:
   4165 	case WM_T_I354:
   4166 	case WM_T_I210:
   4167 	case WM_T_I211:
   4168 	case WM_T_80003:
   4169 		/* check EECD_EE_AUTORD */
   4170 		wm_get_auto_rd_done(sc);
   4171 		break;
   4172 	case WM_T_ICH8:
   4173 	case WM_T_ICH9:
   4174 	case WM_T_ICH10:
   4175 	case WM_T_PCH:
   4176 	case WM_T_PCH2:
   4177 	case WM_T_PCH_LPT:
   4178 	case WM_T_PCH_SPT:
   4179 		break;
   4180 	default:
   4181 		panic("%s: unknown type\n", __func__);
   4182 	}
   4183 
   4184 	/* Check whether EEPROM is present or not */
   4185 	switch (sc->sc_type) {
   4186 	case WM_T_82575:
   4187 	case WM_T_82576:
   4188 	case WM_T_82580:
   4189 	case WM_T_I350:
   4190 	case WM_T_I354:
   4191 	case WM_T_ICH8:
   4192 	case WM_T_ICH9:
   4193 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4194 			/* Not found */
   4195 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4196 			if (sc->sc_type == WM_T_82575)
   4197 				wm_reset_init_script_82575(sc);
   4198 		}
   4199 		break;
   4200 	default:
   4201 		break;
   4202 	}
   4203 
   4204 	if ((sc->sc_type == WM_T_82580)
   4205 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4206 		/* clear global device reset status bit */
   4207 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4208 	}
   4209 
   4210 	/* Clear any pending interrupt events. */
   4211 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4212 	reg = CSR_READ(sc, WMREG_ICR);
   4213 	if (sc->sc_nintrs > 1) {
   4214 		if (sc->sc_type != WM_T_82574) {
   4215 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4216 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4217 		} else
   4218 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4219 	}
   4220 
   4221 	/* reload sc_ctrl */
   4222 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4223 
   4224 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4225 		wm_set_eee_i350(sc);
   4226 
   4227 	/* Clear the host wakeup bit after lcd reset */
   4228 	if (sc->sc_type >= WM_T_PCH) {
   4229 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4230 		    BM_PORT_GEN_CFG);
   4231 		reg &= ~BM_WUC_HOST_WU_BIT;
   4232 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4233 		    BM_PORT_GEN_CFG, reg);
   4234 	}
   4235 
   4236 	/*
   4237 	 * For PCH, this write will make sure that any noise will be detected
   4238 	 * as a CRC error and be dropped rather than show up as a bad packet
   4239 	 * to the DMA engine
   4240 	 */
   4241 	if (sc->sc_type == WM_T_PCH)
   4242 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4243 
   4244 	if (sc->sc_type >= WM_T_82544)
   4245 		CSR_WRITE(sc, WMREG_WUC, 0);
   4246 
   4247 	wm_reset_mdicnfg_82580(sc);
   4248 
   4249 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4250 		wm_pll_workaround_i210(sc);
   4251 }
   4252 
   4253 /*
   4254  * wm_add_rxbuf:
   4255  *
    4256  *	Add a receive buffer to the indicated descriptor.
   4257  */
   4258 static int
   4259 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4260 {
   4261 	struct wm_softc *sc = rxq->rxq_sc;
   4262 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4263 	struct mbuf *m;
   4264 	int error;
   4265 
   4266 	KASSERT(mutex_owned(rxq->rxq_lock));
   4267 
   4268 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4269 	if (m == NULL)
   4270 		return ENOBUFS;
   4271 
   4272 	MCLGET(m, M_DONTWAIT);
   4273 	if ((m->m_flags & M_EXT) == 0) {
   4274 		m_freem(m);
   4275 		return ENOBUFS;
   4276 	}
   4277 
   4278 	if (rxs->rxs_mbuf != NULL)
   4279 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4280 
   4281 	rxs->rxs_mbuf = m;
   4282 
   4283 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4284 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4285 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4286 	if (error) {
   4287 		/* XXX XXX XXX */
   4288 		aprint_error_dev(sc->sc_dev,
   4289 		    "unable to load rx DMA map %d, error = %d\n",
   4290 		    idx, error);
   4291 		panic("wm_add_rxbuf");
   4292 	}
   4293 
   4294 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4295 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4296 
   4297 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4298 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4299 			wm_init_rxdesc(rxq, idx);
   4300 	} else
   4301 		wm_init_rxdesc(rxq, idx);
   4302 
   4303 	return 0;
   4304 }
   4305 
   4306 /*
   4307  * wm_rxdrain:
   4308  *
   4309  *	Drain the receive queue.
   4310  */
   4311 static void
   4312 wm_rxdrain(struct wm_rxqueue *rxq)
   4313 {
   4314 	struct wm_softc *sc = rxq->rxq_sc;
   4315 	struct wm_rxsoft *rxs;
   4316 	int i;
   4317 
   4318 	KASSERT(mutex_owned(rxq->rxq_lock));
   4319 
   4320 	for (i = 0; i < WM_NRXDESC; i++) {
   4321 		rxs = &rxq->rxq_soft[i];
   4322 		if (rxs->rxs_mbuf != NULL) {
   4323 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4324 			m_freem(rxs->rxs_mbuf);
   4325 			rxs->rxs_mbuf = NULL;
   4326 		}
   4327 	}
   4328 }
   4329 
   4330 
   4331 /*
    4332  * XXX Copied from FreeBSD's sys/net/rss_config.c.
   4333  */
   4334 /*
   4335  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4336  * effectiveness may be limited by algorithm choice and available entropy
   4337  * during the boot.
   4338  *
   4339  * XXXRW: And that we don't randomize it yet!
   4340  *
   4341  * This is the default Microsoft RSS specification key which is also
   4342  * the Chelsio T5 firmware default key.
   4343  */
   4344 #define RSS_KEYSIZE 40
   4345 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4346 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4347 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4348 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4349 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4350 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4351 };
   4352 
   4353 /*
    4354  * Caller must pass an array of size sizeof(wm_rss_key).
    4355  *
    4356  * XXX
    4357  * Since if_ixgbe may also use this function, it should not be an
    4358  * if_wm-specific function.
   4359  */
   4360 static void
   4361 wm_rss_getkey(uint8_t *key)
   4362 {
   4363 
   4364 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4365 }
   4366 
   4367 /*
    4368  * Set up registers for RSS.
    4369  *
    4370  * XXX VMDq is not yet supported.
   4371  */
   4372 static void
   4373 wm_init_rss(struct wm_softc *sc)
   4374 {
   4375 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4376 	int i;
   4377 
   4378 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4379 
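         	/* Spread the RETA entries over the queues in round-robin */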
   4380 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4381 		int qid, reta_ent;
   4382 
   4383 		qid  = i % sc->sc_nqueues;
    4384 		switch (sc->sc_type) {
   4385 		case WM_T_82574:
   4386 			reta_ent = __SHIFTIN(qid,
   4387 			    RETA_ENT_QINDEX_MASK_82574);
   4388 			break;
   4389 		case WM_T_82575:
   4390 			reta_ent = __SHIFTIN(qid,
   4391 			    RETA_ENT_QINDEX1_MASK_82575);
   4392 			break;
   4393 		default:
   4394 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4395 			break;
   4396 		}
   4397 
   4398 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4399 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4400 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4401 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4402 	}
   4403 
   4404 	wm_rss_getkey((uint8_t *)rss_key);
   4405 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4406 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4407 
   4408 	if (sc->sc_type == WM_T_82574)
   4409 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4410 	else
   4411 		mrqc = MRQC_ENABLE_RSS_MQ;
   4412 
    4413 	/*
    4414 	 * XXX The same as FreeBSD igb.
    4415 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4416 	 */
   4417 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4418 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4419 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4420 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4421 
   4422 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4423 }
   4424 
   4425 /*
    4426  * Adjust the TX and RX queue numbers which the system actually uses.
    4427  *
    4428  * The numbers are affected by the parameters below:
    4429  *     - The number of hardware queues
   4430  *     - The number of MSI-X vectors (= "nvectors" argument)
   4431  *     - ncpu
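          *
          * For example (illustrative numbers): an 82576 has 16 hardware
          * queues, so with 5 MSI-X vectors and 8 CPUs the result is
          * min(16, 5 - 1, 8) = 4 queues; one vector is reserved for the
          * link interrupt.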
   4432  */
   4433 static void
   4434 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4435 {
   4436 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4437 
   4438 	if (nvectors < 2) {
   4439 		sc->sc_nqueues = 1;
   4440 		return;
   4441 	}
   4442 
    4443 	switch (sc->sc_type) {
   4444 	case WM_T_82572:
   4445 		hw_ntxqueues = 2;
   4446 		hw_nrxqueues = 2;
   4447 		break;
   4448 	case WM_T_82574:
   4449 		hw_ntxqueues = 2;
   4450 		hw_nrxqueues = 2;
   4451 		break;
   4452 	case WM_T_82575:
   4453 		hw_ntxqueues = 4;
   4454 		hw_nrxqueues = 4;
   4455 		break;
   4456 	case WM_T_82576:
   4457 		hw_ntxqueues = 16;
   4458 		hw_nrxqueues = 16;
   4459 		break;
   4460 	case WM_T_82580:
   4461 	case WM_T_I350:
   4462 	case WM_T_I354:
   4463 		hw_ntxqueues = 8;
   4464 		hw_nrxqueues = 8;
   4465 		break;
   4466 	case WM_T_I210:
   4467 		hw_ntxqueues = 4;
   4468 		hw_nrxqueues = 4;
   4469 		break;
   4470 	case WM_T_I211:
   4471 		hw_ntxqueues = 2;
   4472 		hw_nrxqueues = 2;
   4473 		break;
   4474 		/*
    4475 		 * Since the ethernet controllers below do not support
    4476 		 * MSI-X, this driver does not use multiqueue for them.
   4477 		 *     - WM_T_80003
   4478 		 *     - WM_T_ICH8
   4479 		 *     - WM_T_ICH9
   4480 		 *     - WM_T_ICH10
   4481 		 *     - WM_T_PCH
   4482 		 *     - WM_T_PCH2
   4483 		 *     - WM_T_PCH_LPT
   4484 		 */
   4485 	default:
   4486 		hw_ntxqueues = 1;
   4487 		hw_nrxqueues = 1;
   4488 		break;
   4489 	}
   4490 
   4491 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4492 
   4493 	/*
    4494 	 * Since using more queues than MSI-X vectors cannot improve
    4495 	 * scaling, we limit the number of queues actually used.
   4496 	 */
   4497 	if (nvectors < hw_nqueues + 1) {
   4498 		sc->sc_nqueues = nvectors - 1;
   4499 	} else {
   4500 		sc->sc_nqueues = hw_nqueues;
   4501 	}
   4502 
   4503 	/*
    4504 	 * Since using more queues than CPUs cannot improve scaling, we
    4505 	 * limit the number of queues actually used.
   4506 	 */
   4507 	if (ncpu < sc->sc_nqueues)
   4508 		sc->sc_nqueues = ncpu;
   4509 }
   4510 
   4511 /*
   4512  * Both single interrupt MSI and INTx can use this function.
   4513  */
   4514 static int
   4515 wm_setup_legacy(struct wm_softc *sc)
   4516 {
   4517 	pci_chipset_tag_t pc = sc->sc_pc;
   4518 	const char *intrstr = NULL;
   4519 	char intrbuf[PCI_INTRSTR_LEN];
   4520 	int error;
   4521 
   4522 	error = wm_alloc_txrx_queues(sc);
   4523 	if (error) {
   4524 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4525 		    error);
   4526 		return ENOMEM;
   4527 	}
   4528 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4529 	    sizeof(intrbuf));
   4530 #ifdef WM_MPSAFE
   4531 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4532 #endif
   4533 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4534 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4535 	if (sc->sc_ihs[0] == NULL) {
    4536 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4537 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4538 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4539 		return ENOMEM;
   4540 	}
   4541 
   4542 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4543 	sc->sc_nintrs = 1;
   4544 	return 0;
   4545 }
   4546 
   4547 static int
   4548 wm_setup_msix(struct wm_softc *sc)
   4549 {
   4550 	void *vih;
   4551 	kcpuset_t *affinity;
   4552 	int qidx, error, intr_idx, txrx_established;
   4553 	pci_chipset_tag_t pc = sc->sc_pc;
   4554 	const char *intrstr = NULL;
   4555 	char intrbuf[PCI_INTRSTR_LEN];
   4556 	char intr_xname[INTRDEVNAMEBUF];
   4557 
   4558 	if (sc->sc_nqueues < ncpu) {
   4559 		/*
    4560 		 * To avoid colliding with other devices' interrupts, the
    4561 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   4562 		 */
   4563 		sc->sc_affinity_offset = 1;
   4564 	} else {
   4565 		/*
    4566 		 * In this case this device uses all CPUs, so for readability
    4567 		 * we align the affinity cpu_index with the MSI-X vector number.
   4568 		 */
   4569 		sc->sc_affinity_offset = 0;
   4570 	}
   4571 
   4572 	error = wm_alloc_txrx_queues(sc);
   4573 	if (error) {
   4574 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4575 		    error);
   4576 		return ENOMEM;
   4577 	}
   4578 
   4579 	kcpuset_create(&affinity, false);
   4580 	intr_idx = 0;
   4581 
   4582 	/*
   4583 	 * TX and RX
   4584 	 */
   4585 	txrx_established = 0;
   4586 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4587 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4588 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4589 
   4590 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4591 		    sizeof(intrbuf));
   4592 #ifdef WM_MPSAFE
   4593 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4594 		    PCI_INTR_MPSAFE, true);
   4595 #endif
   4596 		memset(intr_xname, 0, sizeof(intr_xname));
   4597 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4598 		    device_xname(sc->sc_dev), qidx);
   4599 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4600 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4601 		if (vih == NULL) {
   4602 			aprint_error_dev(sc->sc_dev,
    4603 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   4604 			    intrstr ? " at " : "",
   4605 			    intrstr ? intrstr : "");
   4606 
   4607 			goto fail;
   4608 		}
   4609 		kcpuset_zero(affinity);
   4610 		/* Round-robin affinity */
   4611 		kcpuset_set(affinity, affinity_to);
   4612 		error = interrupt_distribute(vih, affinity, NULL);
   4613 		if (error == 0) {
   4614 			aprint_normal_dev(sc->sc_dev,
    4615 			    "for TX and RX interrupting at %s, affinity to %u\n",
   4616 			    intrstr, affinity_to);
   4617 		} else {
   4618 			aprint_normal_dev(sc->sc_dev,
   4619 			    "for TX and RX interrupting at %s\n", intrstr);
   4620 		}
   4621 		sc->sc_ihs[intr_idx] = vih;
    4622 		wmq->wmq_id = qidx;
   4623 		wmq->wmq_intr_idx = intr_idx;
   4624 
   4625 		txrx_established++;
   4626 		intr_idx++;
   4627 	}
   4628 
   4629 	/*
   4630 	 * LINK
   4631 	 */
   4632 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4633 	    sizeof(intrbuf));
   4634 #ifdef WM_MPSAFE
   4635 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4636 #endif
   4637 	memset(intr_xname, 0, sizeof(intr_xname));
   4638 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4639 	    device_xname(sc->sc_dev));
   4640 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
    4641 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4642 	if (vih == NULL) {
   4643 		aprint_error_dev(sc->sc_dev,
    4644 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4645 		    intrstr ? " at " : "",
   4646 		    intrstr ? intrstr : "");
   4647 
   4648 		goto fail;
   4649 	}
    4650 	/* Keep the default affinity for the LINK interrupt */
   4651 	aprint_normal_dev(sc->sc_dev,
   4652 	    "for LINK interrupting at %s\n", intrstr);
   4653 	sc->sc_ihs[intr_idx] = vih;
   4654 	sc->sc_link_intr_idx = intr_idx;
   4655 
   4656 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4657 	kcpuset_destroy(affinity);
   4658 	return 0;
   4659 
   4660  fail:
   4661 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4662 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4663 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4664 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4665 	}
   4666 
   4667 	kcpuset_destroy(affinity);
   4668 	return ENOMEM;
   4669 }
   4670 
   4671 static void
   4672 wm_turnon(struct wm_softc *sc)
   4673 {
   4674 	int i;
   4675 
   4676 	KASSERT(WM_CORE_LOCKED(sc));
   4677 
    4678 	for (i = 0; i < sc->sc_nqueues; i++) {
   4679 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4680 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4681 
   4682 		mutex_enter(txq->txq_lock);
   4683 		txq->txq_stopping = false;
   4684 		mutex_exit(txq->txq_lock);
   4685 
   4686 		mutex_enter(rxq->rxq_lock);
   4687 		rxq->rxq_stopping = false;
   4688 		mutex_exit(rxq->rxq_lock);
   4689 	}
   4690 
   4691 	sc->sc_core_stopping = false;
   4692 }
   4693 
   4694 static void
   4695 wm_turnoff(struct wm_softc *sc)
   4696 {
   4697 	int i;
   4698 
   4699 	KASSERT(WM_CORE_LOCKED(sc));
   4700 
   4701 	sc->sc_core_stopping = true;
   4702 
    4703 	for (i = 0; i < sc->sc_nqueues; i++) {
   4704 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4705 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4706 
   4707 		mutex_enter(rxq->rxq_lock);
   4708 		rxq->rxq_stopping = true;
   4709 		mutex_exit(rxq->rxq_lock);
   4710 
   4711 		mutex_enter(txq->txq_lock);
   4712 		txq->txq_stopping = true;
   4713 		mutex_exit(txq->txq_lock);
   4714 	}
   4715 }
   4716 
   4717 /*
   4718  * wm_init:		[ifnet interface function]
   4719  *
   4720  *	Initialize the interface.
   4721  */
   4722 static int
   4723 wm_init(struct ifnet *ifp)
   4724 {
   4725 	struct wm_softc *sc = ifp->if_softc;
   4726 	int ret;
   4727 
   4728 	WM_CORE_LOCK(sc);
   4729 	ret = wm_init_locked(ifp);
   4730 	WM_CORE_UNLOCK(sc);
   4731 
   4732 	return ret;
   4733 }
   4734 
   4735 static int
   4736 wm_init_locked(struct ifnet *ifp)
   4737 {
   4738 	struct wm_softc *sc = ifp->if_softc;
   4739 	int i, j, trynum, error = 0;
   4740 	uint32_t reg;
   4741 
   4742 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4743 		device_xname(sc->sc_dev), __func__));
   4744 	KASSERT(WM_CORE_LOCKED(sc));
   4745 
   4746 	/*
    4747 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4748 	 * There is a small but measurable benefit to avoiding the adjustment
   4749 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4750 	 * on such platforms.  One possibility is that the DMA itself is
   4751 	 * slightly more efficient if the front of the entire packet (instead
   4752 	 * of the front of the headers) is aligned.
   4753 	 *
   4754 	 * Note we must always set align_tweak to 0 if we are using
   4755 	 * jumbo frames.
   4756 	 */
   4757 #ifdef __NO_STRICT_ALIGNMENT
   4758 	sc->sc_align_tweak = 0;
   4759 #else
   4760 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4761 		sc->sc_align_tweak = 0;
   4762 	else
   4763 		sc->sc_align_tweak = 2;
   4764 #endif /* __NO_STRICT_ALIGNMENT */
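         	/*
         	 * Worked example (not in the original source), assuming the
         	 * usual MCLBYTES of 2048: for the standard MTU of 1500,
         	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518, which
         	 * is <= MCLBYTES - 2 = 2046, so align_tweak = 2; shifting the
         	 * 14-byte Ethernet header by two bytes makes the IP header
         	 * start on a 4-byte boundary.
         	 */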
   4765 
   4766 	/* Cancel any pending I/O. */
   4767 	wm_stop_locked(ifp, 0);
   4768 
   4769 	/* update statistics before reset */
   4770 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4771 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4772 
   4773 	/* PCH_SPT hardware workaround */
   4774 	if (sc->sc_type == WM_T_PCH_SPT)
   4775 		wm_flush_desc_rings(sc);
   4776 
   4777 	/* Reset the chip to a known state. */
   4778 	wm_reset(sc);
   4779 
   4780 	/* AMT based hardware can now take control from firmware */
   4781 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4782 		wm_get_hw_control(sc);
   4783 
   4784 	/* Init hardware bits */
   4785 	wm_initialize_hardware_bits(sc);
   4786 
   4787 	/* Reset the PHY. */
   4788 	if (sc->sc_flags & WM_F_HAS_MII)
   4789 		wm_gmii_reset(sc);
   4790 
   4791 	/* Calculate (E)ITR value */
   4792 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4793 		sc->sc_itr = 450;	/* For EITR */
   4794 	} else if (sc->sc_type >= WM_T_82543) {
   4795 		/*
   4796 		 * Set up the interrupt throttling register (units of 256ns)
   4797 		 * Note that a footnote in Intel's documentation says this
   4798 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4799 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4800 		 * that that is also true for the 1024ns units of the other
   4801 		 * interrupt-related timer registers -- so, really, we ought
   4802 		 * to divide this value by 4 when the link speed is low.
   4803 		 *
   4804 		 * XXX implement this division at link speed change!
   4805 		 */
   4806 
   4807 		/*
   4808 		 * For N interrupts/sec, set this value to:
   4809 		 * 1000000000 / (N * 256).  Note that we set the
   4810 		 * absolute and packet timer values to this value
   4811 		 * divided by 4 to get "simple timer" behavior.
   4812 		 */
   4813 
   4814 		sc->sc_itr = 1500;		/* 2604 ints/sec */
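         		/*
         		 * Check of the arithmetic above (not in the original
         		 * source): 1000000000 / (1500 * 256) ~= 2604, matching
         		 * the "2604 ints/sec" annotation, and the absolute and
         		 * packet timers get 1500 / 4 = 375 of their 1024ns
         		 * units, i.e. the same 384us interval.
         		 */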
   4815 	}
   4816 
   4817 	error = wm_init_txrx_queues(sc);
   4818 	if (error)
   4819 		goto out;
   4820 
   4821 	/*
   4822 	 * Clear out the VLAN table -- we don't use it (yet).
   4823 	 */
   4824 	CSR_WRITE(sc, WMREG_VET, 0);
   4825 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4826 		trynum = 10; /* Due to hw errata */
   4827 	else
   4828 		trynum = 1;
   4829 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4830 		for (j = 0; j < trynum; j++)
   4831 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4832 
   4833 	/*
   4834 	 * Set up flow-control parameters.
   4835 	 *
   4836 	 * XXX Values could probably stand some tuning.
   4837 	 */
   4838 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4839 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4840 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4841 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4842 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4843 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4844 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4845 	}
   4846 
   4847 	sc->sc_fcrtl = FCRTL_DFLT;
   4848 	if (sc->sc_type < WM_T_82543) {
   4849 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4850 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4851 	} else {
   4852 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4853 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4854 	}
   4855 
   4856 	if (sc->sc_type == WM_T_80003)
   4857 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4858 	else
   4859 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4860 
    4861 	/* Write the control register. */
   4862 	wm_set_vlan(sc);
   4863 
   4864 	if (sc->sc_flags & WM_F_HAS_MII) {
   4865 		int val;
   4866 
   4867 		switch (sc->sc_type) {
   4868 		case WM_T_80003:
   4869 		case WM_T_ICH8:
   4870 		case WM_T_ICH9:
   4871 		case WM_T_ICH10:
   4872 		case WM_T_PCH:
   4873 		case WM_T_PCH2:
   4874 		case WM_T_PCH_LPT:
   4875 		case WM_T_PCH_SPT:
   4876 			/*
   4877 			 * Set the mac to wait the maximum time between each
   4878 			 * iteration and increase the max iterations when
   4879 			 * polling the phy; this fixes erroneous timeouts at
   4880 			 * 10Mbps.
   4881 			 */
   4882 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4883 			    0xFFFF);
   4884 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4885 			val |= 0x3F;
   4886 			wm_kmrn_writereg(sc,
   4887 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4888 			break;
   4889 		default:
   4890 			break;
   4891 		}
   4892 
   4893 		if (sc->sc_type == WM_T_80003) {
   4894 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4895 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4896 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4897 
    4898 			/* Bypass RX and TX FIFOs */
   4899 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4900 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4901 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4902 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4903 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4904 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4905 		}
   4906 	}
   4907 #if 0
   4908 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4909 #endif
   4910 
   4911 	/* Set up checksum offload parameters. */
   4912 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4913 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4914 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4915 		reg |= RXCSUM_IPOFL;
   4916 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4917 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4918 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4919 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4920 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4921 
   4922 	/* Set up MSI-X */
   4923 	if (sc->sc_nintrs > 1) {
   4924 		uint32_t ivar;
   4925 		struct wm_queue *wmq;
   4926 		int qid, qintr_idx;
   4927 
   4928 		if (sc->sc_type == WM_T_82575) {
   4929 			/* Interrupt control */
   4930 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4931 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4932 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4933 
   4934 			/* TX and RX */
   4935 			for (i = 0; i < sc->sc_nqueues; i++) {
   4936 				wmq = &sc->sc_queue[i];
   4937 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4938 				    EITR_TX_QUEUE(wmq->wmq_id)
   4939 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4940 			}
   4941 			/* Link status */
   4942 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4943 			    EITR_OTHER);
   4944 		} else if (sc->sc_type == WM_T_82574) {
   4945 			/* Interrupt control */
   4946 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4947 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4948 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4949 
   4950 			ivar = 0;
   4951 			/* TX and RX */
   4952 			for (i = 0; i < sc->sc_nqueues; i++) {
   4953 				wmq = &sc->sc_queue[i];
   4954 				qid = wmq->wmq_id;
   4955 				qintr_idx = wmq->wmq_intr_idx;
   4956 
   4957 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4958 				    IVAR_TX_MASK_Q_82574(qid));
   4959 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4960 				    IVAR_RX_MASK_Q_82574(qid));
   4961 			}
   4962 			/* Link status */
   4963 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4964 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4965 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4966 		} else {
   4967 			/* Interrupt control */
   4968 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4969 			    | GPIE_EIAME | GPIE_PBA);
   4970 
   4971 			switch (sc->sc_type) {
   4972 			case WM_T_82580:
   4973 			case WM_T_I350:
   4974 			case WM_T_I354:
   4975 			case WM_T_I210:
   4976 			case WM_T_I211:
   4977 				/* TX and RX */
   4978 				for (i = 0; i < sc->sc_nqueues; i++) {
   4979 					wmq = &sc->sc_queue[i];
   4980 					qid = wmq->wmq_id;
   4981 					qintr_idx = wmq->wmq_intr_idx;
   4982 
   4983 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4984 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4985 					ivar |= __SHIFTIN((qintr_idx
   4986 						| IVAR_VALID),
   4987 					    IVAR_TX_MASK_Q(qid));
   4988 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4989 					ivar |= __SHIFTIN((qintr_idx
   4990 						| IVAR_VALID),
   4991 					    IVAR_RX_MASK_Q(qid));
   4992 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4993 				}
   4994 				break;
   4995 			case WM_T_82576:
   4996 				/* TX and RX */
   4997 				for (i = 0; i < sc->sc_nqueues; i++) {
   4998 					wmq = &sc->sc_queue[i];
   4999 					qid = wmq->wmq_id;
   5000 					qintr_idx = wmq->wmq_intr_idx;
   5001 
   5002 					ivar = CSR_READ(sc,
   5003 					    WMREG_IVAR_Q_82576(qid));
   5004 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5005 					ivar |= __SHIFTIN((qintr_idx
   5006 						| IVAR_VALID),
   5007 					    IVAR_TX_MASK_Q_82576(qid));
   5008 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5009 					ivar |= __SHIFTIN((qintr_idx
   5010 						| IVAR_VALID),
   5011 					    IVAR_RX_MASK_Q_82576(qid));
   5012 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5013 					    ivar);
   5014 				}
   5015 				break;
   5016 			default:
   5017 				break;
   5018 			}
   5019 
   5020 			/* Link status */
   5021 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5022 			    IVAR_MISC_OTHER);
   5023 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5024 		}
   5025 
   5026 		if (sc->sc_nqueues > 1) {
   5027 			wm_init_rss(sc);
   5028 
   5029 			/*
    5030 			 * NOTE: Receive Full-Packet Checksum Offload
    5031 			 * is mutually exclusive with Multiqueue. However,
    5032 			 * this is not the same as TCP/IP checksums, which
    5033 			 * still work.
    5034 			 */
   5035 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5036 			reg |= RXCSUM_PCSD;
   5037 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5038 		}
   5039 	}
   5040 
   5041 	/* Set up the interrupt registers. */
   5042 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5043 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5044 	    ICR_RXO | ICR_RXT0;
   5045 	if (sc->sc_nintrs > 1) {
   5046 		uint32_t mask;
   5047 		struct wm_queue *wmq;
   5048 
   5049 		switch (sc->sc_type) {
   5050 		case WM_T_82574:
   5051 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5052 			    WMREG_EIAC_82574_MSIX_MASK);
   5053 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5054 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5055 			break;
   5056 		default:
   5057 			if (sc->sc_type == WM_T_82575) {
   5058 				mask = 0;
   5059 				for (i = 0; i < sc->sc_nqueues; i++) {
   5060 					wmq = &sc->sc_queue[i];
   5061 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5062 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5063 				}
   5064 				mask |= EITR_OTHER;
   5065 			} else {
   5066 				mask = 0;
   5067 				for (i = 0; i < sc->sc_nqueues; i++) {
   5068 					wmq = &sc->sc_queue[i];
   5069 					mask |= 1 << wmq->wmq_intr_idx;
   5070 				}
   5071 				mask |= 1 << sc->sc_link_intr_idx;
   5072 			}
   5073 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5074 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5075 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5076 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5077 			break;
   5078 		}
   5079 	} else
   5080 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5081 
   5082 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5083 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5084 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5085 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5086 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5087 		reg |= KABGTXD_BGSQLBIAS;
   5088 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5089 	}
   5090 
   5091 	/* Set up the inter-packet gap. */
   5092 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5093 
   5094 	if (sc->sc_type >= WM_T_82543) {
   5095 		/*
   5096 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   5097 		 * the multi queue function with MSI-X.
   5098 		 */
   5099 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5100 			int qidx;
   5101 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5102 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5103 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5104 				    sc->sc_itr);
   5105 			}
   5106 			/*
    5107 			 * Link interrupts occur much less frequently than
    5108 			 * TX and RX interrupts, so we don't tune the
    5109 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5110 			 * FreeBSD's if_igb does.
   5111 			 */
   5112 		} else
   5113 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5114 	}
   5115 
   5116 	/* Set the VLAN ethernetype. */
   5117 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5118 
   5119 	/*
   5120 	 * Set up the transmit control register; we start out with
    5121 	 * a collision distance suitable for FDX, but update it when
   5122 	 * we resolve the media type.
   5123 	 */
   5124 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5125 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5126 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5127 	if (sc->sc_type >= WM_T_82571)
   5128 		sc->sc_tctl |= TCTL_MULR;
   5129 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5130 
   5131 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5132 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5133 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5134 	}
   5135 
   5136 	if (sc->sc_type == WM_T_80003) {
   5137 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5138 		reg &= ~TCTL_EXT_GCEX_MASK;
   5139 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5140 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5141 	}
   5142 
   5143 	/* Set the media. */
   5144 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5145 		goto out;
   5146 
   5147 	/* Configure for OS presence */
   5148 	wm_init_manageability(sc);
   5149 
   5150 	/*
   5151 	 * Set up the receive control register; we actually program
   5152 	 * the register when we set the receive filter.  Use multicast
   5153 	 * address offset type 0.
   5154 	 *
   5155 	 * Only the i82544 has the ability to strip the incoming
   5156 	 * CRC, so we don't enable that feature.
   5157 	 */
   5158 	sc->sc_mchash_type = 0;
   5159 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5160 	    | RCTL_MO(sc->sc_mchash_type);
   5161 
   5162 	/*
    5163 	 * The I350 has a bug where it always strips the CRC whether asked
    5164 	 * to or not, so ask for the stripped CRC here and cope in rxeof.
    5165 	 */
   5166 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5167 	    || (sc->sc_type == WM_T_I210))
   5168 		sc->sc_rctl |= RCTL_SECRC;
   5169 
   5170 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5171 	    && (ifp->if_mtu > ETHERMTU)) {
   5172 		sc->sc_rctl |= RCTL_LPE;
   5173 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5174 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5175 	}
   5176 
   5177 	if (MCLBYTES == 2048) {
   5178 		sc->sc_rctl |= RCTL_2k;
   5179 	} else {
   5180 		if (sc->sc_type >= WM_T_82543) {
   5181 			switch (MCLBYTES) {
   5182 			case 4096:
   5183 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5184 				break;
   5185 			case 8192:
   5186 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5187 				break;
   5188 			case 16384:
   5189 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5190 				break;
   5191 			default:
   5192 				panic("wm_init: MCLBYTES %d unsupported",
   5193 				    MCLBYTES);
   5194 				break;
   5195 			}
    5196 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5197 	}
   5198 
   5199 	/* Set the receive filter. */
   5200 	wm_set_filter(sc);
   5201 
   5202 	/* Enable ECC */
   5203 	switch (sc->sc_type) {
   5204 	case WM_T_82571:
   5205 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5206 		reg |= PBA_ECC_CORR_EN;
   5207 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5208 		break;
   5209 	case WM_T_PCH_LPT:
   5210 	case WM_T_PCH_SPT:
   5211 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5212 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5213 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5214 
   5215 		sc->sc_ctrl |= CTRL_MEHE;
   5216 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5217 		break;
   5218 	default:
   5219 		break;
   5220 	}
   5221 
    5222 	/* On 82575 and later, set RDT only if RX is enabled */
   5223 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5224 		int qidx;
   5225 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5226 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5227 			for (i = 0; i < WM_NRXDESC; i++) {
   5228 				mutex_enter(rxq->rxq_lock);
   5229 				wm_init_rxdesc(rxq, i);
    5230 				mutex_exit(rxq->rxq_lock);
    5232 			}
   5233 		}
   5234 	}
   5235 
   5236 	wm_turnon(sc);
   5237 
   5238 	/* Start the one second link check clock. */
   5239 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5240 
   5241 	/* ...all done! */
   5242 	ifp->if_flags |= IFF_RUNNING;
   5243 	ifp->if_flags &= ~IFF_OACTIVE;
   5244 
   5245  out:
   5246 	sc->sc_if_flags = ifp->if_flags;
   5247 	if (error)
   5248 		log(LOG_ERR, "%s: interface not running\n",
   5249 		    device_xname(sc->sc_dev));
   5250 	return error;
   5251 }
   5252 
   5253 /*
   5254  * wm_stop:		[ifnet interface function]
   5255  *
   5256  *	Stop transmission on the interface.
   5257  */
   5258 static void
   5259 wm_stop(struct ifnet *ifp, int disable)
   5260 {
   5261 	struct wm_softc *sc = ifp->if_softc;
   5262 
   5263 	WM_CORE_LOCK(sc);
   5264 	wm_stop_locked(ifp, disable);
   5265 	WM_CORE_UNLOCK(sc);
   5266 }
   5267 
   5268 static void
   5269 wm_stop_locked(struct ifnet *ifp, int disable)
   5270 {
   5271 	struct wm_softc *sc = ifp->if_softc;
   5272 	struct wm_txsoft *txs;
   5273 	int i, qidx;
   5274 
   5275 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5276 		device_xname(sc->sc_dev), __func__));
   5277 	KASSERT(WM_CORE_LOCKED(sc));
   5278 
   5279 	wm_turnoff(sc);
   5280 
   5281 	/* Stop the one second clock. */
   5282 	callout_stop(&sc->sc_tick_ch);
   5283 
   5284 	/* Stop the 82547 Tx FIFO stall check timer. */
   5285 	if (sc->sc_type == WM_T_82547)
   5286 		callout_stop(&sc->sc_txfifo_ch);
   5287 
   5288 	if (sc->sc_flags & WM_F_HAS_MII) {
   5289 		/* Down the MII. */
   5290 		mii_down(&sc->sc_mii);
   5291 	} else {
   5292 #if 0
   5293 		/* Should we clear PHY's status properly? */
   5294 		wm_reset(sc);
   5295 #endif
   5296 	}
   5297 
   5298 	/* Stop the transmit and receive processes. */
   5299 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5300 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5301 	sc->sc_rctl &= ~RCTL_EN;
   5302 
   5303 	/*
   5304 	 * Clear the interrupt mask to ensure the device cannot assert its
   5305 	 * interrupt line.
   5306 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5307 	 * service any currently pending or shared interrupt.
   5308 	 */
   5309 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5310 	sc->sc_icr = 0;
   5311 	if (sc->sc_nintrs > 1) {
   5312 		if (sc->sc_type != WM_T_82574) {
   5313 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5314 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5315 		} else
   5316 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5317 	}
   5318 
   5319 	/* Release any queued transmit buffers. */
   5320 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5321 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5322 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5323 		mutex_enter(txq->txq_lock);
   5324 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5325 			txs = &txq->txq_soft[i];
   5326 			if (txs->txs_mbuf != NULL) {
    5327 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5328 				m_freem(txs->txs_mbuf);
   5329 				txs->txs_mbuf = NULL;
   5330 			}
   5331 		}
   5332 		mutex_exit(txq->txq_lock);
   5333 	}
   5334 
   5335 	/* Mark the interface as down and cancel the watchdog timer. */
   5336 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5337 	ifp->if_timer = 0;
   5338 
   5339 	if (disable) {
   5340 		for (i = 0; i < sc->sc_nqueues; i++) {
   5341 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5342 			mutex_enter(rxq->rxq_lock);
   5343 			wm_rxdrain(rxq);
   5344 			mutex_exit(rxq->rxq_lock);
   5345 		}
   5346 	}
   5347 
   5348 #if 0 /* notyet */
   5349 	if (sc->sc_type >= WM_T_82544)
   5350 		CSR_WRITE(sc, WMREG_WUC, 0);
   5351 #endif
   5352 }
   5353 
   5354 static void
   5355 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5356 {
   5357 	struct mbuf *m;
   5358 	int i;
   5359 
   5360 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5361 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5362 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5363 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5364 		    m->m_data, m->m_len, m->m_flags);
   5365 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5366 	    i, i == 1 ? "" : "s");
   5367 }
   5368 
   5369 /*
   5370  * wm_82547_txfifo_stall:
   5371  *
   5372  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5373  *	reset the FIFO pointers, and restart packet transmission.
   5374  */
   5375 static void
   5376 wm_82547_txfifo_stall(void *arg)
   5377 {
   5378 	struct wm_softc *sc = arg;
   5379 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5380 
   5381 	mutex_enter(txq->txq_lock);
   5382 
   5383 	if (txq->txq_stopping)
   5384 		goto out;
   5385 
   5386 	if (txq->txq_fifo_stall) {
   5387 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5388 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5389 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5390 			/*
   5391 			 * Packets have drained.  Stop transmitter, reset
   5392 			 * FIFO pointers, restart transmitter, and kick
   5393 			 * the packet queue.
   5394 			 */
   5395 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5396 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5397 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5398 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5399 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5400 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5401 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5402 			CSR_WRITE_FLUSH(sc);
   5403 
   5404 			txq->txq_fifo_head = 0;
   5405 			txq->txq_fifo_stall = 0;
   5406 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5407 		} else {
   5408 			/*
   5409 			 * Still waiting for packets to drain; try again in
   5410 			 * another tick.
   5411 			 */
   5412 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5413 		}
   5414 	}
   5415 
   5416 out:
   5417 	mutex_exit(txq->txq_lock);
   5418 }
   5419 
   5420 /*
   5421  * wm_82547_txfifo_bugchk:
   5422  *
   5423  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5424  *	prevent enqueueing a packet that would wrap around the end
    5425  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5426  *
   5427  *	We do this by checking the amount of space before the end
   5428  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5429  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5430  *	the internal FIFO pointers to the beginning, and restart
   5431  *	transmission on the interface.
   5432  */
   5433 #define	WM_FIFO_HDR		0x10
   5434 #define	WM_82547_PAD_LEN	0x3e0
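         /*
          * Worked example (not in the original source): a full-size 1514-byte
          * frame occupies len = roundup(1514 + 0x10, 0x10) = 0x600 bytes of
          * FIFO.  If only space = 0x200 bytes remain before the end of the
          * FIFO, then len (0x600) >= WM_82547_PAD_LEN + space (0x5e0), so
          * transmission stalls until wm_82547_txfifo_stall() sees the FIFO
          * drain and resets the pointers.
          */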
   5435 static int
   5436 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5437 {
   5438 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5439 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5440 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5441 
   5442 	/* Just return if already stalled. */
   5443 	if (txq->txq_fifo_stall)
   5444 		return 1;
   5445 
   5446 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5447 		/* Stall only occurs in half-duplex mode. */
   5448 		goto send_packet;
   5449 	}
   5450 
   5451 	if (len >= WM_82547_PAD_LEN + space) {
   5452 		txq->txq_fifo_stall = 1;
   5453 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5454 		return 1;
   5455 	}
   5456 
   5457  send_packet:
   5458 	txq->txq_fifo_head += len;
   5459 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5460 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5461 
   5462 	return 0;
   5463 }
   5464 
   5465 static int
   5466 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5467 {
   5468 	int error;
   5469 
   5470 	/*
   5471 	 * Allocate the control data structures, and create and load the
   5472 	 * DMA map for it.
   5473 	 *
   5474 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5475 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5476 	 * both sets within the same 4G segment.
   5477 	 */
   5478 	if (sc->sc_type < WM_T_82544)
   5479 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5480 	else
   5481 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5482 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5483 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5484 	else
   5485 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5486 
   5487 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5488 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5489 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5490 		aprint_error_dev(sc->sc_dev,
   5491 		    "unable to allocate TX control data, error = %d\n",
   5492 		    error);
   5493 		goto fail_0;
   5494 	}
   5495 
   5496 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5497 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5498 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5499 		aprint_error_dev(sc->sc_dev,
   5500 		    "unable to map TX control data, error = %d\n", error);
   5501 		goto fail_1;
   5502 	}
   5503 
   5504 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5505 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5506 		aprint_error_dev(sc->sc_dev,
   5507 		    "unable to create TX control data DMA map, error = %d\n",
   5508 		    error);
   5509 		goto fail_2;
   5510 	}
   5511 
   5512 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5513 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5514 		aprint_error_dev(sc->sc_dev,
   5515 		    "unable to load TX control data DMA map, error = %d\n",
   5516 		    error);
   5517 		goto fail_3;
   5518 	}
   5519 
   5520 	return 0;
   5521 
   5522  fail_3:
   5523 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5524  fail_2:
   5525 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5526 	    WM_TXDESCS_SIZE(txq));
   5527  fail_1:
   5528 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5529  fail_0:
   5530 	return error;
   5531 }
   5532 
   5533 static void
   5534 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5535 {
   5536 
   5537 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5538 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5539 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5540 	    WM_TXDESCS_SIZE(txq));
   5541 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5542 }
   5543 
   5544 static int
   5545 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5546 {
   5547 	int error;
   5548 
   5549 	/*
   5550 	 * Allocate the control data structures, and create and load the
   5551 	 * DMA map for it.
   5552 	 *
   5553 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5554 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5555 	 * both sets within the same 4G segment.
   5556 	 */
   5557 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5558 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5559 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5560 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5561 		aprint_error_dev(sc->sc_dev,
   5562 		    "unable to allocate RX control data, error = %d\n",
   5563 		    error);
   5564 		goto fail_0;
   5565 	}
   5566 
   5567 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5568 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5569 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5570 		aprint_error_dev(sc->sc_dev,
   5571 		    "unable to map RX control data, error = %d\n", error);
   5572 		goto fail_1;
   5573 	}
   5574 
   5575 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5576 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5577 		aprint_error_dev(sc->sc_dev,
   5578 		    "unable to create RX control data DMA map, error = %d\n",
   5579 		    error);
   5580 		goto fail_2;
   5581 	}
   5582 
   5583 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5584 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5585 		aprint_error_dev(sc->sc_dev,
   5586 		    "unable to load RX control data DMA map, error = %d\n",
   5587 		    error);
   5588 		goto fail_3;
   5589 	}
   5590 
   5591 	return 0;
   5592 
   5593  fail_3:
   5594 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5595  fail_2:
   5596 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5597 	    rxq->rxq_desc_size);
   5598  fail_1:
   5599 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5600  fail_0:
   5601 	return error;
   5602 }
   5603 
   5604 static void
   5605 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5606 {
   5607 
   5608 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5609 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5610 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5611 	    rxq->rxq_desc_size);
   5612 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5613 }
   5614 
   5615 
   5616 static int
   5617 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5618 {
   5619 	int i, error;
   5620 
   5621 	/* Create the transmit buffer DMA maps. */
   5622 	WM_TXQUEUELEN(txq) =
   5623 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5624 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5625 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5626 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5627 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5628 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5629 			aprint_error_dev(sc->sc_dev,
   5630 			    "unable to create Tx DMA map %d, error = %d\n",
   5631 			    i, error);
   5632 			goto fail;
   5633 		}
   5634 	}
   5635 
   5636 	return 0;
   5637 
   5638  fail:
   5639 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5640 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5641 			bus_dmamap_destroy(sc->sc_dmat,
   5642 			    txq->txq_soft[i].txs_dmamap);
   5643 	}
   5644 	return error;
   5645 }
   5646 
   5647 static void
   5648 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5649 {
   5650 	int i;
   5651 
   5652 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5653 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5654 			bus_dmamap_destroy(sc->sc_dmat,
   5655 			    txq->txq_soft[i].txs_dmamap);
   5656 	}
   5657 }
   5658 
   5659 static int
   5660 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5661 {
   5662 	int i, error;
   5663 
   5664 	/* Create the receive buffer DMA maps. */
   5665 	for (i = 0; i < WM_NRXDESC; i++) {
   5666 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5667 			    MCLBYTES, 0, 0,
   5668 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5669 			aprint_error_dev(sc->sc_dev,
    5670 			    "unable to create Rx DMA map %d, error = %d\n",
   5671 			    i, error);
   5672 			goto fail;
   5673 		}
   5674 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5675 	}
   5676 
   5677 	return 0;
   5678 
   5679  fail:
   5680 	for (i = 0; i < WM_NRXDESC; i++) {
   5681 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5682 			bus_dmamap_destroy(sc->sc_dmat,
   5683 			    rxq->rxq_soft[i].rxs_dmamap);
   5684 	}
   5685 	return error;
   5686 }
   5687 
   5688 static void
   5689 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5690 {
   5691 	int i;
   5692 
   5693 	for (i = 0; i < WM_NRXDESC; i++) {
   5694 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5695 			bus_dmamap_destroy(sc->sc_dmat,
   5696 			    rxq->rxq_soft[i].rxs_dmamap);
   5697 	}
   5698 }
   5699 
   5700 /*
    5701  * wm_alloc_txrx_queues:
   5702  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5703  */
   5704 static int
   5705 wm_alloc_txrx_queues(struct wm_softc *sc)
   5706 {
   5707 	int i, error, tx_done, rx_done;
   5708 
   5709 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5710 	    KM_SLEEP);
   5711 	if (sc->sc_queue == NULL) {
    5712 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5713 		error = ENOMEM;
   5714 		goto fail_0;
   5715 	}
   5716 
   5717 	/*
   5718 	 * For transmission
   5719 	 */
   5720 	error = 0;
   5721 	tx_done = 0;
   5722 	for (i = 0; i < sc->sc_nqueues; i++) {
   5723 #ifdef WM_EVENT_COUNTERS
   5724 		int j;
   5725 		const char *xname;
   5726 #endif
   5727 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5728 		txq->txq_sc = sc;
   5729 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5730 
   5731 		error = wm_alloc_tx_descs(sc, txq);
   5732 		if (error)
   5733 			break;
   5734 		error = wm_alloc_tx_buffer(sc, txq);
   5735 		if (error) {
   5736 			wm_free_tx_descs(sc, txq);
   5737 			break;
   5738 		}
   5739 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5740 		if (txq->txq_interq == NULL) {
   5741 			wm_free_tx_descs(sc, txq);
   5742 			wm_free_tx_buffer(sc, txq);
   5743 			error = ENOMEM;
   5744 			break;
   5745 		}
   5746 
   5747 #ifdef WM_EVENT_COUNTERS
   5748 		xname = device_xname(sc->sc_dev);
   5749 
   5750 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5751 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5752 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5753 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5754 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5755 
   5756 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5757 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5758 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5759 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5760 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5761 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5762 
   5763 		for (j = 0; j < WM_NTXSEGS; j++) {
   5764 			snprintf(txq->txq_txseg_evcnt_names[j],
   5765 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5766 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5767 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5768 		}
   5769 
   5770 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5771 
   5772 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5773 #endif /* WM_EVENT_COUNTERS */
   5774 
   5775 		tx_done++;
   5776 	}
   5777 	if (error)
   5778 		goto fail_1;
   5779 
   5780 	/*
    5781 	 * For receive
   5782 	 */
   5783 	error = 0;
   5784 	rx_done = 0;
   5785 	for (i = 0; i < sc->sc_nqueues; i++) {
   5786 #ifdef WM_EVENT_COUNTERS
   5787 		const char *xname;
   5788 #endif
   5789 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5790 		rxq->rxq_sc = sc;
   5791 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5792 
   5793 		error = wm_alloc_rx_descs(sc, rxq);
   5794 		if (error)
   5795 			break;
   5796 
   5797 		error = wm_alloc_rx_buffer(sc, rxq);
   5798 		if (error) {
   5799 			wm_free_rx_descs(sc, rxq);
   5800 			break;
   5801 		}
   5802 
   5803 #ifdef WM_EVENT_COUNTERS
   5804 		xname = device_xname(sc->sc_dev);
   5805 
   5806 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5807 
   5808 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5809 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5810 #endif /* WM_EVENT_COUNTERS */
   5811 
   5812 		rx_done++;
   5813 	}
   5814 	if (error)
   5815 		goto fail_2;
   5816 
   5817 	return 0;
   5818 
   5819  fail_2:
   5820 	for (i = 0; i < rx_done; i++) {
   5821 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5822 		wm_free_rx_buffer(sc, rxq);
   5823 		wm_free_rx_descs(sc, rxq);
   5824 		if (rxq->rxq_lock)
   5825 			mutex_obj_free(rxq->rxq_lock);
   5826 	}
   5827  fail_1:
   5828 	for (i = 0; i < tx_done; i++) {
   5829 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5830 		pcq_destroy(txq->txq_interq);
   5831 		wm_free_tx_buffer(sc, txq);
   5832 		wm_free_tx_descs(sc, txq);
   5833 		if (txq->txq_lock)
   5834 			mutex_obj_free(txq->txq_lock);
   5835 	}
   5836 
   5837 	kmem_free(sc->sc_queue,
   5838 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5839  fail_0:
   5840 	return error;
   5841 }
   5842 
   5843 /*
    5844  * wm_free_txrx_queues:
   5845  *	Free {tx,rx}descs and {tx,rx} buffers
   5846  */
   5847 static void
   5848 wm_free_txrx_queues(struct wm_softc *sc)
   5849 {
   5850 	int i;
   5851 
   5852 	for (i = 0; i < sc->sc_nqueues; i++) {
   5853 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5854 		wm_free_rx_buffer(sc, rxq);
   5855 		wm_free_rx_descs(sc, rxq);
   5856 		if (rxq->rxq_lock)
   5857 			mutex_obj_free(rxq->rxq_lock);
   5858 	}
   5859 
   5860 	for (i = 0; i < sc->sc_nqueues; i++) {
   5861 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5862 		wm_free_tx_buffer(sc, txq);
   5863 		wm_free_tx_descs(sc, txq);
   5864 		if (txq->txq_lock)
   5865 			mutex_obj_free(txq->txq_lock);
   5866 	}
   5867 
   5868 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5869 }
   5870 
   5871 static void
   5872 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5873 {
   5874 
   5875 	KASSERT(mutex_owned(txq->txq_lock));
   5876 
   5877 	/* Initialize the transmit descriptor ring. */
   5878 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5879 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5880 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5881 	txq->txq_free = WM_NTXDESC(txq);
   5882 	txq->txq_next = 0;
   5883 }
   5884 
   5885 static void
   5886 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5887     struct wm_txqueue *txq)
   5888 {
   5889 
   5890 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5891 		device_xname(sc->sc_dev), __func__));
   5892 	KASSERT(mutex_owned(txq->txq_lock));
   5893 
   5894 	if (sc->sc_type < WM_T_82543) {
   5895 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5896 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5897 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5898 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5899 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5900 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5901 	} else {
   5902 		int qid = wmq->wmq_id;
   5903 
   5904 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5905 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5906 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5907 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5908 
   5909 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5910 			/*
   5911 			 * Don't write TDT before TCTL.EN is set.
    5912 			 * See the documentation.
   5913 			 */
   5914 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5915 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5916 			    | TXDCTL_WTHRESH(0));
   5917 		else {
   5918 			/* ITR / 4 */
   5919 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5920 			if (sc->sc_type >= WM_T_82540) {
    5921 				/* Should be the same as TIDV */
   5922 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5923 			}
   5924 
   5925 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5926 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5927 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5928 		}
   5929 	}
   5930 }
   5931 
   5932 static void
   5933 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5934 {
   5935 	int i;
   5936 
   5937 	KASSERT(mutex_owned(txq->txq_lock));
   5938 
   5939 	/* Initialize the transmit job descriptors. */
   5940 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5941 		txq->txq_soft[i].txs_mbuf = NULL;
   5942 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5943 	txq->txq_snext = 0;
   5944 	txq->txq_sdirty = 0;
   5945 }
   5946 
   5947 static void
   5948 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5949     struct wm_txqueue *txq)
   5950 {
   5951 
   5952 	KASSERT(mutex_owned(txq->txq_lock));
   5953 
   5954 	/*
   5955 	 * Set up some register offsets that are different between
   5956 	 * the i82542 and the i82543 and later chips.
   5957 	 */
   5958 	if (sc->sc_type < WM_T_82543)
   5959 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5960 	else
   5961 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5962 
   5963 	wm_init_tx_descs(sc, txq);
   5964 	wm_init_tx_regs(sc, wmq, txq);
   5965 	wm_init_tx_buffer(sc, txq);
   5966 }
   5967 
   5968 static void
   5969 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5970     struct wm_rxqueue *rxq)
   5971 {
   5972 
   5973 	KASSERT(mutex_owned(rxq->rxq_lock));
   5974 
   5975 	/*
   5976 	 * Initialize the receive descriptor and receive job
   5977 	 * descriptor rings.
   5978 	 */
   5979 	if (sc->sc_type < WM_T_82543) {
   5980 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5981 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5982 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5983 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5984 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5985 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5986 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5987 
   5988 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5989 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5990 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5991 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5992 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5993 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5994 	} else {
   5995 		int qid = wmq->wmq_id;
   5996 
   5997 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5998 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5999 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   6000 
   6001 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6002 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6003 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   6004 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   6005 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
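         			/*
         			 * Illustrative arithmetic (not in the original
         			 * source), assuming SRRCTL_BSIZEPKT_SHIFT is 10:
         			 * the buffer size is programmed in 1KB units, so
         			 * MCLBYTES = 2048 is written as 2048 >> 10 = 2,
         			 * and the check above rejects any MCLBYTES that
         			 * is not a multiple of 1024.
         			 */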
   6006 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6007 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6008 			    | RXDCTL_WTHRESH(1));
   6009 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6010 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6011 		} else {
   6012 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6013 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6014 			/* ITR / 4 */
   6015 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    6016 			/* MUST be the same as RDTR */
   6017 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6018 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6019 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6020 		}
   6021 	}
   6022 }
   6023 
   6024 static int
   6025 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6026 {
   6027 	struct wm_rxsoft *rxs;
   6028 	int error, i;
   6029 
   6030 	KASSERT(mutex_owned(rxq->rxq_lock));
   6031 
   6032 	for (i = 0; i < WM_NRXDESC; i++) {
   6033 		rxs = &rxq->rxq_soft[i];
   6034 		if (rxs->rxs_mbuf == NULL) {
   6035 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6036 				log(LOG_ERR, "%s: unable to allocate or map "
   6037 				    "rx buffer %d, error = %d\n",
   6038 				    device_xname(sc->sc_dev), i, error);
   6039 				/*
   6040 				 * XXX Should attempt to run with fewer receive
   6041 				 * XXX buffers instead of just failing.
   6042 				 */
   6043 				wm_rxdrain(rxq);
   6044 				return ENOMEM;
   6045 			}
   6046 		} else {
   6047 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6048 				wm_init_rxdesc(rxq, i);
   6049 			/*
    6050 			 * For 82575 and newer devices, the RX descriptors
    6051 			 * must be initialized after the setting of RCTL.EN in
    6052 			 * wm_set_filter().
   6053 			 */
   6054 		}
   6055 	}
   6056 	rxq->rxq_ptr = 0;
   6057 	rxq->rxq_discard = 0;
   6058 	WM_RXCHAIN_RESET(rxq);
   6059 
   6060 	return 0;
   6061 }
   6062 
   6063 static int
   6064 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6065     struct wm_rxqueue *rxq)
   6066 {
   6067 
   6068 	KASSERT(mutex_owned(rxq->rxq_lock));
   6069 
   6070 	/*
   6071 	 * Set up some register offsets that are different between
   6072 	 * the i82542 and the i82543 and later chips.
   6073 	 */
   6074 	if (sc->sc_type < WM_T_82543)
   6075 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6076 	else
   6077 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6078 
   6079 	wm_init_rx_regs(sc, wmq, rxq);
   6080 	return wm_init_rx_buffer(sc, rxq);
   6081 }
   6082 
   6083 /*
    6084  * wm_init_txrx_queues:
   6085  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6086  */
   6087 static int
   6088 wm_init_txrx_queues(struct wm_softc *sc)
   6089 {
   6090 	int i, error = 0;
   6091 
   6092 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6093 		device_xname(sc->sc_dev), __func__));
   6094 
   6095 	for (i = 0; i < sc->sc_nqueues; i++) {
   6096 		struct wm_queue *wmq = &sc->sc_queue[i];
   6097 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6098 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6099 
   6100 		mutex_enter(txq->txq_lock);
   6101 		wm_init_tx_queue(sc, wmq, txq);
   6102 		mutex_exit(txq->txq_lock);
   6103 
   6104 		mutex_enter(rxq->rxq_lock);
   6105 		error = wm_init_rx_queue(sc, wmq, rxq);
   6106 		mutex_exit(rxq->rxq_lock);
   6107 		if (error)
   6108 			break;
   6109 	}
   6110 
   6111 	return error;
   6112 }
   6113 
   6114 /*
   6115  * wm_tx_offload:
   6116  *
   6117  *	Set up TCP/IP checksumming parameters for the
   6118  *	specified packet.
   6119  */
   6120 static int
   6121 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6122     uint8_t *fieldsp)
   6123 {
   6124 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6125 	struct mbuf *m0 = txs->txs_mbuf;
   6126 	struct livengood_tcpip_ctxdesc *t;
   6127 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6128 	uint32_t ipcse;
   6129 	struct ether_header *eh;
   6130 	int offset, iphl;
   6131 	uint8_t fields;
   6132 
   6133 	/*
   6134 	 * XXX It would be nice if the mbuf pkthdr had offset
   6135 	 * fields for the protocol headers.
   6136 	 */
   6137 
   6138 	eh = mtod(m0, struct ether_header *);
   6139 	switch (htons(eh->ether_type)) {
   6140 	case ETHERTYPE_IP:
   6141 	case ETHERTYPE_IPV6:
   6142 		offset = ETHER_HDR_LEN;
   6143 		break;
   6144 
   6145 	case ETHERTYPE_VLAN:
   6146 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6147 		break;
   6148 
   6149 	default:
   6150 		/*
   6151 		 * Don't support this protocol or encapsulation.
   6152 		 */
   6153 		*fieldsp = 0;
   6154 		*cmdp = 0;
   6155 		return 0;
   6156 	}
   6157 
   6158 	if ((m0->m_pkthdr.csum_flags &
   6159 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6160 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6161 	} else {
   6162 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6163 	}
   6164 	ipcse = offset + iphl - 1;
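         	/*
         	 * Worked example (not in the original source): for a plain
         	 * IPv4/TCP frame, offset = ETHER_HDR_LEN = 14 and iphl = 20,
         	 * so ipcse = 14 + 20 - 1 = 33, the offset of the last byte
         	 * of the IP header, as the IPCSE field expects.
         	 */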
   6165 
   6166 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6167 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6168 	seg = 0;
   6169 	fields = 0;
   6170 
   6171 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6172 		int hlen = offset + iphl;
   6173 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6174 
   6175 		if (__predict_false(m0->m_len <
   6176 				    (hlen + sizeof(struct tcphdr)))) {
   6177 			/*
   6178 			 * TCP/IP headers are not in the first mbuf; we need
   6179 			 * to do this the slow and painful way.  Let's just
   6180 			 * hope this doesn't happen very often.
   6181 			 */
   6182 			struct tcphdr th;
   6183 
   6184 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6185 
   6186 			m_copydata(m0, hlen, sizeof(th), &th);
   6187 			if (v4) {
   6188 				struct ip ip;
   6189 
   6190 				m_copydata(m0, offset, sizeof(ip), &ip);
   6191 				ip.ip_len = 0;
   6192 				m_copyback(m0,
   6193 				    offset + offsetof(struct ip, ip_len),
   6194 				    sizeof(ip.ip_len), &ip.ip_len);
   6195 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6196 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6197 			} else {
   6198 				struct ip6_hdr ip6;
   6199 
   6200 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6201 				ip6.ip6_plen = 0;
   6202 				m_copyback(m0,
   6203 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6204 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6205 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6206 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6207 			}
   6208 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6209 			    sizeof(th.th_sum), &th.th_sum);
   6210 
   6211 			hlen += th.th_off << 2;
   6212 		} else {
   6213 			/*
   6214 			 * TCP/IP headers are in the first mbuf; we can do
   6215 			 * this the easy way.
   6216 			 */
   6217 			struct tcphdr *th;
   6218 
   6219 			if (v4) {
   6220 				struct ip *ip =
   6221 				    (void *)(mtod(m0, char *) + offset);
   6222 				th = (void *)(mtod(m0, char *) + hlen);
   6223 
   6224 				ip->ip_len = 0;
   6225 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6226 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6227 			} else {
   6228 				struct ip6_hdr *ip6 =
   6229 				    (void *)(mtod(m0, char *) + offset);
   6230 				th = (void *)(mtod(m0, char *) + hlen);
   6231 
   6232 				ip6->ip6_plen = 0;
   6233 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6234 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6235 			}
   6236 			hlen += th->th_off << 2;
   6237 		}
   6238 
   6239 		if (v4) {
   6240 			WM_Q_EVCNT_INCR(txq, txtso);
   6241 			cmdlen |= WTX_TCPIP_CMD_IP;
   6242 		} else {
   6243 			WM_Q_EVCNT_INCR(txq, txtso6);
   6244 			ipcse = 0;
   6245 		}
   6246 		cmd |= WTX_TCPIP_CMD_TSE;
   6247 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6248 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6249 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6250 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
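         		/*
         		 * Illustrative values (not in the original source): for
         		 * TSOv4 with no IP or TCP options, hlen = 14 + 20 + 20
         		 * = 54, so the context descriptor carries a 54-byte
         		 * header length plus the MSS from m0->m_pkthdr.segsz,
         		 * while cmdlen accumulates the remaining payload length.
         		 */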
   6251 	}
   6252 
   6253 	/*
   6254 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6255 	 * offload feature, if we load the context descriptor, we
   6256 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6257 	 */
   6258 
   6259 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6260 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6261 	    WTX_TCPIP_IPCSE(ipcse);
   6262 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6263 		WM_Q_EVCNT_INCR(txq, txipsum);
   6264 		fields |= WTX_IXSM;
   6265 	}
   6266 
   6267 	offset += iphl;
   6268 
   6269 	if (m0->m_pkthdr.csum_flags &
   6270 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6271 		WM_Q_EVCNT_INCR(txq, txtusum);
   6272 		fields |= WTX_TXSM;
   6273 		tucs = WTX_TCPIP_TUCSS(offset) |
   6274 		    WTX_TCPIP_TUCSO(offset +
   6275 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6276 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6277 	} else if ((m0->m_pkthdr.csum_flags &
   6278 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6279 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6280 		fields |= WTX_TXSM;
   6281 		tucs = WTX_TCPIP_TUCSS(offset) |
   6282 		    WTX_TCPIP_TUCSO(offset +
   6283 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6284 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6285 	} else {
   6286 		/* Just initialize it to a valid TCP context. */
   6287 		tucs = WTX_TCPIP_TUCSS(offset) |
   6288 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6289 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6290 	}
   6291 
   6292 	/* Fill in the context descriptor. */
   6293 	t = (struct livengood_tcpip_ctxdesc *)
   6294 	    &txq->txq_descs[txq->txq_next];
   6295 	t->tcpip_ipcs = htole32(ipcs);
   6296 	t->tcpip_tucs = htole32(tucs);
   6297 	t->tcpip_cmdlen = htole32(cmdlen);
   6298 	t->tcpip_seg = htole32(seg);
   6299 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6300 
   6301 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6302 	txs->txs_ndesc++;
   6303 
   6304 	*cmdp = cmd;
   6305 	*fieldsp = fields;
   6306 
   6307 	return 0;
   6308 }
   6309 
   6310 static inline int
   6311 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6312 {
   6313 	struct wm_softc *sc = ifp->if_softc;
   6314 	u_int cpuid = cpu_index(curcpu());
   6315 
   6316 	/*
    6317 	 * Currently, a simple distribution strategy.
    6318 	 * TODO:
    6319 	 * Distribute by flowid (RSS hash value).
   6320 	 */
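         	/*
         	 * For illustration (numbers are hypothetical): with
         	 * sc_nqueues = 4 and sc_affinity_offset = 0, CPU 5 always
         	 * maps to TX queue 5 % 4 = 1.
         	 */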
   6321 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6322 }
   6323 
   6324 /*
   6325  * wm_start:		[ifnet interface function]
   6326  *
   6327  *	Start packet transmission on the interface.
   6328  */
   6329 static void
   6330 wm_start(struct ifnet *ifp)
   6331 {
   6332 	struct wm_softc *sc = ifp->if_softc;
   6333 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6334 
   6335 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6336 
   6337 	mutex_enter(txq->txq_lock);
   6338 	if (!txq->txq_stopping)
   6339 		wm_start_locked(ifp);
   6340 	mutex_exit(txq->txq_lock);
   6341 }
   6342 
   6343 static void
   6344 wm_start_locked(struct ifnet *ifp)
   6345 {
   6346 	struct wm_softc *sc = ifp->if_softc;
   6347 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6348 
   6349 	wm_send_common_locked(ifp, txq, false);
   6350 }
   6351 
   6352 static int
   6353 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6354 {
   6355 	int qid;
   6356 	struct wm_softc *sc = ifp->if_softc;
   6357 	struct wm_txqueue *txq;
   6358 
   6359 	qid = wm_select_txqueue(ifp, m);
   6360 	txq = &sc->sc_queue[qid].wmq_txq;
   6361 
   6362 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6363 		m_freem(m);
   6364 		WM_Q_EVCNT_INCR(txq, txdrop);
   6365 		return ENOBUFS;
   6366 	}
   6367 
   6368 	if (mutex_tryenter(txq->txq_lock)) {
    6369 		/* XXX These byte/mcast counters should be per TX queue. */
   6370 		ifp->if_obytes += m->m_pkthdr.len;
   6371 		if (m->m_flags & M_MCAST)
   6372 			ifp->if_omcasts++;
   6373 
   6374 		if (!txq->txq_stopping)
   6375 			wm_transmit_locked(ifp, txq);
   6376 		mutex_exit(txq->txq_lock);
   6377 	}
   6378 
   6379 	return 0;
   6380 }
   6381 
   6382 static void
   6383 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6384 {
   6385 
   6386 	wm_send_common_locked(ifp, txq, true);
   6387 }
   6388 
   6389 static void
   6390 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6391     bool is_transmit)
   6392 {
   6393 	struct wm_softc *sc = ifp->if_softc;
   6394 	struct mbuf *m0;
   6395 	struct m_tag *mtag;
   6396 	struct wm_txsoft *txs;
   6397 	bus_dmamap_t dmamap;
   6398 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6399 	bus_addr_t curaddr;
   6400 	bus_size_t seglen, curlen;
   6401 	uint32_t cksumcmd;
   6402 	uint8_t cksumfields;
   6403 
   6404 	KASSERT(mutex_owned(txq->txq_lock));
   6405 
   6406 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6407 		return;
   6408 
   6409 	/* Remember the previous number of free descriptors. */
   6410 	ofree = txq->txq_free;
   6411 
   6412 	/*
   6413 	 * Loop through the send queue, setting up transmit descriptors
   6414 	 * until we drain the queue, or use up all available transmit
   6415 	 * descriptors.
   6416 	 */
   6417 	for (;;) {
   6418 		m0 = NULL;
   6419 
   6420 		/* Get a work queue entry. */
   6421 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6422 			wm_txeof(sc, txq);
   6423 			if (txq->txq_sfree == 0) {
   6424 				DPRINTF(WM_DEBUG_TX,
   6425 				    ("%s: TX: no free job descriptors\n",
   6426 					device_xname(sc->sc_dev)));
   6427 				WM_Q_EVCNT_INCR(txq, txsstall);
   6428 				break;
   6429 			}
   6430 		}
   6431 
   6432 		/* Grab a packet off the queue. */
   6433 		if (is_transmit)
   6434 			m0 = pcq_get(txq->txq_interq);
   6435 		else
   6436 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6437 		if (m0 == NULL)
   6438 			break;
   6439 
   6440 		DPRINTF(WM_DEBUG_TX,
   6441 		    ("%s: TX: have packet to transmit: %p\n",
   6442 		    device_xname(sc->sc_dev), m0));
   6443 
   6444 		txs = &txq->txq_soft[txq->txq_snext];
   6445 		dmamap = txs->txs_dmamap;
   6446 
   6447 		use_tso = (m0->m_pkthdr.csum_flags &
   6448 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6449 
   6450 		/*
   6451 		 * So says the Linux driver:
   6452 		 * The controller does a simple calculation to make sure
   6453 		 * there is enough room in the FIFO before initiating the
   6454 		 * DMA for each buffer.  The calc is:
   6455 		 *	4 = ceil(buffer len / MSS)
   6456 		 * To make sure we don't overrun the FIFO, adjust the max
   6457 		 * buffer len if the MSS drops.
   6458 		 */
   6459 		dmamap->dm_maxsegsz =
   6460 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6461 		    ? m0->m_pkthdr.segsz << 2
   6462 		    : WTX_MAX_LEN;
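         		/*
         		 * For illustration (MSS value is hypothetical): with
         		 * segsz = 1460, each DMA segment is capped at
         		 * 1460 << 2 = 5840 bytes, so ceil(buffer len / MSS)
         		 * never exceeds 4 for any single buffer.
         		 */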
   6463 
   6464 		/*
   6465 		 * Load the DMA map.  If this fails, the packet either
   6466 		 * didn't fit in the allotted number of segments, or we
   6467 		 * were short on resources.  For the too-many-segments
   6468 		 * case, we simply report an error and drop the packet,
   6469 		 * since we can't sanely copy a jumbo packet to a single
   6470 		 * buffer.
   6471 		 */
   6472 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6473 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6474 		if (error) {
   6475 			if (error == EFBIG) {
   6476 				WM_Q_EVCNT_INCR(txq, txdrop);
   6477 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6478 				    "DMA segments, dropping...\n",
   6479 				    device_xname(sc->sc_dev));
   6480 				wm_dump_mbuf_chain(sc, m0);
   6481 				m_freem(m0);
   6482 				continue;
   6483 			}
    6484 			/* Short on resources, just stop for now. */
   6485 			DPRINTF(WM_DEBUG_TX,
   6486 			    ("%s: TX: dmamap load failed: %d\n",
   6487 			    device_xname(sc->sc_dev), error));
   6488 			break;
   6489 		}
   6490 
   6491 		segs_needed = dmamap->dm_nsegs;
   6492 		if (use_tso) {
   6493 			/* For sentinel descriptor; see below. */
   6494 			segs_needed++;
   6495 		}
   6496 
   6497 		/*
   6498 		 * Ensure we have enough descriptors free to describe
   6499 		 * the packet.  Note, we always reserve one descriptor
   6500 		 * at the end of the ring due to the semantics of the
   6501 		 * TDT register, plus one more in the event we need
   6502 		 * to load offload context.
   6503 		 */
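         		/*
         		 * For illustration (segment count is hypothetical):
         		 * a packet that loads into 3 DMA segments (4 with the
         		 * TSO sentinel) proceeds only if txq_free >= 6: the
         		 * data descriptors plus the two reserved slots above.
         		 */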
   6504 		if (segs_needed > txq->txq_free - 2) {
   6505 			/*
   6506 			 * Not enough free descriptors to transmit this
   6507 			 * packet.  We haven't committed anything yet,
   6508 			 * so just unload the DMA map, put the packet
    6509 			 * back on the queue, and punt.  Notify the upper
   6510 			 * layer that there are no more slots left.
   6511 			 */
   6512 			DPRINTF(WM_DEBUG_TX,
   6513 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6514 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6515 			    segs_needed, txq->txq_free - 1));
   6516 			ifp->if_flags |= IFF_OACTIVE;
   6517 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6518 			WM_Q_EVCNT_INCR(txq, txdstall);
   6519 			break;
   6520 		}
   6521 
   6522 		/*
   6523 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6524 		 * once we know we can transmit the packet, since we
   6525 		 * do some internal FIFO space accounting here.
   6526 		 */
   6527 		if (sc->sc_type == WM_T_82547 &&
   6528 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6529 			DPRINTF(WM_DEBUG_TX,
   6530 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6531 			    device_xname(sc->sc_dev)));
   6532 			ifp->if_flags |= IFF_OACTIVE;
   6533 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6534 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6535 			break;
   6536 		}
   6537 
   6538 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6539 
   6540 		DPRINTF(WM_DEBUG_TX,
   6541 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6542 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6543 
   6544 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6545 
   6546 		/*
   6547 		 * Store a pointer to the packet so that we can free it
   6548 		 * later.
   6549 		 *
   6550 		 * Initially, we consider the number of descriptors the
    6551 		 * packet uses to be the number of DMA segments.  This may be
   6552 		 * incremented by 1 if we do checksum offload (a descriptor
   6553 		 * is used to set the checksum context).
   6554 		 */
   6555 		txs->txs_mbuf = m0;
   6556 		txs->txs_firstdesc = txq->txq_next;
   6557 		txs->txs_ndesc = segs_needed;
   6558 
   6559 		/* Set up offload parameters for this packet. */
   6560 		if (m0->m_pkthdr.csum_flags &
   6561 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6562 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6563 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6564 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6565 					  &cksumfields) != 0) {
   6566 				/* Error message already displayed. */
   6567 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6568 				continue;
   6569 			}
   6570 		} else {
   6571 			cksumcmd = 0;
   6572 			cksumfields = 0;
   6573 		}
   6574 
   6575 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6576 
   6577 		/* Sync the DMA map. */
   6578 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6579 		    BUS_DMASYNC_PREWRITE);
   6580 
   6581 		/* Initialize the transmit descriptor. */
   6582 		for (nexttx = txq->txq_next, seg = 0;
   6583 		     seg < dmamap->dm_nsegs; seg++) {
   6584 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6585 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6586 			     seglen != 0;
   6587 			     curaddr += curlen, seglen -= curlen,
   6588 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6589 				curlen = seglen;
   6590 
   6591 				/*
   6592 				 * So says the Linux driver:
   6593 				 * Work around for premature descriptor
   6594 				 * write-backs in TSO mode.  Append a
   6595 				 * 4-byte sentinel descriptor.
   6596 				 */
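         				/*
         				 * E.g. (hypothetical sizes): a
         				 * 100-byte final chunk becomes a
         				 * 96-byte descriptor plus a 4-byte
         				 * sentinel on the next iteration.
         				 */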
   6597 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6598 				    curlen > 8)
   6599 					curlen -= 4;
   6600 
   6601 				wm_set_dma_addr(
   6602 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6603 				txq->txq_descs[nexttx].wtx_cmdlen
   6604 				    = htole32(cksumcmd | curlen);
   6605 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6606 				    = 0;
   6607 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6608 				    = cksumfields;
    6609 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   6610 				lasttx = nexttx;
   6611 
   6612 				DPRINTF(WM_DEBUG_TX,
   6613 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6614 				     "len %#04zx\n",
   6615 				    device_xname(sc->sc_dev), nexttx,
   6616 				    (uint64_t)curaddr, curlen));
   6617 			}
   6618 		}
   6619 
   6620 		KASSERT(lasttx != -1);
   6621 
   6622 		/*
   6623 		 * Set up the command byte on the last descriptor of
   6624 		 * the packet.  If we're in the interrupt delay window,
   6625 		 * delay the interrupt.
   6626 		 */
   6627 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6628 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6629 
   6630 		/*
   6631 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6632 		 * up the descriptor to encapsulate the packet for us.
   6633 		 *
   6634 		 * This is only valid on the last descriptor of the packet.
   6635 		 */
   6636 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6637 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6638 			    htole32(WTX_CMD_VLE);
   6639 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6640 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6641 		}
   6642 
   6643 		txs->txs_lastdesc = lasttx;
   6644 
   6645 		DPRINTF(WM_DEBUG_TX,
   6646 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6647 		    device_xname(sc->sc_dev),
   6648 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6649 
   6650 		/* Sync the descriptors we're using. */
   6651 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6652 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6653 
   6654 		/* Give the packet to the chip. */
   6655 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6656 
   6657 		DPRINTF(WM_DEBUG_TX,
   6658 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6659 
   6660 		DPRINTF(WM_DEBUG_TX,
   6661 		    ("%s: TX: finished transmitting packet, job %d\n",
   6662 		    device_xname(sc->sc_dev), txq->txq_snext));
   6663 
   6664 		/* Advance the tx pointer. */
   6665 		txq->txq_free -= txs->txs_ndesc;
   6666 		txq->txq_next = nexttx;
   6667 
   6668 		txq->txq_sfree--;
   6669 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6670 
   6671 		/* Pass the packet to any BPF listeners. */
   6672 		bpf_mtap(ifp, m0);
   6673 	}
   6674 
   6675 	if (m0 != NULL) {
   6676 		ifp->if_flags |= IFF_OACTIVE;
   6677 		WM_Q_EVCNT_INCR(txq, txdrop);
   6678 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6679 			__func__));
   6680 		m_freem(m0);
   6681 	}
   6682 
   6683 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6684 		/* No more slots; notify upper layer. */
   6685 		ifp->if_flags |= IFF_OACTIVE;
   6686 	}
   6687 
   6688 	if (txq->txq_free != ofree) {
   6689 		/* Set a watchdog timer in case the chip flakes out. */
   6690 		ifp->if_timer = 5;
   6691 	}
   6692 }
   6693 
   6694 /*
   6695  * wm_nq_tx_offload:
   6696  *
   6697  *	Set up TCP/IP checksumming parameters for the
   6698  *	specified packet, for NEWQUEUE devices
   6699  */
   6700 static int
   6701 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6702     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6703 {
   6704 	struct mbuf *m0 = txs->txs_mbuf;
   6705 	struct m_tag *mtag;
   6706 	uint32_t vl_len, mssidx, cmdc;
   6707 	struct ether_header *eh;
   6708 	int offset, iphl;
   6709 
   6710 	/*
   6711 	 * XXX It would be nice if the mbuf pkthdr had offset
   6712 	 * fields for the protocol headers.
   6713 	 */
   6714 	*cmdlenp = 0;
   6715 	*fieldsp = 0;
   6716 
   6717 	eh = mtod(m0, struct ether_header *);
   6718 	switch (htons(eh->ether_type)) {
   6719 	case ETHERTYPE_IP:
   6720 	case ETHERTYPE_IPV6:
   6721 		offset = ETHER_HDR_LEN;
   6722 		break;
   6723 
   6724 	case ETHERTYPE_VLAN:
   6725 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6726 		break;
   6727 
   6728 	default:
   6729 		/* Don't support this protocol or encapsulation. */
   6730 		*do_csum = false;
   6731 		return 0;
   6732 	}
   6733 	*do_csum = true;
   6734 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6735 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6736 
   6737 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6738 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6739 
   6740 	if ((m0->m_pkthdr.csum_flags &
   6741 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6742 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6743 	} else {
   6744 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6745 	}
   6746 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6747 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
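         	/*
         	 * For illustration: a plain IPv4/TCP frame packs
         	 * MACLEN = 14 and IPLEN = 20 into vl_len at their
         	 * respective shifts.
         	 */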
   6748 
   6749 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6750 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6751 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6752 		*cmdlenp |= NQTX_CMD_VLE;
   6753 	}
   6754 
   6755 	mssidx = 0;
   6756 
   6757 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6758 		int hlen = offset + iphl;
   6759 		int tcp_hlen;
   6760 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6761 
   6762 		if (__predict_false(m0->m_len <
   6763 				    (hlen + sizeof(struct tcphdr)))) {
   6764 			/*
   6765 			 * TCP/IP headers are not in the first mbuf; we need
   6766 			 * to do this the slow and painful way.  Let's just
   6767 			 * hope this doesn't happen very often.
   6768 			 */
   6769 			struct tcphdr th;
   6770 
   6771 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6772 
   6773 			m_copydata(m0, hlen, sizeof(th), &th);
   6774 			if (v4) {
   6775 				struct ip ip;
   6776 
   6777 				m_copydata(m0, offset, sizeof(ip), &ip);
   6778 				ip.ip_len = 0;
   6779 				m_copyback(m0,
   6780 				    offset + offsetof(struct ip, ip_len),
   6781 				    sizeof(ip.ip_len), &ip.ip_len);
   6782 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6783 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6784 			} else {
   6785 				struct ip6_hdr ip6;
   6786 
   6787 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6788 				ip6.ip6_plen = 0;
   6789 				m_copyback(m0,
   6790 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6791 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6792 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6793 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6794 			}
   6795 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6796 			    sizeof(th.th_sum), &th.th_sum);
   6797 
   6798 			tcp_hlen = th.th_off << 2;
   6799 		} else {
   6800 			/*
   6801 			 * TCP/IP headers are in the first mbuf; we can do
   6802 			 * this the easy way.
   6803 			 */
   6804 			struct tcphdr *th;
   6805 
   6806 			if (v4) {
   6807 				struct ip *ip =
   6808 				    (void *)(mtod(m0, char *) + offset);
   6809 				th = (void *)(mtod(m0, char *) + hlen);
   6810 
   6811 				ip->ip_len = 0;
   6812 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6813 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6814 			} else {
   6815 				struct ip6_hdr *ip6 =
   6816 				    (void *)(mtod(m0, char *) + offset);
   6817 				th = (void *)(mtod(m0, char *) + hlen);
   6818 
   6819 				ip6->ip6_plen = 0;
   6820 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6821 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6822 			}
   6823 			tcp_hlen = th->th_off << 2;
   6824 		}
   6825 		hlen += tcp_hlen;
   6826 		*cmdlenp |= NQTX_CMD_TSE;
   6827 
   6828 		if (v4) {
   6829 			WM_Q_EVCNT_INCR(txq, txtso);
   6830 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6831 		} else {
   6832 			WM_Q_EVCNT_INCR(txq, txtso6);
   6833 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6834 		}
    6835 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    6836 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6837 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6838 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6839 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6840 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
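         		/*
         		 * For illustration (MSS value is hypothetical):
         		 * MSS = 1448 and a 20-byte TCP header yield
         		 * mssidx = (1448 << NQTXC_MSSIDX_MSS_SHIFT)
         		 *     | (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
         		 */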
   6841 	} else {
   6842 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6843 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6844 	}
   6845 
   6846 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6847 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6848 		cmdc |= NQTXC_CMD_IP4;
   6849 	}
   6850 
   6851 	if (m0->m_pkthdr.csum_flags &
   6852 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6853 		WM_Q_EVCNT_INCR(txq, txtusum);
   6854 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6855 			cmdc |= NQTXC_CMD_TCP;
   6856 		} else {
   6857 			cmdc |= NQTXC_CMD_UDP;
   6858 		}
   6859 		cmdc |= NQTXC_CMD_IP4;
   6860 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6861 	}
   6862 	if (m0->m_pkthdr.csum_flags &
   6863 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6864 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6865 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6866 			cmdc |= NQTXC_CMD_TCP;
   6867 		} else {
   6868 			cmdc |= NQTXC_CMD_UDP;
   6869 		}
   6870 		cmdc |= NQTXC_CMD_IP6;
   6871 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6872 	}
   6873 
   6874 	/* Fill in the context descriptor. */
   6875 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6876 	    htole32(vl_len);
   6877 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6878 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6879 	    htole32(cmdc);
   6880 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6881 	    htole32(mssidx);
   6882 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6883 	DPRINTF(WM_DEBUG_TX,
   6884 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6885 	    txq->txq_next, 0, vl_len));
   6886 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6887 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6888 	txs->txs_ndesc++;
   6889 	return 0;
   6890 }
   6891 
   6892 /*
   6893  * wm_nq_start:		[ifnet interface function]
   6894  *
   6895  *	Start packet transmission on the interface for NEWQUEUE devices
   6896  */
   6897 static void
   6898 wm_nq_start(struct ifnet *ifp)
   6899 {
   6900 	struct wm_softc *sc = ifp->if_softc;
   6901 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6902 
   6903 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6904 
   6905 	mutex_enter(txq->txq_lock);
   6906 	if (!txq->txq_stopping)
   6907 		wm_nq_start_locked(ifp);
   6908 	mutex_exit(txq->txq_lock);
   6909 }
   6910 
   6911 static void
   6912 wm_nq_start_locked(struct ifnet *ifp)
   6913 {
   6914 	struct wm_softc *sc = ifp->if_softc;
   6915 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6916 
   6917 	wm_nq_send_common_locked(ifp, txq, false);
   6918 }
   6919 
   6920 static int
   6921 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6922 {
   6923 	int qid;
   6924 	struct wm_softc *sc = ifp->if_softc;
   6925 	struct wm_txqueue *txq;
   6926 
   6927 	qid = wm_select_txqueue(ifp, m);
   6928 	txq = &sc->sc_queue[qid].wmq_txq;
   6929 
   6930 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6931 		m_freem(m);
   6932 		WM_Q_EVCNT_INCR(txq, txdrop);
   6933 		return ENOBUFS;
   6934 	}
   6935 
   6936 	if (mutex_tryenter(txq->txq_lock)) {
    6937 		/* XXX These byte/mcast counters should be per TX queue. */
   6938 		ifp->if_obytes += m->m_pkthdr.len;
   6939 		if (m->m_flags & M_MCAST)
   6940 			ifp->if_omcasts++;
   6941 
   6942 		if (!txq->txq_stopping)
   6943 			wm_nq_transmit_locked(ifp, txq);
   6944 		mutex_exit(txq->txq_lock);
   6945 	}
   6946 
   6947 	return 0;
   6948 }
   6949 
   6950 static void
   6951 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6952 {
   6953 
   6954 	wm_nq_send_common_locked(ifp, txq, true);
   6955 }
   6956 
   6957 static void
   6958 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6959     bool is_transmit)
   6960 {
   6961 	struct wm_softc *sc = ifp->if_softc;
   6962 	struct mbuf *m0;
   6963 	struct m_tag *mtag;
   6964 	struct wm_txsoft *txs;
   6965 	bus_dmamap_t dmamap;
   6966 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6967 	bool do_csum, sent;
   6968 
   6969 	KASSERT(mutex_owned(txq->txq_lock));
   6970 
   6971 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6972 		return;
   6973 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6974 		return;
   6975 
   6976 	sent = false;
   6977 
   6978 	/*
   6979 	 * Loop through the send queue, setting up transmit descriptors
   6980 	 * until we drain the queue, or use up all available transmit
   6981 	 * descriptors.
   6982 	 */
   6983 	for (;;) {
   6984 		m0 = NULL;
   6985 
   6986 		/* Get a work queue entry. */
   6987 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6988 			wm_txeof(sc, txq);
   6989 			if (txq->txq_sfree == 0) {
   6990 				DPRINTF(WM_DEBUG_TX,
   6991 				    ("%s: TX: no free job descriptors\n",
   6992 					device_xname(sc->sc_dev)));
   6993 				WM_Q_EVCNT_INCR(txq, txsstall);
   6994 				break;
   6995 			}
   6996 		}
   6997 
   6998 		/* Grab a packet off the queue. */
   6999 		if (is_transmit)
   7000 			m0 = pcq_get(txq->txq_interq);
   7001 		else
   7002 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7003 		if (m0 == NULL)
   7004 			break;
   7005 
   7006 		DPRINTF(WM_DEBUG_TX,
   7007 		    ("%s: TX: have packet to transmit: %p\n",
   7008 		    device_xname(sc->sc_dev), m0));
   7009 
   7010 		txs = &txq->txq_soft[txq->txq_snext];
   7011 		dmamap = txs->txs_dmamap;
   7012 
   7013 		/*
   7014 		 * Load the DMA map.  If this fails, the packet either
   7015 		 * didn't fit in the allotted number of segments, or we
   7016 		 * were short on resources.  For the too-many-segments
   7017 		 * case, we simply report an error and drop the packet,
   7018 		 * since we can't sanely copy a jumbo packet to a single
   7019 		 * buffer.
   7020 		 */
   7021 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7022 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7023 		if (error) {
   7024 			if (error == EFBIG) {
   7025 				WM_Q_EVCNT_INCR(txq, txdrop);
   7026 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7027 				    "DMA segments, dropping...\n",
   7028 				    device_xname(sc->sc_dev));
   7029 				wm_dump_mbuf_chain(sc, m0);
   7030 				m_freem(m0);
   7031 				continue;
   7032 			}
   7033 			/* Short on resources, just stop for now. */
   7034 			DPRINTF(WM_DEBUG_TX,
   7035 			    ("%s: TX: dmamap load failed: %d\n",
   7036 			    device_xname(sc->sc_dev), error));
   7037 			break;
   7038 		}
   7039 
   7040 		segs_needed = dmamap->dm_nsegs;
   7041 
   7042 		/*
   7043 		 * Ensure we have enough descriptors free to describe
   7044 		 * the packet.  Note, we always reserve one descriptor
   7045 		 * at the end of the ring due to the semantics of the
   7046 		 * TDT register, plus one more in the event we need
   7047 		 * to load offload context.
   7048 		 */
   7049 		if (segs_needed > txq->txq_free - 2) {
   7050 			/*
   7051 			 * Not enough free descriptors to transmit this
   7052 			 * packet.  We haven't committed anything yet,
   7053 			 * so just unload the DMA map, put the packet
    7054 			 * back on the queue, and punt.  Notify the upper
   7055 			 * layer that there are no more slots left.
   7056 			 */
   7057 			DPRINTF(WM_DEBUG_TX,
   7058 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7059 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7060 			    segs_needed, txq->txq_free - 1));
   7061 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7062 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7063 			WM_Q_EVCNT_INCR(txq, txdstall);
   7064 			break;
   7065 		}
   7066 
   7067 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7068 
   7069 		DPRINTF(WM_DEBUG_TX,
   7070 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7071 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7072 
   7073 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7074 
   7075 		/*
   7076 		 * Store a pointer to the packet so that we can free it
   7077 		 * later.
   7078 		 *
   7079 		 * Initially, we consider the number of descriptors the
    7080 		 * packet uses to be the number of DMA segments.  This may be
   7081 		 * incremented by 1 if we do checksum offload (a descriptor
   7082 		 * is used to set the checksum context).
   7083 		 */
   7084 		txs->txs_mbuf = m0;
   7085 		txs->txs_firstdesc = txq->txq_next;
   7086 		txs->txs_ndesc = segs_needed;
   7087 
   7088 		/* Set up offload parameters for this packet. */
   7089 		uint32_t cmdlen, fields, dcmdlen;
   7090 		if (m0->m_pkthdr.csum_flags &
   7091 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7092 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7093 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7094 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7095 			    &do_csum) != 0) {
   7096 				/* Error message already displayed. */
   7097 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7098 				continue;
   7099 			}
   7100 		} else {
   7101 			do_csum = false;
   7102 			cmdlen = 0;
   7103 			fields = 0;
   7104 		}
   7105 
   7106 		/* Sync the DMA map. */
   7107 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7108 		    BUS_DMASYNC_PREWRITE);
   7109 
   7110 		/* Initialize the first transmit descriptor. */
   7111 		nexttx = txq->txq_next;
   7112 		if (!do_csum) {
    7113 			/* Set up a legacy descriptor. */
   7114 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7115 			    dmamap->dm_segs[0].ds_addr);
   7116 			txq->txq_descs[nexttx].wtx_cmdlen =
   7117 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7118 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7119 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7120 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7121 			    NULL) {
   7122 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7123 				    htole32(WTX_CMD_VLE);
   7124 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7125 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7126 			} else {
    7127 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   7128 			}
   7129 			dcmdlen = 0;
   7130 		} else {
    7131 			/* Set up an advanced data descriptor. */
   7132 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7133 			    htole64(dmamap->dm_segs[0].ds_addr);
   7134 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7135 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7136 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7137 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7138 			    htole32(fields);
   7139 			DPRINTF(WM_DEBUG_TX,
   7140 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7141 			    device_xname(sc->sc_dev), nexttx,
   7142 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7143 			DPRINTF(WM_DEBUG_TX,
   7144 			    ("\t 0x%08x%08x\n", fields,
   7145 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7146 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7147 		}
   7148 
   7149 		lasttx = nexttx;
   7150 		nexttx = WM_NEXTTX(txq, nexttx);
   7151 		/*
    7152 		 * Fill in the remaining descriptors.  The legacy and
    7153 		 * advanced formats are laid out the same here.
   7154 		 */
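         		/*
         		 * This relies on the 64-bit address and the length
         		 * bits of cmdlen occupying the same positions in
         		 * both layouts; dcmdlen (0 for legacy, DTYP_D|DEXT
         		 * for advanced) is the only difference.
         		 */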
   7155 		for (seg = 1; seg < dmamap->dm_nsegs;
   7156 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7157 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7158 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7159 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7160 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7161 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7162 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7163 			lasttx = nexttx;
   7164 
   7165 			DPRINTF(WM_DEBUG_TX,
   7166 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7167 			     "len %#04zx\n",
   7168 			    device_xname(sc->sc_dev), nexttx,
   7169 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7170 			    dmamap->dm_segs[seg].ds_len));
   7171 		}
   7172 
   7173 		KASSERT(lasttx != -1);
   7174 
   7175 		/*
   7176 		 * Set up the command byte on the last descriptor of
   7177 		 * the packet.  If we're in the interrupt delay window,
   7178 		 * delay the interrupt.
   7179 		 */
   7180 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7181 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7182 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7183 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7184 
   7185 		txs->txs_lastdesc = lasttx;
   7186 
   7187 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7188 		    device_xname(sc->sc_dev),
   7189 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7190 
   7191 		/* Sync the descriptors we're using. */
   7192 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7193 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7194 
   7195 		/* Give the packet to the chip. */
   7196 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7197 		sent = true;
   7198 
   7199 		DPRINTF(WM_DEBUG_TX,
   7200 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7201 
   7202 		DPRINTF(WM_DEBUG_TX,
   7203 		    ("%s: TX: finished transmitting packet, job %d\n",
   7204 		    device_xname(sc->sc_dev), txq->txq_snext));
   7205 
   7206 		/* Advance the tx pointer. */
   7207 		txq->txq_free -= txs->txs_ndesc;
   7208 		txq->txq_next = nexttx;
   7209 
   7210 		txq->txq_sfree--;
   7211 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7212 
   7213 		/* Pass the packet to any BPF listeners. */
   7214 		bpf_mtap(ifp, m0);
   7215 	}
   7216 
   7217 	if (m0 != NULL) {
   7218 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7219 		WM_Q_EVCNT_INCR(txq, txdrop);
   7220 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7221 			__func__));
   7222 		m_freem(m0);
   7223 	}
   7224 
   7225 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7226 		/* No more slots; notify upper layer. */
   7227 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7228 	}
   7229 
   7230 	if (sent) {
   7231 		/* Set a watchdog timer in case the chip flakes out. */
   7232 		ifp->if_timer = 5;
   7233 	}
   7234 }
   7235 
   7236 /* Interrupt */
   7237 
   7238 /*
   7239  * wm_txeof:
   7240  *
   7241  *	Helper; handle transmit interrupts.
   7242  */
   7243 static int
   7244 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7245 {
   7246 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7247 	struct wm_txsoft *txs;
   7248 	bool processed = false;
   7249 	int count = 0;
   7250 	int i;
   7251 	uint8_t status;
   7252 
   7253 	KASSERT(mutex_owned(txq->txq_lock));
   7254 
   7255 	if (txq->txq_stopping)
   7256 		return 0;
   7257 
   7258 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7259 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7260 	else
   7261 		ifp->if_flags &= ~IFF_OACTIVE;
   7262 
   7263 	/*
   7264 	 * Go through the Tx list and free mbufs for those
   7265 	 * frames which have been transmitted.
   7266 	 */
   7267 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7268 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7269 		txs = &txq->txq_soft[i];
   7270 
   7271 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7272 			device_xname(sc->sc_dev), i));
   7273 
   7274 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7275 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7276 
   7277 		status =
   7278 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7279 		if ((status & WTX_ST_DD) == 0) {
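         			/*
         			 * This job's last descriptor hasn't been
         			 * written back yet; since the chip completes
         			 * jobs in order, no later job is done either,
         			 * so re-sync for the next poll and stop.
         			 */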
   7280 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7281 			    BUS_DMASYNC_PREREAD);
   7282 			break;
   7283 		}
   7284 
   7285 		processed = true;
   7286 		count++;
   7287 		DPRINTF(WM_DEBUG_TX,
   7288 		    ("%s: TX: job %d done: descs %d..%d\n",
   7289 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7290 		    txs->txs_lastdesc));
   7291 
   7292 		/*
   7293 		 * XXX We should probably be using the statistics
   7294 		 * XXX registers, but I don't know if they exist
   7295 		 * XXX on chips before the i82544.
   7296 		 */
   7297 
   7298 #ifdef WM_EVENT_COUNTERS
   7299 		if (status & WTX_ST_TU)
   7300 			WM_Q_EVCNT_INCR(txq, tu);
   7301 #endif /* WM_EVENT_COUNTERS */
   7302 
   7303 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7304 			ifp->if_oerrors++;
   7305 			if (status & WTX_ST_LC)
   7306 				log(LOG_WARNING, "%s: late collision\n",
   7307 				    device_xname(sc->sc_dev));
   7308 			else if (status & WTX_ST_EC) {
   7309 				ifp->if_collisions += 16;
   7310 				log(LOG_WARNING, "%s: excessive collisions\n",
   7311 				    device_xname(sc->sc_dev));
   7312 			}
   7313 		} else
   7314 			ifp->if_opackets++;
   7315 
   7316 		txq->txq_free += txs->txs_ndesc;
   7317 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7318 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7319 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7320 		m_freem(txs->txs_mbuf);
   7321 		txs->txs_mbuf = NULL;
   7322 	}
   7323 
   7324 	/* Update the dirty transmit buffer pointer. */
   7325 	txq->txq_sdirty = i;
   7326 	DPRINTF(WM_DEBUG_TX,
   7327 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7328 
   7329 	if (count != 0)
   7330 		rnd_add_uint32(&sc->rnd_source, count);
   7331 
   7332 	/*
   7333 	 * If there are no more pending transmissions, cancel the watchdog
   7334 	 * timer.
   7335 	 */
   7336 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7337 		ifp->if_timer = 0;
   7338 
   7339 	return processed;
   7340 }
   7341 
   7342 /*
   7343  * wm_rxeof:
   7344  *
   7345  *	Helper; handle receive interrupts.
   7346  */
   7347 static void
   7348 wm_rxeof(struct wm_rxqueue *rxq)
   7349 {
   7350 	struct wm_softc *sc = rxq->rxq_sc;
   7351 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7352 	struct wm_rxsoft *rxs;
   7353 	struct mbuf *m;
   7354 	int i, len;
   7355 	int count = 0;
   7356 	uint8_t status, errors;
   7357 	uint16_t vlantag;
   7358 
   7359 	KASSERT(mutex_owned(rxq->rxq_lock));
   7360 
   7361 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7362 		rxs = &rxq->rxq_soft[i];
   7363 
   7364 		DPRINTF(WM_DEBUG_RX,
   7365 		    ("%s: RX: checking descriptor %d\n",
   7366 		    device_xname(sc->sc_dev), i));
   7367 
    7368 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7369 
   7370 		status = rxq->rxq_descs[i].wrx_status;
   7371 		errors = rxq->rxq_descs[i].wrx_errors;
   7372 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7373 		vlantag = rxq->rxq_descs[i].wrx_special;
   7374 
   7375 		if ((status & WRX_ST_DD) == 0) {
   7376 			/* We have processed all of the receive descriptors. */
   7377 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7378 			break;
   7379 		}
   7380 
   7381 		count++;
   7382 		if (__predict_false(rxq->rxq_discard)) {
   7383 			DPRINTF(WM_DEBUG_RX,
   7384 			    ("%s: RX: discarding contents of descriptor %d\n",
   7385 			    device_xname(sc->sc_dev), i));
   7386 			wm_init_rxdesc(rxq, i);
   7387 			if (status & WRX_ST_EOP) {
   7388 				/* Reset our state. */
   7389 				DPRINTF(WM_DEBUG_RX,
   7390 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7391 				    device_xname(sc->sc_dev)));
   7392 				rxq->rxq_discard = 0;
   7393 			}
   7394 			continue;
   7395 		}
   7396 
   7397 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7398 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7399 
   7400 		m = rxs->rxs_mbuf;
   7401 
   7402 		/*
   7403 		 * Add a new receive buffer to the ring, unless of
   7404 		 * course the length is zero. Treat the latter as a
   7405 		 * failed mapping.
   7406 		 */
   7407 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7408 			/*
   7409 			 * Failed, throw away what we've done so
   7410 			 * far, and discard the rest of the packet.
   7411 			 */
   7412 			ifp->if_ierrors++;
   7413 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7414 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7415 			wm_init_rxdesc(rxq, i);
   7416 			if ((status & WRX_ST_EOP) == 0)
   7417 				rxq->rxq_discard = 1;
   7418 			if (rxq->rxq_head != NULL)
   7419 				m_freem(rxq->rxq_head);
   7420 			WM_RXCHAIN_RESET(rxq);
   7421 			DPRINTF(WM_DEBUG_RX,
   7422 			    ("%s: RX: Rx buffer allocation failed, "
   7423 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7424 			    rxq->rxq_discard ? " (discard)" : ""));
   7425 			continue;
   7426 		}
   7427 
   7428 		m->m_len = len;
   7429 		rxq->rxq_len += len;
   7430 		DPRINTF(WM_DEBUG_RX,
   7431 		    ("%s: RX: buffer at %p len %d\n",
   7432 		    device_xname(sc->sc_dev), m->m_data, len));
   7433 
   7434 		/* If this is not the end of the packet, keep looking. */
   7435 		if ((status & WRX_ST_EOP) == 0) {
   7436 			WM_RXCHAIN_LINK(rxq, m);
   7437 			DPRINTF(WM_DEBUG_RX,
   7438 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7439 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7440 			continue;
   7441 		}
   7442 
    7443 		/*
    7444 		 * Okay, we have the entire packet now.  The chip is
    7445 		 * configured to include the FCS except on I350, I354
    7446 		 * and I21[01] (not all chips can be configured to strip
    7447 		 * it), so we need to trim it.  We may need to adjust
    7448 		 * the length of the previous mbuf in the chain if the
    7449 		 * current mbuf is too short.  Due to an erratum, the
    7450 		 * RCTL_SECRC bit in the RCTL register is always set on
    7451 		 * I350, so the FCS is already stripped there and we
    7452 		 * don't trim it.
    7453 		 */
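         		/*
         		 * E.g. (hypothetical frame): a minimal 64-byte frame
         		 * arrives with rxq_len = 64 including the 4-byte FCS,
         		 * so the packet handed up has len = 60.
         		 */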
   7453 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7454 		    && (sc->sc_type != WM_T_I210)
   7455 		    && (sc->sc_type != WM_T_I211)) {
   7456 			if (m->m_len < ETHER_CRC_LEN) {
   7457 				rxq->rxq_tail->m_len
   7458 				    -= (ETHER_CRC_LEN - m->m_len);
   7459 				m->m_len = 0;
   7460 			} else
   7461 				m->m_len -= ETHER_CRC_LEN;
   7462 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7463 		} else
   7464 			len = rxq->rxq_len;
   7465 
   7466 		WM_RXCHAIN_LINK(rxq, m);
   7467 
   7468 		*rxq->rxq_tailp = NULL;
   7469 		m = rxq->rxq_head;
   7470 
   7471 		WM_RXCHAIN_RESET(rxq);
   7472 
   7473 		DPRINTF(WM_DEBUG_RX,
   7474 		    ("%s: RX: have entire packet, len -> %d\n",
   7475 		    device_xname(sc->sc_dev), len));
   7476 
   7477 		/* If an error occurred, update stats and drop the packet. */
   7478 		if (errors &
   7479 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7480 			if (errors & WRX_ER_SE)
   7481 				log(LOG_WARNING, "%s: symbol error\n",
   7482 				    device_xname(sc->sc_dev));
   7483 			else if (errors & WRX_ER_SEQ)
   7484 				log(LOG_WARNING, "%s: receive sequence error\n",
   7485 				    device_xname(sc->sc_dev));
   7486 			else if (errors & WRX_ER_CE)
   7487 				log(LOG_WARNING, "%s: CRC error\n",
   7488 				    device_xname(sc->sc_dev));
   7489 			m_freem(m);
   7490 			continue;
   7491 		}
   7492 
   7493 		/* No errors.  Receive the packet. */
   7494 		m_set_rcvif(m, ifp);
   7495 		m->m_pkthdr.len = len;
   7496 
   7497 		/*
   7498 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7499 		 * for us.  Associate the tag with the packet.
   7500 		 */
    7501 		/* XXX Should also check for I350 and I354. */
   7502 		if ((status & WRX_ST_VP) != 0) {
   7503 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7504 		}
   7505 
   7506 		/* Set up checksum info for this packet. */
   7507 		if ((status & WRX_ST_IXSM) == 0) {
   7508 			if (status & WRX_ST_IPCS) {
   7509 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7510 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7511 				if (errors & WRX_ER_IPE)
   7512 					m->m_pkthdr.csum_flags |=
   7513 					    M_CSUM_IPv4_BAD;
   7514 			}
   7515 			if (status & WRX_ST_TCPCS) {
   7516 				/*
   7517 				 * Note: we don't know if this was TCP or UDP,
   7518 				 * so we just set both bits, and expect the
   7519 				 * upper layers to deal.
   7520 				 */
   7521 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7522 				m->m_pkthdr.csum_flags |=
   7523 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7524 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7525 				if (errors & WRX_ER_TCPE)
   7526 					m->m_pkthdr.csum_flags |=
   7527 					    M_CSUM_TCP_UDP_BAD;
   7528 			}
   7529 		}
   7530 
   7531 		ifp->if_ipackets++;
   7532 
   7533 		mutex_exit(rxq->rxq_lock);
   7534 
   7535 		/* Pass this up to any BPF listeners. */
   7536 		bpf_mtap(ifp, m);
   7537 
   7538 		/* Pass it on. */
   7539 		if_percpuq_enqueue(sc->sc_ipq, m);
   7540 
   7541 		mutex_enter(rxq->rxq_lock);
   7542 
   7543 		if (rxq->rxq_stopping)
   7544 			break;
   7545 	}
   7546 
   7547 	/* Update the receive pointer. */
   7548 	rxq->rxq_ptr = i;
   7549 	if (count != 0)
   7550 		rnd_add_uint32(&sc->rnd_source, count);
   7551 
   7552 	DPRINTF(WM_DEBUG_RX,
   7553 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7554 }
   7555 
   7556 /*
   7557  * wm_linkintr_gmii:
   7558  *
   7559  *	Helper; handle link interrupts for GMII.
   7560  */
   7561 static void
   7562 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7563 {
   7564 
   7565 	KASSERT(WM_CORE_LOCKED(sc));
   7566 
   7567 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7568 		__func__));
   7569 
   7570 	if (icr & ICR_LSC) {
   7571 		uint32_t reg;
   7572 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7573 
   7574 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7575 			wm_gig_downshift_workaround_ich8lan(sc);
   7576 
   7577 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7578 			device_xname(sc->sc_dev)));
   7579 		mii_pollstat(&sc->sc_mii);
   7580 		if (sc->sc_type == WM_T_82543) {
   7581 			int miistatus, active;
   7582 
   7583 			/*
   7584 			 * With 82543, we need to force speed and
   7585 			 * duplex on the MAC equal to what the PHY
   7586 			 * speed and duplex configuration is.
   7587 			 */
   7588 			miistatus = sc->sc_mii.mii_media_status;
   7589 
   7590 			if (miistatus & IFM_ACTIVE) {
   7591 				active = sc->sc_mii.mii_media_active;
   7592 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7593 				switch (IFM_SUBTYPE(active)) {
   7594 				case IFM_10_T:
   7595 					sc->sc_ctrl |= CTRL_SPEED_10;
   7596 					break;
   7597 				case IFM_100_TX:
   7598 					sc->sc_ctrl |= CTRL_SPEED_100;
   7599 					break;
   7600 				case IFM_1000_T:
   7601 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7602 					break;
   7603 				default:
   7604 					/*
    7605 					 * Fiber?
    7606 					 * Should not enter here.
   7607 					 */
   7608 					printf("unknown media (%x)\n", active);
   7609 					break;
   7610 				}
   7611 				if (active & IFM_FDX)
   7612 					sc->sc_ctrl |= CTRL_FD;
   7613 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7614 			}
   7615 		} else if ((sc->sc_type == WM_T_ICH8)
   7616 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7617 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7618 		} else if (sc->sc_type == WM_T_PCH) {
   7619 			wm_k1_gig_workaround_hv(sc,
   7620 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7621 		}
   7622 
   7623 		if ((sc->sc_phytype == WMPHY_82578)
   7624 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7625 			== IFM_1000_T)) {
   7626 
   7627 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7628 				delay(200*1000); /* XXX too big */
   7629 
   7630 				/* Link stall fix for link up */
   7631 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7632 				    HV_MUX_DATA_CTRL,
   7633 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7634 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7635 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7636 				    HV_MUX_DATA_CTRL,
   7637 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7638 			}
   7639 		}
   7640 		/*
   7641 		 * I217 Packet Loss issue:
    7642 		 * Ensure that the FEXTNVM4 Beacon Duration is set
    7643 		 * correctly on power up.  Set the Beacon Duration
    7644 		 * for I217 to 8 usec.
   7645 		 */
   7646 		if ((sc->sc_type == WM_T_PCH_LPT)
   7647 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7648 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7649 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7650 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7651 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7652 		}
   7653 
   7654 		/* XXX Work-around I218 hang issue */
   7655 		/* e1000_k1_workaround_lpt_lp() */
   7656 
   7657 		if ((sc->sc_type == WM_T_PCH_LPT)
   7658 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7659 			/*
   7660 			 * Set platform power management values for Latency
   7661 			 * Tolerance Reporting (LTR)
   7662 			 */
   7663 			wm_platform_pm_pch_lpt(sc,
   7664 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7665 				    != 0));
   7666 		}
   7667 
   7668 		/* FEXTNVM6 K1-off workaround */
   7669 		if (sc->sc_type == WM_T_PCH_SPT) {
   7670 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7671 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7672 			    & FEXTNVM6_K1_OFF_ENABLE)
   7673 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7674 			else
   7675 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7676 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7677 		}
   7678 	} else if (icr & ICR_RXSEQ) {
   7679 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7680 			device_xname(sc->sc_dev)));
   7681 	}
   7682 }
   7683 
   7684 /*
   7685  * wm_linkintr_tbi:
   7686  *
   7687  *	Helper; handle link interrupts for TBI mode.
   7688  */
   7689 static void
   7690 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7691 {
   7692 	uint32_t status;
   7693 
   7694 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7695 		__func__));
   7696 
   7697 	status = CSR_READ(sc, WMREG_STATUS);
   7698 	if (icr & ICR_LSC) {
   7699 		if (status & STATUS_LU) {
   7700 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7701 			    device_xname(sc->sc_dev),
   7702 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7703 			/*
   7704 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7705 			 * so we should update sc->sc_ctrl
   7706 			 */
   7707 
   7708 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7709 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7710 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7711 			if (status & STATUS_FD)
   7712 				sc->sc_tctl |=
   7713 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7714 			else
   7715 				sc->sc_tctl |=
   7716 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7717 			if (sc->sc_ctrl & CTRL_TFCE)
   7718 				sc->sc_fcrtl |= FCRTL_XONE;
   7719 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7720 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7721 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7722 				      sc->sc_fcrtl);
   7723 			sc->sc_tbi_linkup = 1;
   7724 		} else {
   7725 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7726 			    device_xname(sc->sc_dev)));
   7727 			sc->sc_tbi_linkup = 0;
   7728 		}
   7729 		/* Update LED */
   7730 		wm_tbi_serdes_set_linkled(sc);
   7731 	} else if (icr & ICR_RXSEQ) {
   7732 		DPRINTF(WM_DEBUG_LINK,
   7733 		    ("%s: LINK: Receive sequence error\n",
   7734 		    device_xname(sc->sc_dev)));
   7735 	}
   7736 }
   7737 
   7738 /*
   7739  * wm_linkintr_serdes:
   7740  *
    7741  *	Helper; handle link interrupts for SERDES mode.
   7742  */
   7743 static void
   7744 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7745 {
   7746 	struct mii_data *mii = &sc->sc_mii;
   7747 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7748 	uint32_t pcs_adv, pcs_lpab, reg;
   7749 
   7750 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7751 		__func__));
   7752 
   7753 	if (icr & ICR_LSC) {
   7754 		/* Check PCS */
   7755 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7756 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7757 			mii->mii_media_status |= IFM_ACTIVE;
   7758 			sc->sc_tbi_linkup = 1;
   7759 		} else {
   7760 			mii->mii_media_status |= IFM_NONE;
   7761 			sc->sc_tbi_linkup = 0;
   7762 			wm_tbi_serdes_set_linkled(sc);
   7763 			return;
   7764 		}
   7765 		mii->mii_media_active |= IFM_1000_SX;
   7766 		if ((reg & PCS_LSTS_FDX) != 0)
   7767 			mii->mii_media_active |= IFM_FDX;
   7768 		else
   7769 			mii->mii_media_active |= IFM_HDX;
   7770 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7771 			/* Check flow */
   7772 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7773 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7774 				DPRINTF(WM_DEBUG_LINK,
   7775 				    ("XXX LINKOK but not ACOMP\n"));
   7776 				return;
   7777 			}
   7778 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7779 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7780 			DPRINTF(WM_DEBUG_LINK,
   7781 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7782 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7783 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7784 				mii->mii_media_active |= IFM_FLOW
   7785 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7786 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7787 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7788 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7789 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7790 				mii->mii_media_active |= IFM_FLOW
   7791 				    | IFM_ETH_TXPAUSE;
   7792 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7793 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7794 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7795 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7796 				mii->mii_media_active |= IFM_FLOW
   7797 				    | IFM_ETH_RXPAUSE;
   7798 		}
   7799 		/* Update LED */
   7800 		wm_tbi_serdes_set_linkled(sc);
   7801 	} else {
   7802 		DPRINTF(WM_DEBUG_LINK,
   7803 		    ("%s: LINK: Receive sequence error\n",
   7804 		    device_xname(sc->sc_dev)));
   7805 	}
   7806 }
   7807 
   7808 /*
   7809  * wm_linkintr:
   7810  *
   7811  *	Helper; handle link interrupts.
   7812  */
   7813 static void
   7814 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7815 {
   7816 
   7817 	KASSERT(WM_CORE_LOCKED(sc));
   7818 
   7819 	if (sc->sc_flags & WM_F_HAS_MII)
   7820 		wm_linkintr_gmii(sc, icr);
   7821 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7822 	    && (sc->sc_type >= WM_T_82575))
   7823 		wm_linkintr_serdes(sc, icr);
   7824 	else
   7825 		wm_linkintr_tbi(sc, icr);
   7826 }
   7827 
   7828 /*
   7829  * wm_intr_legacy:
   7830  *
   7831  *	Interrupt service routine for INTx and MSI.
   7832  */
   7833 static int
   7834 wm_intr_legacy(void *arg)
   7835 {
   7836 	struct wm_softc *sc = arg;
   7837 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7838 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7839 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7840 	uint32_t icr, rndval = 0;
   7841 	int handled = 0;
   7842 
   7843 	DPRINTF(WM_DEBUG_TX,
   7844 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7845 	while (1 /* CONSTCOND */) {
   7846 		icr = CSR_READ(sc, WMREG_ICR);
   7847 		if ((icr & sc->sc_icr) == 0)
   7848 			break;
   7849 		if (rndval == 0)
   7850 			rndval = icr;
   7851 
   7852 		mutex_enter(rxq->rxq_lock);
   7853 
   7854 		if (rxq->rxq_stopping) {
   7855 			mutex_exit(rxq->rxq_lock);
   7856 			break;
   7857 		}
   7858 
   7859 		handled = 1;
   7860 
   7861 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7862 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7863 			DPRINTF(WM_DEBUG_RX,
   7864 			    ("%s: RX: got Rx intr 0x%08x\n",
   7865 			    device_xname(sc->sc_dev),
   7866 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7867 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7868 		}
   7869 #endif
   7870 		wm_rxeof(rxq);
   7871 
   7872 		mutex_exit(rxq->rxq_lock);
   7873 		mutex_enter(txq->txq_lock);
   7874 
   7875 		if (txq->txq_stopping) {
   7876 			mutex_exit(txq->txq_lock);
   7877 			break;
   7878 		}
   7879 
   7880 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7881 		if (icr & ICR_TXDW) {
   7882 			DPRINTF(WM_DEBUG_TX,
   7883 			    ("%s: TX: got TXDW interrupt\n",
   7884 			    device_xname(sc->sc_dev)));
   7885 			WM_Q_EVCNT_INCR(txq, txdw);
   7886 		}
   7887 #endif
   7888 		wm_txeof(sc, txq);
   7889 
   7890 		mutex_exit(txq->txq_lock);
   7891 		WM_CORE_LOCK(sc);
   7892 
   7893 		if (sc->sc_core_stopping) {
   7894 			WM_CORE_UNLOCK(sc);
   7895 			break;
   7896 		}
   7897 
   7898 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7899 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7900 			wm_linkintr(sc, icr);
   7901 		}
   7902 
   7903 		WM_CORE_UNLOCK(sc);
   7904 
   7905 		if (icr & ICR_RXO) {
   7906 #if defined(WM_DEBUG)
   7907 			log(LOG_WARNING, "%s: Receive overrun\n",
   7908 			    device_xname(sc->sc_dev));
   7909 #endif /* defined(WM_DEBUG) */
   7910 		}
   7911 	}
   7912 
   7913 	rnd_add_uint32(&sc->rnd_source, rndval);
   7914 
   7915 	if (handled) {
   7916 		/* Try to get more packets going. */
   7917 		ifp->if_start(ifp);
   7918 	}
   7919 
   7920 	return handled;
   7921 }
   7922 
   7923 static int
   7924 wm_txrxintr_msix(void *arg)
   7925 {
   7926 	struct wm_queue *wmq = arg;
   7927 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7928 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7929 	struct wm_softc *sc = txq->txq_sc;
   7930 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7931 
   7932 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7933 
   7934 	DPRINTF(WM_DEBUG_TX,
   7935 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7936 
    7937 	if (sc->sc_type == WM_T_82574)
    7938 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    7939 	else if (sc->sc_type == WM_T_82575)
    7940 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    7941 	else
    7942 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7943 
   7944 	mutex_enter(txq->txq_lock);
   7945 
   7946 	if (txq->txq_stopping) {
   7947 		mutex_exit(txq->txq_lock);
   7948 		return 0;
   7949 	}
   7950 
   7951 	WM_Q_EVCNT_INCR(txq, txdw);
   7952 	wm_txeof(sc, txq);
   7953 
   7954 	/* Try to get more packets going. */
   7955 	if (pcq_peek(txq->txq_interq) != NULL)
   7956 		wm_nq_transmit_locked(ifp, txq);
    7957 	/*
    7958 	 * Some upper-layer code (e.g. ALTQ) still calls ifp->if_start()
    7959 	 * directly, so service the legacy send queue here as well.
    7960 	 */
   7961 	if (wmq->wmq_id == 0) {
   7962 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7963 			wm_nq_start_locked(ifp);
   7964 	}
   7965 
   7966 	mutex_exit(txq->txq_lock);
   7967 
   7968 	DPRINTF(WM_DEBUG_RX,
   7969 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7970 	mutex_enter(rxq->rxq_lock);
   7971 
   7972 	if (rxq->rxq_stopping) {
   7973 		mutex_exit(rxq->rxq_lock);
   7974 		return 0;
   7975 	}
   7976 
   7977 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7978 	wm_rxeof(rxq);
   7979 	mutex_exit(rxq->rxq_lock);
   7980 
   7981 	if (sc->sc_type == WM_T_82574)
   7982 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7983 	else if (sc->sc_type == WM_T_82575)
   7984 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7985 	else
   7986 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7987 
   7988 	return 1;
   7989 }
   7990 
   7991 /*
   7992  * wm_linkintr_msix:
   7993  *
   7994  *	Interrupt service routine for link status change for MSI-X.
   7995  */
   7996 static int
   7997 wm_linkintr_msix(void *arg)
   7998 {
   7999 	struct wm_softc *sc = arg;
   8000 	uint32_t reg;
   8001 
   8002 	DPRINTF(WM_DEBUG_LINK,
   8003 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8004 
   8005 	reg = CSR_READ(sc, WMREG_ICR);
   8006 	WM_CORE_LOCK(sc);
   8007 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8008 		goto out;
   8009 
   8010 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8011 	wm_linkintr(sc, ICR_LSC);
   8012 
   8013 out:
   8014 	WM_CORE_UNLOCK(sc);
   8015 
   8016 	if (sc->sc_type == WM_T_82574)
   8017 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8018 	else if (sc->sc_type == WM_T_82575)
   8019 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8020 	else
   8021 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8022 
   8023 	return 1;
   8024 }
   8025 
   8026 /*
   8027  * Media related.
   8028  * GMII, SGMII, TBI (and SERDES)
   8029  */
   8030 
   8031 /* Common */
   8032 
   8033 /*
   8034  * wm_tbi_serdes_set_linkled:
   8035  *
   8036  *	Update the link LED on TBI and SERDES devices.
   8037  */
   8038 static void
   8039 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8040 {
   8041 
   8042 	if (sc->sc_tbi_linkup)
   8043 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8044 	else
   8045 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8046 
    8047 	/* On 82540 and newer devices the link LED pin is active-low */
   8048 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8049 
   8050 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8051 }
   8052 
   8053 /* GMII related */
   8054 
   8055 /*
   8056  * wm_gmii_reset:
   8057  *
   8058  *	Reset the PHY.
   8059  */
   8060 static void
   8061 wm_gmii_reset(struct wm_softc *sc)
   8062 {
   8063 	uint32_t reg;
   8064 	int rv;
   8065 
   8066 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8067 		device_xname(sc->sc_dev), __func__));
   8068 
   8069 	rv = sc->phy.acquire(sc);
   8070 	if (rv != 0) {
   8071 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8072 		    __func__);
   8073 		return;
   8074 	}
   8075 
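         	/*
         	 * The reset sequence below is chip-specific.  Roughly: assert
         	 * CTRL_PHY_RESET (or, on the 82543, toggle the reset pin via
         	 * the CTRL_EXT software-definable pins), flush, wait, deassert,
         	 * then wait again for the PHY to settle.
         	 */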
   8076 	switch (sc->sc_type) {
   8077 	case WM_T_82542_2_0:
   8078 	case WM_T_82542_2_1:
   8079 		/* null */
   8080 		break;
   8081 	case WM_T_82543:
   8082 		/*
    8083 		 * With the 82543, we must force the MAC's speed and duplex
    8084 		 * to match the PHY's configuration.  In addition, we must
    8085 		 * toggle the PHY's hardware reset pin to bring it out of
    8086 		 * reset.
   8087 		 */
   8088 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8089 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8090 
   8091 		/* The PHY reset pin is active-low. */
   8092 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8093 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8094 		    CTRL_EXT_SWDPIN(4));
   8095 		reg |= CTRL_EXT_SWDPIO(4);
   8096 
   8097 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8098 		CSR_WRITE_FLUSH(sc);
   8099 		delay(10*1000);
   8100 
   8101 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8102 		CSR_WRITE_FLUSH(sc);
   8103 		delay(150);
   8104 #if 0
   8105 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8106 #endif
   8107 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8108 		break;
   8109 	case WM_T_82544:	/* reset 10000us */
   8110 	case WM_T_82540:
   8111 	case WM_T_82545:
   8112 	case WM_T_82545_3:
   8113 	case WM_T_82546:
   8114 	case WM_T_82546_3:
   8115 	case WM_T_82541:
   8116 	case WM_T_82541_2:
   8117 	case WM_T_82547:
   8118 	case WM_T_82547_2:
   8119 	case WM_T_82571:	/* reset 100us */
   8120 	case WM_T_82572:
   8121 	case WM_T_82573:
   8122 	case WM_T_82574:
   8123 	case WM_T_82575:
   8124 	case WM_T_82576:
   8125 	case WM_T_82580:
   8126 	case WM_T_I350:
   8127 	case WM_T_I354:
   8128 	case WM_T_I210:
   8129 	case WM_T_I211:
   8130 	case WM_T_82583:
   8131 	case WM_T_80003:
   8132 		/* generic reset */
   8133 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8134 		CSR_WRITE_FLUSH(sc);
   8135 		delay(20000);
   8136 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8137 		CSR_WRITE_FLUSH(sc);
   8138 		delay(20000);
   8139 
   8140 		if ((sc->sc_type == WM_T_82541)
   8141 		    || (sc->sc_type == WM_T_82541_2)
   8142 		    || (sc->sc_type == WM_T_82547)
   8143 		    || (sc->sc_type == WM_T_82547_2)) {
    8144 			/* Workarounds for IGP PHYs are done in igp_reset() */
   8145 			/* XXX add code to set LED after phy reset */
   8146 		}
   8147 		break;
   8148 	case WM_T_ICH8:
   8149 	case WM_T_ICH9:
   8150 	case WM_T_ICH10:
   8151 	case WM_T_PCH:
   8152 	case WM_T_PCH2:
   8153 	case WM_T_PCH_LPT:
   8154 	case WM_T_PCH_SPT:
   8155 		/* generic reset */
   8156 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8157 		CSR_WRITE_FLUSH(sc);
   8158 		delay(100);
   8159 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8160 		CSR_WRITE_FLUSH(sc);
   8161 		delay(150);
   8162 		break;
   8163 	default:
   8164 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8165 		    __func__);
   8166 		break;
   8167 	}
   8168 
   8169 	sc->phy.release(sc);
   8170 
   8171 	/* get_cfg_done */
   8172 	wm_get_cfg_done(sc);
   8173 
   8174 	/* extra setup */
   8175 	switch (sc->sc_type) {
   8176 	case WM_T_82542_2_0:
   8177 	case WM_T_82542_2_1:
   8178 	case WM_T_82543:
   8179 	case WM_T_82544:
   8180 	case WM_T_82540:
   8181 	case WM_T_82545:
   8182 	case WM_T_82545_3:
   8183 	case WM_T_82546:
   8184 	case WM_T_82546_3:
   8185 	case WM_T_82541_2:
   8186 	case WM_T_82547_2:
   8187 	case WM_T_82571:
   8188 	case WM_T_82572:
   8189 	case WM_T_82573:
   8190 	case WM_T_82575:
   8191 	case WM_T_82576:
   8192 	case WM_T_82580:
   8193 	case WM_T_I350:
   8194 	case WM_T_I354:
   8195 	case WM_T_I210:
   8196 	case WM_T_I211:
   8197 	case WM_T_80003:
   8198 		/* null */
   8199 		break;
   8200 	case WM_T_82574:
   8201 	case WM_T_82583:
   8202 		wm_lplu_d0_disable(sc);
   8203 		break;
   8204 	case WM_T_82541:
   8205 	case WM_T_82547:
    8206 		/* XXX Actively configure the LED after PHY reset */
   8207 		break;
   8208 	case WM_T_ICH8:
   8209 	case WM_T_ICH9:
   8210 	case WM_T_ICH10:
   8211 	case WM_T_PCH:
   8212 	case WM_T_PCH2:
   8213 	case WM_T_PCH_LPT:
   8214 	case WM_T_PCH_SPT:
    8215 		/* Allow time for h/w to get to a quiescent state after reset */
   8216 		delay(10*1000);
   8217 
   8218 		if (sc->sc_type == WM_T_PCH)
   8219 			wm_hv_phy_workaround_ich8lan(sc);
   8220 
   8221 		if (sc->sc_type == WM_T_PCH2)
   8222 			wm_lv_phy_workaround_ich8lan(sc);
   8223 
   8224 		/* Clear the host wakeup bit after lcd reset */
   8225 		if (sc->sc_type >= WM_T_PCH) {
   8226 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8227 			    BM_PORT_GEN_CFG);
   8228 			reg &= ~BM_WUC_HOST_WU_BIT;
   8229 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8230 			    BM_PORT_GEN_CFG, reg);
   8231 		}
   8232 
   8233 		/*
    8234 		 * XXX Configure the LCD with the extended configuration
    8235 		 * region in NVM
   8236 		 */
   8237 
   8238 		/* Disable D0 LPLU. */
   8239 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8240 			wm_lplu_d0_disable_pch(sc);
   8241 		else
   8242 			wm_lplu_d0_disable(sc);	/* ICH* */
   8243 		break;
   8244 	default:
   8245 		panic("%s: unknown type\n", __func__);
   8246 		break;
   8247 	}
   8248 }
   8249 
   8250 /*
   8251  * wm_get_phy_id_82575:
   8252  *
    8253  * Return the PHY ID, or -1 on failure.
   8254  */
   8255 static int
   8256 wm_get_phy_id_82575(struct wm_softc *sc)
   8257 {
   8258 	uint32_t reg;
   8259 	int phyid = -1;
   8260 
   8261 	/* XXX */
   8262 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8263 		return -1;
   8264 
   8265 	if (wm_sgmii_uses_mdio(sc)) {
   8266 		switch (sc->sc_type) {
   8267 		case WM_T_82575:
   8268 		case WM_T_82576:
   8269 			reg = CSR_READ(sc, WMREG_MDIC);
   8270 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8271 			break;
   8272 		case WM_T_82580:
   8273 		case WM_T_I350:
   8274 		case WM_T_I354:
   8275 		case WM_T_I210:
   8276 		case WM_T_I211:
   8277 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8278 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8279 			break;
   8280 		default:
   8281 			return -1;
   8282 		}
   8283 	}
   8284 
   8285 	return phyid;
   8286 }
   8287 
   8288 
   8289 /*
   8290  * wm_gmii_mediainit:
   8291  *
   8292  *	Initialize media for use on 1000BASE-T devices.
   8293  */
   8294 static void
   8295 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8296 {
   8297 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8298 	struct mii_data *mii = &sc->sc_mii;
   8299 	uint32_t reg;
   8300 
   8301 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8302 		device_xname(sc->sc_dev), __func__));
   8303 
   8304 	/* We have GMII. */
   8305 	sc->sc_flags |= WM_F_HAS_MII;
   8306 
   8307 	if (sc->sc_type == WM_T_80003)
   8308 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8309 	else
   8310 		sc->sc_tipg = TIPG_1000T_DFLT;
   8311 
   8312 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8313 	if ((sc->sc_type == WM_T_82580)
   8314 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8315 	    || (sc->sc_type == WM_T_I211)) {
   8316 		reg = CSR_READ(sc, WMREG_PHPM);
   8317 		reg &= ~PHPM_GO_LINK_D;
   8318 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8319 	}
   8320 
   8321 	/*
   8322 	 * Let the chip set speed/duplex on its own based on
   8323 	 * signals from the PHY.
   8324 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8325 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8326 	 */
   8327 	sc->sc_ctrl |= CTRL_SLU;
   8328 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8329 
   8330 	/* Initialize our media structures and probe the GMII. */
   8331 	mii->mii_ifp = ifp;
   8332 
   8333 	/*
   8334 	 * Determine the PHY access method.
   8335 	 *
    8336 	 *  For SGMII, use the SGMII-specific method.
    8337 	 *
    8338 	 *  For some devices, the PHY access method can be determined
    8339 	 * from sc_type alone.
    8340 	 *
    8341 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8342 	 * access method from sc_type, so use the PCI product ID for some
    8343 	 * devices.
    8344 	 * For other ICH8 variants, try igp's method first; if the PHY
    8345 	 * can't be detected that way, fall back to bm's method.
   8346 	 */
   8347 	switch (prodid) {
   8348 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8349 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8350 		/* 82577 */
   8351 		sc->sc_phytype = WMPHY_82577;
   8352 		break;
   8353 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8354 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8355 		/* 82578 */
   8356 		sc->sc_phytype = WMPHY_82578;
   8357 		break;
   8358 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8359 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8360 		/* 82579 */
   8361 		sc->sc_phytype = WMPHY_82579;
   8362 		break;
   8363 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8364 	case PCI_PRODUCT_INTEL_82801I_BM:
   8365 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8366 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8367 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8368 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8369 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8370 		/* ICH8, 9, 10 with 82567 */
   8371 		sc->sc_phytype = WMPHY_BM;
   8372 		mii->mii_readreg = wm_gmii_bm_readreg;
   8373 		mii->mii_writereg = wm_gmii_bm_writereg;
   8374 		break;
   8375 	default:
   8376 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8377 		    && !wm_sgmii_uses_mdio(sc)){
   8378 			/* SGMII */
   8379 			mii->mii_readreg = wm_sgmii_readreg;
   8380 			mii->mii_writereg = wm_sgmii_writereg;
   8381 		} else if ((sc->sc_type == WM_T_82574)
   8382 		    || (sc->sc_type == WM_T_82583)) {
   8383 			/* BM2 (phyaddr == 1) */
   8384 			sc->sc_phytype = WMPHY_BM;
   8385 			mii->mii_readreg = wm_gmii_bm_readreg;
   8386 			mii->mii_writereg = wm_gmii_bm_writereg;
   8387 		} else if (sc->sc_type >= WM_T_ICH8) {
   8388 			/* non-82567 ICH8, 9 and 10 */
   8389 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8390 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8391 		} else if (sc->sc_type >= WM_T_80003) {
   8392 			/* 80003 */
   8393 			sc->sc_phytype = WMPHY_GG82563;
   8394 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8395 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8396 		} else if (sc->sc_type >= WM_T_I210) {
   8397 			/* I210 and I211 */
   8398 			sc->sc_phytype = WMPHY_210;
   8399 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8400 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8401 		} else if (sc->sc_type >= WM_T_82580) {
   8402 			/* 82580, I350 and I354 */
   8403 			sc->sc_phytype = WMPHY_82580;
   8404 			mii->mii_readreg = wm_gmii_82580_readreg;
   8405 			mii->mii_writereg = wm_gmii_82580_writereg;
   8406 		} else if (sc->sc_type >= WM_T_82544) {
    8407 			/* 82544, 82540, 8254[1567], 8257[1234] and 82583 */
   8408 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8409 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8410 		} else {
   8411 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8412 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8413 		}
   8414 		break;
   8415 	}
   8416 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8417 		/* All PCH* use _hv_ */
   8418 		mii->mii_readreg = wm_gmii_hv_readreg;
   8419 		mii->mii_writereg = wm_gmii_hv_writereg;
   8420 	}
   8421 	mii->mii_statchg = wm_gmii_statchg;
   8422 
   8423 	/* get PHY control from SMBus to PCIe */
    8424 	/* Switch PHY control from SMBus to PCIe */
   8425 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8426 		wm_smbustopci(sc);
   8427 
   8428 	wm_gmii_reset(sc);
   8429 
   8430 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8431 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8432 	    wm_gmii_mediastatus);
   8433 
   8434 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8435 	    || (sc->sc_type == WM_T_82580)
   8436 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8437 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8438 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8439 			/* Attach only one port */
   8440 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8441 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8442 		} else {
   8443 			int i, id;
   8444 			uint32_t ctrl_ext;
   8445 
   8446 			id = wm_get_phy_id_82575(sc);
   8447 			if (id != -1) {
   8448 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8449 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8450 			}
   8451 			if ((id == -1)
   8452 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8453 				/* Power on sgmii phy if it is disabled */
   8454 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8455 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8456 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8457 				CSR_WRITE_FLUSH(sc);
   8458 				delay(300*1000); /* XXX too long */
   8459 
    8460 				/* Try PHY addresses 1 through 7 */
   8461 				for (i = 1; i < 8; i++)
   8462 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8463 					    0xffffffff, i, MII_OFFSET_ANY,
   8464 					    MIIF_DOPAUSE);
   8465 
   8466 				/* restore previous sfp cage power state */
   8467 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8468 			}
   8469 		}
   8470 	} else {
   8471 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8472 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8473 	}
   8474 
   8475 	/*
    8476 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
    8477 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   8478 	 */
   8479 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8480 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8481 		wm_set_mdio_slow_mode_hv(sc);
   8482 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8483 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8484 	}
   8485 
   8486 	/*
   8487 	 * (For ICH8 variants)
   8488 	 * If PHY detection failed, use BM's r/w function and retry.
   8489 	 */
   8490 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8491 		/* if failed, retry with *_bm_* */
   8492 		mii->mii_readreg = wm_gmii_bm_readreg;
   8493 		mii->mii_writereg = wm_gmii_bm_writereg;
   8494 
   8495 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8496 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8497 	}
   8498 
   8499 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8500 		/* No PHY was found */
   8501 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8502 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8503 		sc->sc_phytype = WMPHY_NONE;
   8504 	} else {
   8505 		/*
   8506 		 * PHY Found!
   8507 		 * Check PHY type.
   8508 		 */
   8509 		uint32_t model;
   8510 		struct mii_softc *child;
   8511 
   8512 		child = LIST_FIRST(&mii->mii_phys);
   8513 		model = child->mii_mpd_model;
   8514 		if (model == MII_MODEL_yyINTEL_I82566)
   8515 			sc->sc_phytype = WMPHY_IGP_3;
   8516 
   8517 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8518 	}
   8519 }
   8520 
   8521 /*
   8522  * wm_gmii_mediachange:	[ifmedia interface function]
   8523  *
   8524  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8525  */
   8526 static int
   8527 wm_gmii_mediachange(struct ifnet *ifp)
   8528 {
   8529 	struct wm_softc *sc = ifp->if_softc;
   8530 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8531 	int rc;
   8532 
   8533 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8534 		device_xname(sc->sc_dev), __func__));
   8535 	if ((ifp->if_flags & IFF_UP) == 0)
   8536 		return 0;
   8537 
   8538 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8539 	sc->sc_ctrl |= CTRL_SLU;
   8540 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8541 	    || (sc->sc_type > WM_T_82543)) {
   8542 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8543 	} else {
   8544 		sc->sc_ctrl &= ~CTRL_ASDE;
   8545 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8546 		if (ife->ifm_media & IFM_FDX)
   8547 			sc->sc_ctrl |= CTRL_FD;
   8548 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8549 		case IFM_10_T:
   8550 			sc->sc_ctrl |= CTRL_SPEED_10;
   8551 			break;
   8552 		case IFM_100_TX:
   8553 			sc->sc_ctrl |= CTRL_SPEED_100;
   8554 			break;
   8555 		case IFM_1000_T:
   8556 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8557 			break;
   8558 		default:
   8559 			panic("wm_gmii_mediachange: bad media 0x%x",
   8560 			    ife->ifm_media);
   8561 		}
   8562 	}
   8563 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8564 	if (sc->sc_type <= WM_T_82543)
   8565 		wm_gmii_reset(sc);
   8566 
   8567 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8568 		return 0;
   8569 	return rc;
   8570 }
   8571 
   8572 /*
   8573  * wm_gmii_mediastatus:	[ifmedia interface function]
   8574  *
   8575  *	Get the current interface media status on a 1000BASE-T device.
   8576  */
   8577 static void
   8578 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8579 {
   8580 	struct wm_softc *sc = ifp->if_softc;
   8581 
   8582 	ether_mediastatus(ifp, ifmr);
   8583 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8584 	    | sc->sc_flowflags;
   8585 }
   8586 
   8587 #define	MDI_IO		CTRL_SWDPIN(2)
   8588 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8589 #define	MDI_CLK		CTRL_SWDPIN(3)
   8590 
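         /*
          * i82543 MDIO is bit-banged through the software-definable pins
          * in CTRL: each bit is presented on MDI_IO and strobed with a
          * full low-high-low cycle on MDI_CLK, with ~10us between
          * transitions (the delay(10) calls below).
          */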
   8591 static void
   8592 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8593 {
   8594 	uint32_t i, v;
   8595 
   8596 	v = CSR_READ(sc, WMREG_CTRL);
   8597 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8598 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8599 
   8600 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8601 		if (data & i)
   8602 			v |= MDI_IO;
   8603 		else
   8604 			v &= ~MDI_IO;
   8605 		CSR_WRITE(sc, WMREG_CTRL, v);
   8606 		CSR_WRITE_FLUSH(sc);
   8607 		delay(10);
   8608 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8609 		CSR_WRITE_FLUSH(sc);
   8610 		delay(10);
   8611 		CSR_WRITE(sc, WMREG_CTRL, v);
   8612 		CSR_WRITE_FLUSH(sc);
   8613 		delay(10);
   8614 	}
   8615 }
   8616 
   8617 static uint32_t
   8618 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8619 {
   8620 	uint32_t v, i, data = 0;
   8621 
   8622 	v = CSR_READ(sc, WMREG_CTRL);
   8623 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8624 	v |= CTRL_SWDPIO(3);
   8625 
   8626 	CSR_WRITE(sc, WMREG_CTRL, v);
   8627 	CSR_WRITE_FLUSH(sc);
   8628 	delay(10);
   8629 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8630 	CSR_WRITE_FLUSH(sc);
   8631 	delay(10);
   8632 	CSR_WRITE(sc, WMREG_CTRL, v);
   8633 	CSR_WRITE_FLUSH(sc);
   8634 	delay(10);
   8635 
   8636 	for (i = 0; i < 16; i++) {
   8637 		data <<= 1;
   8638 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8639 		CSR_WRITE_FLUSH(sc);
   8640 		delay(10);
   8641 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8642 			data |= 1;
   8643 		CSR_WRITE(sc, WMREG_CTRL, v);
   8644 		CSR_WRITE_FLUSH(sc);
   8645 		delay(10);
   8646 	}
   8647 
   8648 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8649 	CSR_WRITE_FLUSH(sc);
   8650 	delay(10);
   8651 	CSR_WRITE(sc, WMREG_CTRL, v);
   8652 	CSR_WRITE_FLUSH(sc);
   8653 	delay(10);
   8654 
   8655 	return data;
   8656 }
   8657 
   8658 #undef MDI_IO
   8659 #undef MDI_DIR
   8660 #undef MDI_CLK
   8661 
   8662 /*
   8663  * wm_gmii_i82543_readreg:	[mii interface function]
   8664  *
   8665  *	Read a PHY register on the GMII (i82543 version).
   8666  */
   8667 static int
   8668 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8669 {
   8670 	struct wm_softc *sc = device_private(self);
   8671 	int rv;
   8672 
   8673 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8674 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8675 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8676 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8677 
   8678 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8679 	    device_xname(sc->sc_dev), phy, reg, rv));
   8680 
   8681 	return rv;
   8682 }
   8683 
   8684 /*
   8685  * wm_gmii_i82543_writereg:	[mii interface function]
   8686  *
   8687  *	Write a PHY register on the GMII (i82543 version).
   8688  */
   8689 static void
   8690 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8691 {
   8692 	struct wm_softc *sc = device_private(self);
   8693 
   8694 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8695 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8696 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8697 	    (MII_COMMAND_START << 30), 32);
   8698 }
   8699 
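         /*
          * MDIC-based MII access (most chips newer than the i82543): a
          * single command word selects the operation, PHY address and
          * register, and MDIC_READY is polled in 50us steps.  A sketch of
          * a read, omitting the timeout handling done below:
          *
          *	CSR_WRITE(sc, WMREG_MDIC,
          *	    MDIC_OP_READ | MDIC_PHYADD(phy) | MDIC_REGADD(reg));
          *	while ((CSR_READ(sc, WMREG_MDIC) & MDIC_READY) == 0)
          *		delay(50);
          *	val = MDIC_DATA(CSR_READ(sc, WMREG_MDIC));
          */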
   8700 /*
   8701  * wm_gmii_mdic_readreg:	[mii interface function]
   8702  *
   8703  *	Read a PHY register on the GMII.
   8704  */
   8705 static int
   8706 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8707 {
   8708 	struct wm_softc *sc = device_private(self);
   8709 	uint32_t mdic = 0;
   8710 	int i, rv;
   8711 
   8712 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8713 	    MDIC_REGADD(reg));
   8714 
   8715 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8716 		mdic = CSR_READ(sc, WMREG_MDIC);
   8717 		if (mdic & MDIC_READY)
   8718 			break;
   8719 		delay(50);
   8720 	}
   8721 
   8722 	if ((mdic & MDIC_READY) == 0) {
   8723 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8724 		    device_xname(sc->sc_dev), phy, reg);
   8725 		rv = 0;
   8726 	} else if (mdic & MDIC_E) {
   8727 #if 0 /* This is normal if no PHY is present. */
   8728 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8729 		    device_xname(sc->sc_dev), phy, reg);
   8730 #endif
   8731 		rv = 0;
   8732 	} else {
   8733 		rv = MDIC_DATA(mdic);
   8734 		if (rv == 0xffff)
   8735 			rv = 0;
   8736 	}
   8737 
   8738 	return rv;
   8739 }
   8740 
   8741 /*
   8742  * wm_gmii_mdic_writereg:	[mii interface function]
   8743  *
   8744  *	Write a PHY register on the GMII.
   8745  */
   8746 static void
   8747 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8748 {
   8749 	struct wm_softc *sc = device_private(self);
   8750 	uint32_t mdic = 0;
   8751 	int i;
   8752 
   8753 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8754 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8755 
   8756 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8757 		mdic = CSR_READ(sc, WMREG_MDIC);
   8758 		if (mdic & MDIC_READY)
   8759 			break;
   8760 		delay(50);
   8761 	}
   8762 
   8763 	if ((mdic & MDIC_READY) == 0)
   8764 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8765 		    device_xname(sc->sc_dev), phy, reg);
   8766 	else if (mdic & MDIC_E)
   8767 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8768 		    device_xname(sc->sc_dev), phy, reg);
   8769 }
   8770 
   8771 /*
   8772  * wm_gmii_i82544_readreg:	[mii interface function]
   8773  *
   8774  *	Read a PHY register on the GMII.
   8775  */
   8776 static int
   8777 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8778 {
   8779 	struct wm_softc *sc = device_private(self);
   8780 	int rv;
   8781 
   8782 	if (sc->phy.acquire(sc)) {
   8783 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8784 		    __func__);
   8785 		return 0;
   8786 	}
   8787 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8788 	sc->phy.release(sc);
   8789 
   8790 	return rv;
   8791 }
   8792 
   8793 /*
   8794  * wm_gmii_i82544_writereg:	[mii interface function]
   8795  *
   8796  *	Write a PHY register on the GMII.
   8797  */
   8798 static void
   8799 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8800 {
   8801 	struct wm_softc *sc = device_private(self);
   8802 
    8803 	if (sc->phy.acquire(sc)) {
    8804 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8805 		    __func__);
         		return;
    8806 	}
   8807 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8808 	sc->phy.release(sc);
   8809 }
   8810 
   8811 /*
   8812  * wm_gmii_i80003_readreg:	[mii interface function]
   8813  *
    8814  *	Read a PHY register on the Kumeran bus (80003).
    8815  * This could be handled by the PHY layer if we didn't have to lock
    8816  * the resource ...
   8817  */
   8818 static int
   8819 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8820 {
   8821 	struct wm_softc *sc = device_private(self);
   8822 	int rv;
   8823 
   8824 	if (phy != 1) /* only one PHY on kumeran bus */
   8825 		return 0;
   8826 
   8827 	if (sc->phy.acquire(sc)) {
   8828 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8829 		    __func__);
   8830 		return 0;
   8831 	}
   8832 
   8833 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8834 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8835 		    reg >> GG82563_PAGE_SHIFT);
   8836 	} else {
   8837 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8838 		    reg >> GG82563_PAGE_SHIFT);
   8839 	}
    8840 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8841 	delay(200);
   8842 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8843 	delay(200);
   8844 	sc->phy.release(sc);
   8845 
   8846 	return rv;
   8847 }
   8848 
   8849 /*
   8850  * wm_gmii_i80003_writereg:	[mii interface function]
   8851  *
    8852  *	Write a PHY register on the Kumeran bus (80003).
    8853  * This could be handled by the PHY layer if we didn't have to lock
    8854  * the resource ...
   8855  */
   8856 static void
   8857 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8858 {
   8859 	struct wm_softc *sc = device_private(self);
   8860 
   8861 	if (phy != 1) /* only one PHY on kumeran bus */
   8862 		return;
   8863 
   8864 	if (sc->phy.acquire(sc)) {
   8865 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8866 		    __func__);
   8867 		return;
   8868 	}
   8869 
   8870 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8871 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8872 		    reg >> GG82563_PAGE_SHIFT);
   8873 	} else {
   8874 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8875 		    reg >> GG82563_PAGE_SHIFT);
   8876 	}
    8877 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8878 	delay(200);
   8879 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8880 	delay(200);
   8881 
   8882 	sc->phy.release(sc);
   8883 }
   8884 
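         /*
          * BM/BME1000 PHY registers are paged: the page lives in the upper
          * bits of "reg" (reg >> BME1000_PAGE_SHIFT) and the register
          * offset in the low bits (reg & MII_ADDRMASK).  Register numbers
          * above BME1000_MAX_MULTI_PAGE_REG need an explicit page-select
          * write first; page 800 (BM_WUC_PAGE) is handled separately by
          * wm_access_phy_wakeup_reg_bm().
          */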
   8885 /*
   8886  * wm_gmii_bm_readreg:	[mii interface function]
   8887  *
    8888  *	Read a PHY register on the BM PHY (82567 variants, 82574/82583).
    8889  * This could be handled by the PHY layer if we didn't have to lock
    8890  * the resource ...
   8891  */
   8892 static int
   8893 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8894 {
   8895 	struct wm_softc *sc = device_private(self);
   8896 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8897 	uint16_t val;
   8898 	int rv;
   8899 
   8900 	if (sc->phy.acquire(sc)) {
   8901 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8902 		    __func__);
   8903 		return 0;
   8904 	}
   8905 
   8906 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8907 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8908 		    || (reg == 31)) ? 1 : phy;
    8909 	/* Page 800 works differently from the rest; it has its own function */
   8910 	if (page == BM_WUC_PAGE) {
   8911 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8912 		rv = val;
   8913 		goto release;
   8914 	}
   8915 
   8916 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8917 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8918 		    && (sc->sc_type != WM_T_82583))
   8919 			wm_gmii_mdic_writereg(self, phy,
   8920 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8921 		else
   8922 			wm_gmii_mdic_writereg(self, phy,
   8923 			    BME1000_PHY_PAGE_SELECT, page);
   8924 	}
   8925 
   8926 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8927 
   8928 release:
   8929 	sc->phy.release(sc);
   8930 	return rv;
   8931 }
   8932 
   8933 /*
   8934  * wm_gmii_bm_writereg:	[mii interface function]
   8935  *
    8936  *	Write a PHY register on the BM PHY (82567 variants, 82574/82583).
    8937  * This could be handled by the PHY layer if we didn't have to lock
    8938  * the resource ...
   8939  */
   8940 static void
   8941 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8942 {
   8943 	struct wm_softc *sc = device_private(self);
   8944 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8945 
   8946 	if (sc->phy.acquire(sc)) {
   8947 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8948 		    __func__);
   8949 		return;
   8950 	}
   8951 
   8952 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8953 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8954 		    || (reg == 31)) ? 1 : phy;
    8955 	/* Page 800 works differently from the rest; it has its own function */
   8956 	if (page == BM_WUC_PAGE) {
   8957 		uint16_t tmp;
   8958 
   8959 		tmp = val;
   8960 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8961 		goto release;
   8962 	}
   8963 
   8964 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8965 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8966 		    && (sc->sc_type != WM_T_82583))
   8967 			wm_gmii_mdic_writereg(self, phy,
   8968 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8969 		else
   8970 			wm_gmii_mdic_writereg(self, phy,
   8971 			    BME1000_PHY_PAGE_SELECT, page);
   8972 	}
   8973 
   8974 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8975 
   8976 release:
   8977 	sc->phy.release(sc);
   8978 }
   8979 
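         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read (rd != 0) or write a BM PHY wakeup register on page 800
          *	(BM_WUC_PAGE) in three steps: enable wakeup register access
          *	from page 769, access the register through the address/data
          *	opcode registers on page 800, then restore the saved WUCE
          *	value.
          */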
   8980 static void
    8981 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   8982 {
   8983 	struct wm_softc *sc = device_private(self);
   8984 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8985 	uint16_t wuce, reg;
   8986 
   8987 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8988 		device_xname(sc->sc_dev), __func__));
   8989 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8990 	if (sc->sc_type == WM_T_PCH) {
    8991 		/* XXX The e1000 driver does nothing here... why? */
   8992 	}
   8993 
   8994 	/*
   8995 	 * 1) Enable PHY wakeup register first.
   8996 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   8997 	 */
   8998 
   8999 	/* Set page 769 */
   9000 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9001 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9002 
   9003 	/* Read WUCE and save it */
   9004 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9005 
   9006 	reg = wuce | BM_WUC_ENABLE_BIT;
   9007 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9008 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9009 
   9010 	/* Select page 800 */
   9011 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9012 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9013 
   9014 	/*
   9015 	 * 2) Access PHY wakeup register.
   9016 	 * See e1000_access_phy_wakeup_reg_bm.
   9017 	 */
   9018 
   9019 	/* Write page 800 */
   9020 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9021 
   9022 	if (rd)
   9023 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9024 	else
   9025 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9026 
   9027 	/*
   9028 	 * 3) Disable PHY wakeup register.
   9029 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9030 	 */
   9031 	/* Set page 769 */
   9032 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9033 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9034 
   9035 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9036 }
   9037 
   9038 /*
   9039  * wm_gmii_hv_readreg:	[mii interface function]
   9040  *
    9041  *	Read a PHY register on the PCH PHYs (82577/82578/82579, newer).
    9042  * This could be handled by the PHY layer if we didn't have to lock
    9043  * the resource ...
   9044  */
   9045 static int
   9046 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9047 {
   9048 	struct wm_softc *sc = device_private(self);
   9049 	int rv;
   9050 
   9051 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9052 		device_xname(sc->sc_dev), __func__));
   9053 	if (sc->phy.acquire(sc)) {
   9054 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9055 		    __func__);
   9056 		return 0;
   9057 	}
   9058 
   9059 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9060 	sc->phy.release(sc);
   9061 	return rv;
   9062 }
   9063 
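         /*
          * Locked variant of wm_gmii_hv_readreg(); the caller must hold
          * the PHY semaphore.  The page is carried in the upper bits of
          * "reg" (BM_PHY_REG_PAGE/BM_PHY_REG_NUM); pages at or above
          * HV_INTC_FC_PAGE_START are always accessed at PHY address 1.
          */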
   9064 static int
   9065 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9066 {
   9067 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9068 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9069 	uint16_t val;
   9070 	int rv;
   9071 
   9072 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9073 
    9074 	/* Page 800 works differently from the rest; it has its own function */
   9075 	if (page == BM_WUC_PAGE) {
   9076 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9077 		return val;
   9078 	}
   9079 
   9080 	/*
    9081 	 * Pages below 768 work differently from the rest and would need
    9082 	 * their own function; they are not handled here
   9083 	 */
   9084 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9085 		printf("gmii_hv_readreg!!!\n");
   9086 		return 0;
   9087 	}
   9088 
   9089 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9090 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9091 		    page << BME1000_PAGE_SHIFT);
   9092 	}
   9093 
   9094 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9095 	return rv;
   9096 }
   9097 
   9098 /*
   9099  * wm_gmii_hv_writereg:	[mii interface function]
   9100  *
    9101  *	Write a PHY register on the PCH PHYs (82577/82578/82579, newer).
    9102  * This could be handled by the PHY layer if we didn't have to lock
    9103  * the resource ...
   9104  */
   9105 static void
   9106 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9107 {
   9108 	struct wm_softc *sc = device_private(self);
   9109 
   9110 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9111 		device_xname(sc->sc_dev), __func__));
   9112 
   9113 	if (sc->phy.acquire(sc)) {
   9114 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9115 		    __func__);
   9116 		return;
   9117 	}
   9118 
   9119 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9120 	sc->phy.release(sc);
   9121 }
   9122 
   9123 static void
   9124 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9125 {
   9126 	struct wm_softc *sc = device_private(self);
   9127 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9128 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9129 
   9130 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9131 
    9132 	/* Page 800 works differently from the rest; it has its own function */
   9133 	if (page == BM_WUC_PAGE) {
   9134 		uint16_t tmp;
   9135 
   9136 		tmp = val;
   9137 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9138 		return;
   9139 	}
   9140 
   9141 	/*
    9142 	 * Pages below 768 work differently from the rest and would need
    9143 	 * their own function; they are not handled here
   9144 	 */
   9145 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9146 		printf("gmii_hv_writereg!!!\n");
   9147 		return;
   9148 	}
   9149 
   9150 	{
   9151 		/*
   9152 		 * XXX Workaround MDIO accesses being disabled after entering
   9153 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9154 		 * register is set)
   9155 		 */
   9156 		if (sc->sc_phytype == WMPHY_82578) {
   9157 			struct mii_softc *child;
   9158 
   9159 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9160 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9161 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9162 			    && ((val & (1 << 11)) != 0)) {
   9163 				printf("XXX need workaround\n");
   9164 			}
   9165 		}
   9166 
   9167 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9168 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9169 			    page << BME1000_PAGE_SHIFT);
   9170 		}
   9171 	}
   9172 
   9173 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9174 }
   9175 
   9176 /*
   9177  * wm_gmii_82580_readreg:	[mii interface function]
   9178  *
   9179  *	Read a PHY register on the 82580 and I350.
    9180  * This could be handled by the PHY layer if we didn't have to lock
    9181  * the resource ...
   9182  */
   9183 static int
   9184 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9185 {
   9186 	struct wm_softc *sc = device_private(self);
   9187 	int rv;
   9188 
   9189 	if (sc->phy.acquire(sc) != 0) {
   9190 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9191 		    __func__);
   9192 		return 0;
   9193 	}
   9194 
   9195 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9196 
   9197 	sc->phy.release(sc);
   9198 	return rv;
   9199 }
   9200 
   9201 /*
   9202  * wm_gmii_82580_writereg:	[mii interface function]
   9203  *
   9204  *	Write a PHY register on the 82580 and I350.
    9205  * This could be handled by the PHY layer if we didn't have to lock
    9206  * the resource ...
   9207  */
   9208 static void
   9209 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9210 {
   9211 	struct wm_softc *sc = device_private(self);
   9212 
   9213 	if (sc->phy.acquire(sc) != 0) {
   9214 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9215 		    __func__);
   9216 		return;
   9217 	}
   9218 
   9219 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9220 
   9221 	sc->phy.release(sc);
   9222 }
   9223 
   9224 /*
   9225  * wm_gmii_gs40g_readreg:	[mii interface function]
   9226  *
    9227  *	Read a PHY register on the I210 and I211.
    9228  * This could be handled by the PHY layer if we didn't have to lock
    9229  * the resource ...
   9230  */
   9231 static int
   9232 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9233 {
   9234 	struct wm_softc *sc = device_private(self);
   9235 	int page, offset;
   9236 	int rv;
   9237 
   9238 	/* Acquire semaphore */
   9239 	if (sc->phy.acquire(sc)) {
   9240 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9241 		    __func__);
   9242 		return 0;
   9243 	}
   9244 
   9245 	/* Page select */
   9246 	page = reg >> GS40G_PAGE_SHIFT;
   9247 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9248 
   9249 	/* Read reg */
   9250 	offset = reg & GS40G_OFFSET_MASK;
   9251 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9252 
   9253 	sc->phy.release(sc);
   9254 	return rv;
   9255 }
   9256 
   9257 /*
   9258  * wm_gmii_gs40g_writereg:	[mii interface function]
   9259  *
   9260  *	Write a PHY register on the I210 and I211.
    9261  * This could be handled by the PHY layer if we didn't have to lock
    9262  * the resource ...
   9263  */
   9264 static void
   9265 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9266 {
   9267 	struct wm_softc *sc = device_private(self);
   9268 	int page, offset;
   9269 
   9270 	/* Acquire semaphore */
   9271 	if (sc->phy.acquire(sc)) {
   9272 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9273 		    __func__);
   9274 		return;
   9275 	}
   9276 
   9277 	/* Page select */
   9278 	page = reg >> GS40G_PAGE_SHIFT;
   9279 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9280 
   9281 	/* Write reg */
   9282 	offset = reg & GS40G_OFFSET_MASK;
   9283 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9284 
   9285 	/* Release semaphore */
   9286 	sc->phy.release(sc);
   9287 }
   9288 
   9289 /*
   9290  * wm_gmii_statchg:	[mii interface function]
   9291  *
   9292  *	Callback from MII layer when media changes.
   9293  */
   9294 static void
   9295 wm_gmii_statchg(struct ifnet *ifp)
   9296 {
   9297 	struct wm_softc *sc = ifp->if_softc;
   9298 	struct mii_data *mii = &sc->sc_mii;
   9299 
   9300 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9301 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9302 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9303 
   9304 	/*
   9305 	 * Get flow control negotiation result.
   9306 	 */
   9307 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9308 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9309 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9310 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9311 	}
   9312 
   9313 	if (sc->sc_flowflags & IFM_FLOW) {
   9314 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9315 			sc->sc_ctrl |= CTRL_TFCE;
   9316 			sc->sc_fcrtl |= FCRTL_XONE;
   9317 		}
   9318 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9319 			sc->sc_ctrl |= CTRL_RFCE;
   9320 	}
   9321 
   9322 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9323 		DPRINTF(WM_DEBUG_LINK,
   9324 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9325 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9326 	} else {
   9327 		DPRINTF(WM_DEBUG_LINK,
   9328 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9329 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9330 	}
   9331 
   9332 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9333 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9334 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9335 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9336 	if (sc->sc_type == WM_T_80003) {
   9337 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9338 		case IFM_1000_T:
   9339 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9340 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9341 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9342 			break;
   9343 		default:
   9344 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9345 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9346 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9347 			break;
   9348 		}
   9349 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9350 	}
   9351 }
   9352 
   9353 /* kumeran related (80003, ICH* and PCH*) */
   9354 
   9355 /*
   9356  * wm_kmrn_readreg:
   9357  *
   9358  *	Read a kumeran register
   9359  */
   9360 static int
   9361 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9362 {
   9363 	int rv;
   9364 
   9365 	if (sc->sc_type == WM_T_80003)
   9366 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9367 	else
   9368 		rv = sc->phy.acquire(sc);
   9369 	if (rv != 0) {
   9370 		aprint_error_dev(sc->sc_dev,
   9371 		    "%s: failed to get semaphore\n", __func__);
   9372 		return 0;
   9373 	}
   9374 
   9375 	rv = wm_kmrn_readreg_locked(sc, reg);
   9376 
   9377 	if (sc->sc_type == WM_T_80003)
   9378 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9379 	else
   9380 		sc->phy.release(sc);
   9381 
   9382 	return rv;
   9383 }
   9384 
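         /*
          * Locked variant of wm_kmrn_readreg(); assumes the semaphore is
          * already held.  A Kumeran read goes through the single
          * KUMCTRLSTA register: write the target offset with
          * KUMCTRLSTA_REN set, wait 2us, then read the data back from the
          * same register.
          */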
   9385 static int
   9386 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9387 {
   9388 	int rv;
   9389 
   9390 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9391 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9392 	    KUMCTRLSTA_REN);
   9393 	CSR_WRITE_FLUSH(sc);
   9394 	delay(2);
   9395 
   9396 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9397 
   9398 	return rv;
   9399 }
   9400 
   9401 /*
   9402  * wm_kmrn_writereg:
   9403  *
   9404  *	Write a kumeran register
   9405  */
   9406 static void
   9407 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9408 {
   9409 	int rv;
   9410 
   9411 	if (sc->sc_type == WM_T_80003)
   9412 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9413 	else
   9414 		rv = sc->phy.acquire(sc);
   9415 	if (rv != 0) {
   9416 		aprint_error_dev(sc->sc_dev,
   9417 		    "%s: failed to get semaphore\n", __func__);
   9418 		return;
   9419 	}
   9420 
   9421 	wm_kmrn_writereg_locked(sc, reg, val);
   9422 
   9423 	if (sc->sc_type == WM_T_80003)
   9424 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9425 	else
   9426 		sc->phy.release(sc);
   9427 }
   9428 
   9429 static void
   9430 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9431 {
   9432 
   9433 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9434 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9435 	    (val & KUMCTRLSTA_MASK));
   9436 }
   9437 
   9438 /* SGMII related */
   9439 
   9440 /*
   9441  * wm_sgmii_uses_mdio
   9442  *
   9443  * Check whether the transaction is to the internal PHY or the external
   9444  * MDIO interface. Return true if it's MDIO.
   9445  */
   9446 static bool
   9447 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9448 {
   9449 	uint32_t reg;
   9450 	bool ismdio = false;
   9451 
   9452 	switch (sc->sc_type) {
   9453 	case WM_T_82575:
   9454 	case WM_T_82576:
   9455 		reg = CSR_READ(sc, WMREG_MDIC);
   9456 		ismdio = ((reg & MDIC_DEST) != 0);
   9457 		break;
   9458 	case WM_T_82580:
   9459 	case WM_T_I350:
   9460 	case WM_T_I354:
   9461 	case WM_T_I210:
   9462 	case WM_T_I211:
   9463 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9464 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9465 		break;
   9466 	default:
   9467 		break;
   9468 	}
   9469 
   9470 	return ismdio;
   9471 }
   9472 
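         /*
          * When the SGMII PHY is not reached over MDIO (see
          * wm_sgmii_uses_mdio() above), it is accessed through the I2CCMD
          * register: the register address, PHY address and opcode are
          * packed into one command word, and I2CCMD_READY is polled in
          * 50us steps, up to I2CCMD_PHY_TIMEOUT iterations.
          */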
   9473 /*
   9474  * wm_sgmii_readreg:	[mii interface function]
   9475  *
   9476  *	Read a PHY register on the SGMII
    9477  * This could be handled by the PHY layer if we didn't have to lock
    9478  * the resource ...
   9479  */
   9480 static int
   9481 wm_sgmii_readreg(device_t self, int phy, int reg)
   9482 {
   9483 	struct wm_softc *sc = device_private(self);
   9484 	uint32_t i2ccmd;
   9485 	int i, rv;
   9486 
   9487 	if (sc->phy.acquire(sc)) {
   9488 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9489 		    __func__);
   9490 		return 0;
   9491 	}
   9492 
   9493 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9494 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9495 	    | I2CCMD_OPCODE_READ;
   9496 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9497 
   9498 	/* Poll the ready bit */
   9499 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9500 		delay(50);
   9501 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9502 		if (i2ccmd & I2CCMD_READY)
   9503 			break;
   9504 	}
   9505 	if ((i2ccmd & I2CCMD_READY) == 0)
   9506 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9507 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9508 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9509 
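         	/* The data in I2CCMD comes back byte-swapped; swap it back */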
   9510 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9511 
   9512 	sc->phy.release(sc);
   9513 	return rv;
   9514 }
   9515 
   9516 /*
   9517  * wm_sgmii_writereg:	[mii interface function]
   9518  *
   9519  *	Write a PHY register on the SGMII.
    9520  * This could be handled by the PHY layer if we didn't have to lock
    9521  * the resource ...
   9522  */
   9523 static void
   9524 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9525 {
   9526 	struct wm_softc *sc = device_private(self);
   9527 	uint32_t i2ccmd;
   9528 	int i;
   9529 	int val_swapped;
   9530 
   9531 	if (sc->phy.acquire(sc) != 0) {
   9532 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9533 		    __func__);
   9534 		return;
   9535 	}
   9536 	/* Swap the data bytes for the I2C interface */
   9537 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9538 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9539 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9540 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9541 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9542 
   9543 	/* Poll the ready bit */
   9544 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9545 		delay(50);
   9546 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9547 		if (i2ccmd & I2CCMD_READY)
   9548 			break;
   9549 	}
   9550 	if ((i2ccmd & I2CCMD_READY) == 0)
   9551 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9552 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9553 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9554 
   9555 	sc->phy.release(sc);
   9556 }
   9557 
   9558 /* TBI related */
   9559 
   9560 /*
   9561  * wm_tbi_mediainit:
   9562  *
   9563  *	Initialize media for use on 1000BASE-X devices.
   9564  */
   9565 static void
   9566 wm_tbi_mediainit(struct wm_softc *sc)
   9567 {
   9568 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9569 	const char *sep = "";
   9570 
   9571 	if (sc->sc_type < WM_T_82543)
   9572 		sc->sc_tipg = TIPG_WM_DFLT;
   9573 	else
   9574 		sc->sc_tipg = TIPG_LG_DFLT;
   9575 
   9576 	sc->sc_tbi_serdes_anegticks = 5;
   9577 
   9578 	/* Initialize our media structures */
   9579 	sc->sc_mii.mii_ifp = ifp;
   9580 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9581 
   9582 	if ((sc->sc_type >= WM_T_82575)
   9583 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9584 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9585 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9586 	else
   9587 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9588 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9589 
   9590 	/*
   9591 	 * SWD Pins:
   9592 	 *
   9593 	 *	0 = Link LED (output)
   9594 	 *	1 = Loss Of Signal (input)
   9595 	 */
   9596 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9597 
   9598 	/* XXX Perhaps this is only for TBI */
   9599 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9600 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9601 
   9602 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9603 		sc->sc_ctrl &= ~CTRL_LRST;
   9604 
   9605 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9606 
   9607 #define	ADD(ss, mm, dd)							\
   9608 do {									\
   9609 	aprint_normal("%s%s", sep, ss);					\
   9610 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9611 	sep = ", ";							\
   9612 } while (/*CONSTCOND*/0)
   9613 
   9614 	aprint_normal_dev(sc->sc_dev, "");
   9615 
   9616 	/* Only 82545 is LX */
   9617 	if (sc->sc_type == WM_T_82545) {
   9618 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9619 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9620 	} else {
   9621 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9622 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9623 	}
   9624 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9625 	aprint_normal("\n");
   9626 
   9627 #undef ADD
   9628 
   9629 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9630 }
   9631 
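         /*
          * TBI autonegotiation is driven through the TXCW (transmit
          * configuration word) register: TXCW_ANE enables autonegotiation,
          * TXCW_FD/TXCW_HD advertise duplex, and the TXCW_*_PAUSE bits
          * advertise flow control; the result is observed via STATUS_LU
          * once the link settles.
          */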
   9632 /*
   9633  * wm_tbi_mediachange:	[ifmedia interface function]
   9634  *
   9635  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9636  */
   9637 static int
   9638 wm_tbi_mediachange(struct ifnet *ifp)
   9639 {
   9640 	struct wm_softc *sc = ifp->if_softc;
   9641 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9642 	uint32_t status;
   9643 	int i;
   9644 
   9645 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9646 		/* XXX need some work for >= 82571 and < 82575 */
   9647 		if (sc->sc_type < WM_T_82575)
   9648 			return 0;
   9649 	}
   9650 
   9651 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9652 	    || (sc->sc_type >= WM_T_82575))
   9653 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9654 
   9655 	sc->sc_ctrl &= ~CTRL_LRST;
   9656 	sc->sc_txcw = TXCW_ANE;
   9657 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9658 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9659 	else if (ife->ifm_media & IFM_FDX)
   9660 		sc->sc_txcw |= TXCW_FD;
   9661 	else
   9662 		sc->sc_txcw |= TXCW_HD;
   9663 
   9664 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9665 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9666 
   9667 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9668 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9669 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9670 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9671 	CSR_WRITE_FLUSH(sc);
   9672 	delay(1000);
   9673 
   9674 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9675 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9676 
   9677 	/*
    9678 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    9679 	 * optics detect a signal; on older chips 0 means signal present.
   9680 	 */
   9681 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9682 		/* Have signal; wait for the link to come up. */
   9683 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9684 			delay(10000);
   9685 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9686 				break;
   9687 		}
   9688 
   9689 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9690 			    device_xname(sc->sc_dev),i));
   9691 
   9692 		status = CSR_READ(sc, WMREG_STATUS);
   9693 		DPRINTF(WM_DEBUG_LINK,
   9694 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9695 			device_xname(sc->sc_dev),status, STATUS_LU));
   9696 		if (status & STATUS_LU) {
   9697 			/* Link is up. */
   9698 			DPRINTF(WM_DEBUG_LINK,
   9699 			    ("%s: LINK: set media -> link up %s\n",
   9700 			    device_xname(sc->sc_dev),
   9701 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9702 
			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should re-read CTRL into
			 * sc->sc_ctrl.
			 */
   9707 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9708 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9709 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9710 			if (status & STATUS_FD)
   9711 				sc->sc_tctl |=
   9712 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9713 			else
   9714 				sc->sc_tctl |=
   9715 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9716 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9717 				sc->sc_fcrtl |= FCRTL_XONE;
   9718 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9719 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9720 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9721 				      sc->sc_fcrtl);
   9722 			sc->sc_tbi_linkup = 1;
   9723 		} else {
   9724 			if (i == WM_LINKUP_TIMEOUT)
   9725 				wm_check_for_link(sc);
   9726 			/* Link is down. */
   9727 			DPRINTF(WM_DEBUG_LINK,
   9728 			    ("%s: LINK: set media -> link down\n",
   9729 			    device_xname(sc->sc_dev)));
   9730 			sc->sc_tbi_linkup = 0;
   9731 		}
   9732 	} else {
   9733 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9734 		    device_xname(sc->sc_dev)));
   9735 		sc->sc_tbi_linkup = 0;
   9736 	}
   9737 
   9738 	wm_tbi_serdes_set_linkled(sc);
   9739 
   9740 	return 0;
   9741 }
   9742 
   9743 /*
   9744  * wm_tbi_mediastatus:	[ifmedia interface function]
   9745  *
   9746  *	Get the current interface media status on a 1000BASE-X device.
   9747  */
   9748 static void
   9749 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9750 {
   9751 	struct wm_softc *sc = ifp->if_softc;
   9752 	uint32_t ctrl, status;
   9753 
   9754 	ifmr->ifm_status = IFM_AVALID;
   9755 	ifmr->ifm_active = IFM_ETHER;
   9756 
   9757 	status = CSR_READ(sc, WMREG_STATUS);
   9758 	if ((status & STATUS_LU) == 0) {
   9759 		ifmr->ifm_active |= IFM_NONE;
   9760 		return;
   9761 	}
   9762 
   9763 	ifmr->ifm_status |= IFM_ACTIVE;
   9764 	/* Only 82545 is LX */
   9765 	if (sc->sc_type == WM_T_82545)
   9766 		ifmr->ifm_active |= IFM_1000_LX;
   9767 	else
   9768 		ifmr->ifm_active |= IFM_1000_SX;
   9769 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9770 		ifmr->ifm_active |= IFM_FDX;
   9771 	else
   9772 		ifmr->ifm_active |= IFM_HDX;
   9773 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9774 	if (ctrl & CTRL_RFCE)
   9775 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9776 	if (ctrl & CTRL_TFCE)
   9777 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9778 }
   9779 
   9780 /* XXX TBI only */
   9781 static int
   9782 wm_check_for_link(struct wm_softc *sc)
   9783 {
   9784 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9785 	uint32_t rxcw;
   9786 	uint32_t ctrl;
   9787 	uint32_t status;
   9788 	uint32_t sig;
   9789 
   9790 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9791 		/* XXX need some work for >= 82571 */
   9792 		if (sc->sc_type >= WM_T_82571) {
   9793 			sc->sc_tbi_linkup = 1;
   9794 			return 0;
   9795 		}
   9796 	}
   9797 
   9798 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9799 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9800 	status = CSR_READ(sc, WMREG_STATUS);
   9801 
   9802 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9803 
   9804 	DPRINTF(WM_DEBUG_LINK,
   9805 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9806 		device_xname(sc->sc_dev), __func__,
   9807 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9808 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9809 
	/*
	 * SWDPIN   LU RXCW
	 *      0    0    0
	 *      0    0    1	(should not happen)
	 *      0    1    0	(should not happen)
	 *      0    1    1	(should not happen)
	 *      1    0    0	Disable autonegotiation and force link up
	 *      1    0    1	received /C/ but no link up yet
	 *      1    1    0	(link up)
	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
	 */
   9822 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9823 	    && ((status & STATUS_LU) == 0)
   9824 	    && ((rxcw & RXCW_C) == 0)) {
   9825 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9826 			__func__));
   9827 		sc->sc_tbi_linkup = 0;
   9828 		/* Disable auto-negotiation in the TXCW register */
   9829 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9830 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
		 */
   9837 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9838 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9839 	} else if (((status & STATUS_LU) != 0)
   9840 	    && ((rxcw & RXCW_C) != 0)
   9841 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9842 		sc->sc_tbi_linkup = 1;
   9843 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9844 			__func__));
   9845 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9846 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9847 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9848 	    && ((rxcw & RXCW_C) != 0)) {
   9849 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9850 	} else {
   9851 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9852 			status));
   9853 	}
   9854 
   9855 	return 0;
   9856 }
   9857 
   9858 /*
   9859  * wm_tbi_tick:
   9860  *
   9861  *	Check the link on TBI devices.
   9862  *	This function acts as mii_tick().
   9863  */
   9864 static void
   9865 wm_tbi_tick(struct wm_softc *sc)
   9866 {
   9867 	struct mii_data *mii = &sc->sc_mii;
   9868 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9869 	uint32_t status;
   9870 
   9871 	KASSERT(WM_CORE_LOCKED(sc));
   9872 
   9873 	status = CSR_READ(sc, WMREG_STATUS);
   9874 
   9875 	/* XXX is this needed? */
   9876 	(void)CSR_READ(sc, WMREG_RXCW);
   9877 	(void)CSR_READ(sc, WMREG_CTRL);
   9878 
   9879 	/* set link status */
   9880 	if ((status & STATUS_LU) == 0) {
   9881 		DPRINTF(WM_DEBUG_LINK,
   9882 		    ("%s: LINK: checklink -> down\n",
   9883 			device_xname(sc->sc_dev)));
   9884 		sc->sc_tbi_linkup = 0;
   9885 	} else if (sc->sc_tbi_linkup == 0) {
   9886 		DPRINTF(WM_DEBUG_LINK,
   9887 		    ("%s: LINK: checklink -> up %s\n",
   9888 			device_xname(sc->sc_dev),
   9889 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9890 		sc->sc_tbi_linkup = 1;
   9891 		sc->sc_tbi_serdes_ticks = 0;
   9892 	}
   9893 
   9894 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9895 		goto setled;
   9896 
   9897 	if ((status & STATUS_LU) == 0) {
   9898 		sc->sc_tbi_linkup = 0;
   9899 		/* If the timer expired, retry autonegotiation */
   9900 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9901 		    && (++sc->sc_tbi_serdes_ticks
   9902 			>= sc->sc_tbi_serdes_anegticks)) {
   9903 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9904 			sc->sc_tbi_serdes_ticks = 0;
   9905 			/*
   9906 			 * Reset the link, and let autonegotiation do
   9907 			 * its thing
   9908 			 */
   9909 			sc->sc_ctrl |= CTRL_LRST;
   9910 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9911 			CSR_WRITE_FLUSH(sc);
   9912 			delay(1000);
   9913 			sc->sc_ctrl &= ~CTRL_LRST;
   9914 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9915 			CSR_WRITE_FLUSH(sc);
   9916 			delay(1000);
   9917 			CSR_WRITE(sc, WMREG_TXCW,
   9918 			    sc->sc_txcw & ~TXCW_ANE);
   9919 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9920 		}
   9921 	}
   9922 
   9923 setled:
   9924 	wm_tbi_serdes_set_linkled(sc);
   9925 }
   9926 
   9927 /* SERDES related */
   9928 static void
   9929 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9930 {
   9931 	uint32_t reg;
   9932 
   9933 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9934 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9935 		return;
   9936 
   9937 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9938 	reg |= PCS_CFG_PCS_EN;
   9939 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9940 
   9941 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9942 	reg &= ~CTRL_EXT_SWDPIN(3);
   9943 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9944 	CSR_WRITE_FLUSH(sc);
   9945 }
   9946 
   9947 static int
   9948 wm_serdes_mediachange(struct ifnet *ifp)
   9949 {
   9950 	struct wm_softc *sc = ifp->if_softc;
   9951 	bool pcs_autoneg = true; /* XXX */
   9952 	uint32_t ctrl_ext, pcs_lctl, reg;
   9953 
   9954 	/* XXX Currently, this function is not called on 8257[12] */
   9955 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9956 	    || (sc->sc_type >= WM_T_82575))
   9957 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9958 
   9959 	wm_serdes_power_up_link_82575(sc);
   9960 
   9961 	sc->sc_ctrl |= CTRL_SLU;
   9962 
   9963 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9964 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9965 
   9966 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9967 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9968 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9969 	case CTRL_EXT_LINK_MODE_SGMII:
   9970 		pcs_autoneg = true;
   9971 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9972 		break;
   9973 	case CTRL_EXT_LINK_MODE_1000KX:
   9974 		pcs_autoneg = false;
   9975 		/* FALLTHROUGH */
   9976 	default:
   9977 		if ((sc->sc_type == WM_T_82575)
   9978 		    || (sc->sc_type == WM_T_82576)) {
   9979 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9980 				pcs_autoneg = false;
   9981 		}
   9982 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9983 		    | CTRL_FRCFDX;
   9984 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9985 	}
   9986 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9987 
   9988 	if (pcs_autoneg) {
   9989 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9990 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9991 
   9992 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9993 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9994 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9995 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9996 	} else
   9997 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9998 
   9999 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10000 
   10001 
   10002 	return 0;
   10003 }
   10004 
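/*
 * A summary of the PCS setup above: SGMII link mode keeps PCS
 * autonegotiation enabled, 1000BASE-KX forces it off, and on the
 * 82575/82576 the WM_F_PCS_DIS_AUTONEGO flag can disable it as well.
 * With autonegotiation on, we advertise symmetric and asymmetric pause
 * in PCS_ANADV; with it off, PCS_LCTL forces speed, duplex and flow
 * control instead.
 */
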
   10005 static void
   10006 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10007 {
   10008 	struct wm_softc *sc = ifp->if_softc;
   10009 	struct mii_data *mii = &sc->sc_mii;
   10010 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10011 	uint32_t pcs_adv, pcs_lpab, reg;
   10012 
   10013 	ifmr->ifm_status = IFM_AVALID;
   10014 	ifmr->ifm_active = IFM_ETHER;
   10015 
   10016 	/* Check PCS */
   10017 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10018 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10019 		ifmr->ifm_active |= IFM_NONE;
   10020 		sc->sc_tbi_linkup = 0;
   10021 		goto setled;
   10022 	}
   10023 
   10024 	sc->sc_tbi_linkup = 1;
   10025 	ifmr->ifm_status |= IFM_ACTIVE;
   10026 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10027 	if ((reg & PCS_LSTS_FDX) != 0)
   10028 		ifmr->ifm_active |= IFM_FDX;
   10029 	else
   10030 		ifmr->ifm_active |= IFM_HDX;
   10031 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10032 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10033 		/* Check flow */
   10034 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10035 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10036 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10037 			goto setled;
   10038 		}
   10039 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10040 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10041 		DPRINTF(WM_DEBUG_LINK,
   10042 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10043 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10044 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10045 			mii->mii_media_active |= IFM_FLOW
   10046 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10047 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10048 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10049 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10050 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10051 			mii->mii_media_active |= IFM_FLOW
   10052 			    | IFM_ETH_TXPAUSE;
   10053 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10054 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10055 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10056 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10057 			mii->mii_media_active |= IFM_FLOW
   10058 			    | IFM_ETH_RXPAUSE;
		}
   10061 	}
   10062 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10063 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10064 setled:
   10065 	wm_tbi_serdes_set_linkled(sc);
   10066 }
   10067 
   10068 /*
   10069  * wm_serdes_tick:
   10070  *
   10071  *	Check the link on serdes devices.
   10072  */
   10073 static void
   10074 wm_serdes_tick(struct wm_softc *sc)
   10075 {
   10076 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10077 	struct mii_data *mii = &sc->sc_mii;
   10078 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10079 	uint32_t reg;
   10080 
   10081 	KASSERT(WM_CORE_LOCKED(sc));
   10082 
   10083 	mii->mii_media_status = IFM_AVALID;
   10084 	mii->mii_media_active = IFM_ETHER;
   10085 
   10086 	/* Check PCS */
   10087 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10088 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10089 		mii->mii_media_status |= IFM_ACTIVE;
   10090 		sc->sc_tbi_linkup = 1;
   10091 		sc->sc_tbi_serdes_ticks = 0;
   10092 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10093 		if ((reg & PCS_LSTS_FDX) != 0)
   10094 			mii->mii_media_active |= IFM_FDX;
   10095 		else
   10096 			mii->mii_media_active |= IFM_HDX;
   10097 	} else {
		mii->mii_media_active |= IFM_NONE;
		sc->sc_tbi_linkup = 0;
		/* If the timer expired, retry autonegotiation */
   10101 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10102 		    && (++sc->sc_tbi_serdes_ticks
   10103 			>= sc->sc_tbi_serdes_anegticks)) {
   10104 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10105 			sc->sc_tbi_serdes_ticks = 0;
   10106 			/* XXX */
   10107 			wm_serdes_mediachange(ifp);
   10108 		}
   10109 	}
   10110 
   10111 	wm_tbi_serdes_set_linkled(sc);
   10112 }
   10113 
   10114 /* SFP related */
   10115 
   10116 static int
   10117 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10118 {
   10119 	uint32_t i2ccmd;
   10120 	int i;
   10121 
   10122 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10123 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10124 
   10125 	/* Poll the ready bit */
   10126 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10127 		delay(50);
   10128 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10129 		if (i2ccmd & I2CCMD_READY)
   10130 			break;
   10131 	}
   10132 	if ((i2ccmd & I2CCMD_READY) == 0)
   10133 		return -1;
   10134 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10135 		return -1;
   10136 
   10137 	*data = i2ccmd & 0x00ff;
   10138 
   10139 	return 0;
   10140 }
   10141 
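/*
 * The I2CCMD handshake above, in outline: the SFP byte offset goes into
 * the register-address field along with the READ opcode, the hardware
 * runs the I2C transaction on its own, and completion is signalled by
 * I2CCMD_READY (or I2CCMD_ERROR) with the byte in the low eight bits:
 *
 *	CSR_WRITE(sc, WMREG_I2CCMD,
 *	    (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ);
 *	... poll for I2CCMD_READY, then take (i2ccmd & 0x00ff) ...
 */
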
   10142 static uint32_t
   10143 wm_sfp_get_media_type(struct wm_softc *sc)
   10144 {
   10145 	uint32_t ctrl_ext;
   10146 	uint8_t val = 0;
   10147 	int timeout = 3;
   10148 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10149 	int rv = -1;
   10150 
   10151 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10152 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10153 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10154 	CSR_WRITE_FLUSH(sc);
   10155 
   10156 	/* Read SFP module data */
   10157 	while (timeout) {
   10158 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10159 		if (rv == 0)
   10160 			break;
   10161 		delay(100*1000); /* XXX too big */
   10162 		timeout--;
   10163 	}
   10164 	if (rv != 0)
   10165 		goto out;
   10166 	switch (val) {
   10167 	case SFF_SFP_ID_SFF:
   10168 		aprint_normal_dev(sc->sc_dev,
   10169 		    "Module/Connector soldered to board\n");
   10170 		break;
   10171 	case SFF_SFP_ID_SFP:
   10172 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10173 		break;
   10174 	case SFF_SFP_ID_UNKNOWN:
   10175 		goto out;
   10176 	default:
   10177 		break;
   10178 	}
   10179 
   10180 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10181 	if (rv != 0) {
   10182 		goto out;
   10183 	}
   10184 
   10185 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10186 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10191 		sc->sc_flags |= WM_F_SGMII;
   10192 		mediatype = WM_MEDIATYPE_SERDES;
   10193 	}
   10194 
   10195 out:
   10196 	/* Restore I2C interface setting */
   10197 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10198 
   10199 	return mediatype;
   10200 }
   10201 
   10202 /*
   10203  * NVM related.
   10204  * Microwire, SPI (w/wo EERD) and Flash.
   10205  */
   10206 
   10207 /* Both spi and uwire */
   10208 
   10209 /*
   10210  * wm_eeprom_sendbits:
   10211  *
   10212  *	Send a series of bits to the EEPROM.
   10213  */
   10214 static void
   10215 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10216 {
   10217 	uint32_t reg;
   10218 	int x;
   10219 
   10220 	reg = CSR_READ(sc, WMREG_EECD);
   10221 
   10222 	for (x = nbits; x > 0; x--) {
   10223 		if (bits & (1U << (x - 1)))
   10224 			reg |= EECD_DI;
   10225 		else
   10226 			reg &= ~EECD_DI;
   10227 		CSR_WRITE(sc, WMREG_EECD, reg);
   10228 		CSR_WRITE_FLUSH(sc);
   10229 		delay(2);
   10230 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10231 		CSR_WRITE_FLUSH(sc);
   10232 		delay(2);
   10233 		CSR_WRITE(sc, WMREG_EECD, reg);
   10234 		CSR_WRITE_FLUSH(sc);
   10235 		delay(2);
   10236 	}
   10237 }
   10238 
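/*
 * Per-bit waveform generated by wm_eeprom_sendbits() above, MSB first
 * (a sketch; the EEPROM samples DI on the rising edge of SK, with
 * delay(2) between each step):
 *
 *	reg = (reg & ~EECD_DI) | (bit ? EECD_DI : 0);
 *	CSR_WRITE(sc, WMREG_EECD, reg);			// data valid
 *	CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);	// clock high
 *	CSR_WRITE(sc, WMREG_EECD, reg);			// clock low
 */
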
   10239 /*
   10240  * wm_eeprom_recvbits:
   10241  *
   10242  *	Receive a series of bits from the EEPROM.
   10243  */
   10244 static void
   10245 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10246 {
   10247 	uint32_t reg, val;
   10248 	int x;
   10249 
   10250 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10251 
   10252 	val = 0;
   10253 	for (x = nbits; x > 0; x--) {
   10254 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10255 		CSR_WRITE_FLUSH(sc);
   10256 		delay(2);
   10257 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10258 			val |= (1U << (x - 1));
   10259 		CSR_WRITE(sc, WMREG_EECD, reg);
   10260 		CSR_WRITE_FLUSH(sc);
   10261 		delay(2);
   10262 	}
   10263 	*valp = val;
   10264 }
   10265 
   10266 /* Microwire */
   10267 
   10268 /*
   10269  * wm_nvm_read_uwire:
   10270  *
   10271  *	Read a word from the EEPROM using the MicroWire protocol.
   10272  */
   10273 static int
   10274 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10275 {
   10276 	uint32_t reg, val;
   10277 	int i;
   10278 
   10279 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10280 		device_xname(sc->sc_dev), __func__));
   10281 
   10282 	for (i = 0; i < wordcnt; i++) {
   10283 		/* Clear SK and DI. */
   10284 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10285 		CSR_WRITE(sc, WMREG_EECD, reg);
   10286 
		/*
		 * XXX: workaround for a bug in qemu-0.12.x and prior
		 * and Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
		 */
   10294 		if (sc->sc_type == WM_T_82540) {
   10295 			reg |= EECD_SK;
   10296 			CSR_WRITE(sc, WMREG_EECD, reg);
   10297 			reg &= ~EECD_SK;
   10298 			CSR_WRITE(sc, WMREG_EECD, reg);
   10299 			CSR_WRITE_FLUSH(sc);
   10300 			delay(2);
   10301 		}
   10302 		/* XXX: end of workaround */
   10303 
   10304 		/* Set CHIP SELECT. */
   10305 		reg |= EECD_CS;
   10306 		CSR_WRITE(sc, WMREG_EECD, reg);
   10307 		CSR_WRITE_FLUSH(sc);
   10308 		delay(2);
   10309 
   10310 		/* Shift in the READ command. */
   10311 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10312 
   10313 		/* Shift in address. */
   10314 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10315 
   10316 		/* Shift out the data. */
   10317 		wm_eeprom_recvbits(sc, &val, 16);
   10318 		data[i] = val & 0xffff;
   10319 
   10320 		/* Clear CHIP SELECT. */
   10321 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10322 		CSR_WRITE(sc, WMREG_EECD, reg);
   10323 		CSR_WRITE_FLUSH(sc);
   10324 		delay(2);
   10325 	}
   10326 
   10327 	return 0;
   10328 }
   10329 
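/*
 * Usage sketch (via the generic wm_nvm_read() entry point, which
 * dispatches here for Microwire parts): reading the three Ethernet
 * address words from the EEPROM.
 *
 *	uint16_t ea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, ea) != 0)
 *		aprint_error_dev(sc->sc_dev, "unable to read the MAC\n");
 */
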
   10330 /* SPI */
   10331 
   10332 /*
   10333  * Set SPI and FLASH related information from the EECD register.
   10334  * For 82541 and 82547, the word size is taken from EEPROM.
   10335  */
   10336 static int
   10337 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10338 {
   10339 	int size;
   10340 	uint32_t reg;
   10341 	uint16_t data;
   10342 
   10343 	reg = CSR_READ(sc, WMREG_EECD);
   10344 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10345 
   10346 	/* Read the size of NVM from EECD by default */
   10347 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10348 	switch (sc->sc_type) {
   10349 	case WM_T_82541:
   10350 	case WM_T_82541_2:
   10351 	case WM_T_82547:
   10352 	case WM_T_82547_2:
   10353 		/* Set dummy value to access EEPROM */
   10354 		sc->sc_nvm_wordsize = 64;
   10355 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10356 		reg = data;
   10357 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10358 		if (size == 0)
   10359 			size = 6; /* 64 word size */
   10360 		else
   10361 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10362 		break;
   10363 	case WM_T_80003:
   10364 	case WM_T_82571:
   10365 	case WM_T_82572:
   10366 	case WM_T_82573: /* SPI case */
   10367 	case WM_T_82574: /* SPI case */
   10368 	case WM_T_82583: /* SPI case */
   10369 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10370 		if (size > 14)
   10371 			size = 14;
   10372 		break;
   10373 	case WM_T_82575:
   10374 	case WM_T_82576:
   10375 	case WM_T_82580:
   10376 	case WM_T_I350:
   10377 	case WM_T_I354:
   10378 	case WM_T_I210:
   10379 	case WM_T_I211:
   10380 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10381 		if (size > 15)
   10382 			size = 15;
   10383 		break;
	default:
		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   10389 	}
   10390 
   10391 	sc->sc_nvm_wordsize = 1 << size;
   10392 
   10393 	return 0;
   10394 }
   10395 
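/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
 * shared Intel code): on an 82571 whose EECD size-extension field reads
 * 2, size becomes 2 + 6 = 8, so sc_nvm_wordsize is 1 << 8 = 256 words.
 */
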
   10396 /*
   10397  * wm_nvm_ready_spi:
   10398  *
   10399  *	Wait for a SPI EEPROM to be ready for commands.
   10400  */
   10401 static int
   10402 wm_nvm_ready_spi(struct wm_softc *sc)
   10403 {
   10404 	uint32_t val;
   10405 	int usec;
   10406 
   10407 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10408 		device_xname(sc->sc_dev), __func__));
   10409 
   10410 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10411 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10412 		wm_eeprom_recvbits(sc, &val, 8);
   10413 		if ((val & SPI_SR_RDY) == 0)
   10414 			break;
   10415 	}
   10416 	if (usec >= SPI_MAX_RETRIES) {
   10417 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10418 		return 1;
   10419 	}
   10420 	return 0;
   10421 }
   10422 
   10423 /*
   10424  * wm_nvm_read_spi:
   10425  *
 *	Read a word from the EEPROM using the SPI protocol.
   10427  */
   10428 static int
   10429 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10430 {
   10431 	uint32_t reg, val;
   10432 	int i;
   10433 	uint8_t opc;
   10434 
   10435 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10436 		device_xname(sc->sc_dev), __func__));
   10437 
   10438 	/* Clear SK and CS. */
   10439 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10440 	CSR_WRITE(sc, WMREG_EECD, reg);
   10441 	CSR_WRITE_FLUSH(sc);
   10442 	delay(2);
   10443 
   10444 	if (wm_nvm_ready_spi(sc))
   10445 		return 1;
   10446 
   10447 	/* Toggle CS to flush commands. */
   10448 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10449 	CSR_WRITE_FLUSH(sc);
   10450 	delay(2);
   10451 	CSR_WRITE(sc, WMREG_EECD, reg);
   10452 	CSR_WRITE_FLUSH(sc);
   10453 	delay(2);
   10454 
   10455 	opc = SPI_OPC_READ;
   10456 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10457 		opc |= SPI_OPC_A8;
   10458 
   10459 	wm_eeprom_sendbits(sc, opc, 8);
   10460 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10461 
   10462 	for (i = 0; i < wordcnt; i++) {
   10463 		wm_eeprom_recvbits(sc, &val, 16);
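		/* Bytes arrive low byte first; swap out of the MSB-first shifter. */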
   10464 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10465 	}
   10466 
   10467 	/* Raise CS and clear SK. */
   10468 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10469 	CSR_WRITE(sc, WMREG_EECD, reg);
   10470 	CSR_WRITE_FLUSH(sc);
   10471 	delay(2);
   10472 
   10473 	return 0;
   10474 }
   10475 
   10476 /* Using with EERD */
   10477 
   10478 static int
   10479 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10480 {
   10481 	uint32_t attempts = 100000;
   10482 	uint32_t i, reg = 0;
   10483 	int32_t done = -1;
   10484 
   10485 	for (i = 0; i < attempts; i++) {
   10486 		reg = CSR_READ(sc, rw);
   10487 
   10488 		if (reg & EERD_DONE) {
   10489 			done = 0;
   10490 			break;
   10491 		}
   10492 		delay(5);
   10493 	}
   10494 
   10495 	return done;
   10496 }
   10497 
   10498 static int
   10499 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10500     uint16_t *data)
   10501 {
   10502 	int i, eerd = 0;
   10503 	int error = 0;
   10504 
   10505 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10506 		device_xname(sc->sc_dev), __func__));
   10507 
   10508 	for (i = 0; i < wordcnt; i++) {
   10509 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10510 
   10511 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10512 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10513 		if (error != 0)
   10514 			break;
   10515 
   10516 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10517 	}
   10518 
   10519 	return error;
   10520 }
   10521 
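/*
 * The EERD protocol above, in outline: write the word address plus the
 * START bit, poll until the hardware sets DONE, then take the 16 data
 * bits from the top of the register:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */
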
   10522 /* Flash */
   10523 
   10524 static int
   10525 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10526 {
   10527 	uint32_t eecd;
   10528 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10529 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10530 	uint8_t sig_byte = 0;
   10531 
   10532 	switch (sc->sc_type) {
   10533 	case WM_T_PCH_SPT:
   10534 		/*
   10535 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10536 		 * sector valid bits from the NVM.
   10537 		 */
   10538 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10539 		if ((*bank == 0) || (*bank == 1)) {
   10540 			aprint_error_dev(sc->sc_dev,
   10541 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10542 				*bank);
   10543 			return -1;
   10544 		} else {
   10545 			*bank = *bank - 2;
   10546 			return 0;
   10547 		}
   10548 	case WM_T_ICH8:
   10549 	case WM_T_ICH9:
   10550 		eecd = CSR_READ(sc, WMREG_EECD);
   10551 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10552 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10553 			return 0;
   10554 		}
   10555 		/* FALLTHROUGH */
   10556 	default:
   10557 		/* Default to 0 */
   10558 		*bank = 0;
   10559 
   10560 		/* Check bank 0 */
   10561 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10562 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10563 			*bank = 0;
   10564 			return 0;
   10565 		}
   10566 
   10567 		/* Check bank 1 */
   10568 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10569 		    &sig_byte);
   10570 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10571 			*bank = 1;
   10572 			return 0;
   10573 		}
   10574 	}
   10575 
   10576 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10577 		device_xname(sc->sc_dev)));
   10578 	return -1;
   10579 }
   10580 
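/*
 * Bank layout sketch: the flash keeps two copies of the NVM image, one
 * per bank, with bank 1 starting sc_ich8_flash_bank_size words after
 * bank 0.  The signature byte tested above lives in the high byte of
 * word ICH_NVM_SIG_WORD of each bank (hence the "* 2 + 1" byte offset).
 */
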
   10581 /******************************************************************************
   10582  * This function does initial flash setup so that a new read/write/erase cycle
   10583  * can be started.
   10584  *
   10585  * sc - The pointer to the hw structure
   10586  ****************************************************************************/
   10587 static int32_t
   10588 wm_ich8_cycle_init(struct wm_softc *sc)
   10589 {
   10590 	uint16_t hsfsts;
   10591 	int32_t error = 1;
   10592 	int32_t i     = 0;
   10593 
   10594 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10595 
	/* Check the Flash Descriptor Valid bit in the HW status */
   10597 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10598 		return error;
   10599 	}
   10600 
	/* Clear FCERR and DAEL in the HW status by writing 1s */
   10603 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10604 
   10605 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10606 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which can then be used as an indication of
	 * whether a cycle is in progress or has been completed.  We
	 * should also have some software semaphore mechanism to guard
	 * FDONE or the cycle-in-progress bit so that two threads' accesses
	 * to those bits are serialized, or a way so that two threads
	 * don't start a cycle at the same time.
	 */
   10617 
   10618 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10619 		/*
   10620 		 * There is no cycle running at present, so we can start a
   10621 		 * cycle
   10622 		 */
   10623 
   10624 		/* Begin by setting Flash Cycle Done. */
   10625 		hsfsts |= HSFSTS_DONE;
   10626 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10627 		error = 0;
	} else {
		/*
		 * Otherwise, poll for some time so that the current cycle
		 * has a chance to end before giving up.
		 */
   10633 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10634 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10635 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10636 				error = 0;
   10637 				break;
   10638 			}
   10639 			delay(1);
   10640 		}
		if (error == 0) {
			/*
			 * We successfully waited for the previous cycle to
			 * finish; now set the Flash Cycle Done bit.
			 */
   10646 			hsfsts |= HSFSTS_DONE;
   10647 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10648 		}
   10649 	}
   10650 	return error;
   10651 }
   10652 
   10653 /******************************************************************************
   10654  * This function starts a flash cycle and waits for its completion
   10655  *
   10656  * sc - The pointer to the hw structure
   10657  ****************************************************************************/
   10658 static int32_t
   10659 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10660 {
   10661 	uint16_t hsflctl;
   10662 	uint16_t hsfsts;
   10663 	int32_t error = 1;
   10664 	uint32_t i = 0;
   10665 
   10666 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10667 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10668 	hsflctl |= HSFCTL_GO;
   10669 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10670 
   10671 	/* Wait till FDONE bit is set to 1 */
   10672 	do {
   10673 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10674 		if (hsfsts & HSFSTS_DONE)
   10675 			break;
   10676 		delay(1);
   10677 		i++;
   10678 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10680 		error = 0;
   10681 
   10682 	return error;
   10683 }
   10684 
   10685 /******************************************************************************
   10686  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10687  *
   10688  * sc - The pointer to the hw structure
   10689  * index - The index of the byte or word to read.
   10690  * size - Size of data to read, 1=byte 2=word, 4=dword
   10691  * data - Pointer to the word to store the value read.
   10692  *****************************************************************************/
   10693 static int32_t
   10694 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10695     uint32_t size, uint32_t *data)
   10696 {
   10697 	uint16_t hsfsts;
   10698 	uint16_t hsflctl;
   10699 	uint32_t flash_linear_address;
   10700 	uint32_t flash_data = 0;
   10701 	int32_t error = 1;
   10702 	int32_t count = 0;
   10703 
	if (size < 1 || size > 4 || data == NULL ||
   10705 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10706 		return error;
   10707 
   10708 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10709 	    sc->sc_ich8_flash_base;
   10710 
   10711 	do {
   10712 		delay(1);
   10713 		/* Steps */
   10714 		error = wm_ich8_cycle_init(sc);
   10715 		if (error)
   10716 			break;
   10717 
   10718 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is the byte count minus one: 0 = 1 byte, 3 = 4 bytes */
   10720 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10721 		    & HSFCTL_BCOUNT_MASK;
   10722 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10723 		if (sc->sc_type == WM_T_PCH_SPT) {
   10724 			/*
			 * In SPT, this register is in LAN memory space, not
   10726 			 * flash. Therefore, only 32 bit access is supported.
   10727 			 */
   10728 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10729 			    (uint32_t)hsflctl);
   10730 		} else
   10731 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10732 
   10733 		/*
   10734 		 * Write the last 24 bits of index into Flash Linear address
   10735 		 * field in Flash Address
   10736 		 */
   10737 		/* TODO: TBD maybe check the index against the size of flash */
   10738 
   10739 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10740 
   10741 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10742 
		/*
		 * Check if FCERR is set to 1; if set, clear it and try
		 * the whole sequence a few more times.  Otherwise, read
		 * in (shift in) the Flash Data0, least significant byte
		 * first.
		 */
   10749 		if (error == 0) {
   10750 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10751 			if (size == 1)
   10752 				*data = (uint8_t)(flash_data & 0x000000FF);
   10753 			else if (size == 2)
   10754 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10755 			else if (size == 4)
   10756 				*data = (uint32_t)flash_data;
   10757 			break;
   10758 		} else {
   10759 			/*
   10760 			 * If we've gotten here, then things are probably
   10761 			 * completely hosed, but if the error condition is
   10762 			 * detected, it won't hurt to give it another try...
   10763 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10764 			 */
   10765 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10766 			if (hsfsts & HSFSTS_ERR) {
   10767 				/* Repeat for some time before giving up. */
   10768 				continue;
   10769 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10770 				break;
   10771 		}
   10772 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10773 
   10774 	return error;
   10775 }
   10776 
   10777 /******************************************************************************
   10778  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10779  *
   10780  * sc - pointer to wm_hw structure
   10781  * index - The index of the byte to read.
   10782  * data - Pointer to a byte to store the value read.
   10783  *****************************************************************************/
   10784 static int32_t
   10785 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10786 {
   10787 	int32_t status;
   10788 	uint32_t word = 0;
   10789 
   10790 	status = wm_read_ich8_data(sc, index, 1, &word);
   10791 	if (status == 0)
   10792 		*data = (uint8_t)word;
   10793 	else
   10794 		*data = 0;
   10795 
   10796 	return status;
   10797 }
   10798 
   10799 /******************************************************************************
   10800  * Reads a word from the NVM using the ICH8 flash access registers.
   10801  *
   10802  * sc - pointer to wm_hw structure
   10803  * index - The starting byte index of the word to read.
   10804  * data - Pointer to a word to store the value read.
   10805  *****************************************************************************/
   10806 static int32_t
   10807 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10808 {
   10809 	int32_t status;
   10810 	uint32_t word = 0;
   10811 
   10812 	status = wm_read_ich8_data(sc, index, 2, &word);
   10813 	if (status == 0)
   10814 		*data = (uint16_t)word;
   10815 	else
   10816 		*data = 0;
   10817 
   10818 	return status;
   10819 }
   10820 
   10821 /******************************************************************************
   10822  * Reads a dword from the NVM using the ICH8 flash access registers.
   10823  *
   10824  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   10827  *****************************************************************************/
   10828 static int32_t
   10829 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10830 {
   10831 	int32_t status;
   10832 
   10833 	status = wm_read_ich8_data(sc, index, 4, data);
   10834 	return status;
   10835 }
   10836 
   10837 /******************************************************************************
   10838  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10839  * register.
   10840  *
   10841  * sc - Struct containing variables accessed by shared code
   10842  * offset - offset of word in the EEPROM to read
   10843  * data - word read from the EEPROM
   10844  * words - number of words to read
   10845  *****************************************************************************/
   10846 static int
   10847 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10848 {
   10849 	int32_t  error = 0;
   10850 	uint32_t flash_bank = 0;
   10851 	uint32_t act_offset = 0;
   10852 	uint32_t bank_offset = 0;
   10853 	uint16_t word = 0;
   10854 	uint16_t i = 0;
   10855 
   10856 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10857 		device_xname(sc->sc_dev), __func__));
   10858 
   10859 	/*
   10860 	 * We need to know which is the valid flash bank.  In the event
   10861 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10862 	 * managing flash_bank.  So it cannot be trusted and needs
   10863 	 * to be updated with each read.
   10864 	 */
   10865 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10866 	if (error) {
   10867 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10868 			device_xname(sc->sc_dev)));
   10869 		flash_bank = 0;
   10870 	}
   10871 
   10872 	/*
   10873 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10874 	 * size
   10875 	 */
   10876 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10877 
   10878 	error = wm_get_swfwhw_semaphore(sc);
   10879 	if (error) {
   10880 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10881 		    __func__);
   10882 		return error;
   10883 	}
   10884 
   10885 	for (i = 0; i < words; i++) {
   10886 		/* The NVM part needs a byte offset, hence * 2 */
   10887 		act_offset = bank_offset + ((offset + i) * 2);
   10888 		error = wm_read_ich8_word(sc, act_offset, &word);
   10889 		if (error) {
   10890 			aprint_error_dev(sc->sc_dev,
   10891 			    "%s: failed to read NVM\n", __func__);
   10892 			break;
   10893 		}
   10894 		data[i] = word;
   10895 	}
   10896 
   10897 	wm_put_swfwhw_semaphore(sc);
   10898 	return error;
   10899 }
   10900 
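/*
 * Offset arithmetic example (with a hypothetical bank size of 0x1000
 * words): for flash_bank = 1, reading NVM word 3 accesses byte offset
 * 1 * (0x1000 * 2) + (3 * 2) = 0x2006 within the flash region.
 */
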
   10901 /******************************************************************************
   10902  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10903  * register.
   10904  *
   10905  * sc - Struct containing variables accessed by shared code
   10906  * offset - offset of word in the EEPROM to read
   10907  * data - word read from the EEPROM
   10908  * words - number of words to read
   10909  *****************************************************************************/
   10910 static int
   10911 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10912 {
   10913 	int32_t  error = 0;
   10914 	uint32_t flash_bank = 0;
   10915 	uint32_t act_offset = 0;
   10916 	uint32_t bank_offset = 0;
   10917 	uint32_t dword = 0;
   10918 	uint16_t i = 0;
   10919 
   10920 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10921 		device_xname(sc->sc_dev), __func__));
   10922 
   10923 	/*
   10924 	 * We need to know which is the valid flash bank.  In the event
   10925 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10926 	 * managing flash_bank.  So it cannot be trusted and needs
   10927 	 * to be updated with each read.
   10928 	 */
   10929 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10930 	if (error) {
   10931 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10932 			device_xname(sc->sc_dev)));
   10933 		flash_bank = 0;
   10934 	}
   10935 
   10936 	/*
   10937 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10938 	 * size
   10939 	 */
   10940 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10941 
   10942 	error = wm_get_swfwhw_semaphore(sc);
   10943 	if (error) {
   10944 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10945 		    __func__);
   10946 		return error;
   10947 	}
   10948 
   10949 	for (i = 0; i < words; i++) {
   10950 		/* The NVM part needs a byte offset, hence * 2 */
   10951 		act_offset = bank_offset + ((offset + i) * 2);
   10952 		/* but we must read dword aligned, so mask ... */
   10953 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10954 		if (error) {
   10955 			aprint_error_dev(sc->sc_dev,
   10956 			    "%s: failed to read NVM\n", __func__);
   10957 			break;
   10958 		}
   10959 		/* ... and pick out low or high word */
   10960 		if ((act_offset & 0x2) == 0)
   10961 			data[i] = (uint16_t)(dword & 0xFFFF);
   10962 		else
   10963 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10964 	}
   10965 
   10966 	wm_put_swfwhw_semaphore(sc);
   10967 	return error;
   10968 }
   10969 
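/*
 * Alignment example: act_offset 0x106 reads the dword at byte offset
 * 0x104 and picks its high 16 bits (0x106 & 0x2 is set), while
 * act_offset 0x104 would pick the low 16 bits of the same dword.
 */
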
   10970 /* iNVM */
   10971 
   10972 static int
   10973 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10974 {
	int32_t rv = -1;	/* assume "not found" until we find the word */
   10976 	uint32_t invm_dword;
   10977 	uint16_t i;
   10978 	uint8_t record_type, word_address;
   10979 
   10980 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10981 		device_xname(sc->sc_dev), __func__));
   10982 
   10983 	for (i = 0; i < INVM_SIZE; i++) {
   10984 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10985 		/* Get record type */
   10986 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10987 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10988 			break;
   10989 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10990 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10991 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10992 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10993 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10994 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10995 			if (word_address == address) {
   10996 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10997 				rv = 0;
   10998 				break;
   10999 			}
   11000 		}
   11001 	}
   11002 
   11003 	return rv;
   11004 }
   11005 
   11006 static int
   11007 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11008 {
   11009 	int rv = 0;
   11010 	int i;
   11011 
   11012 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11013 		device_xname(sc->sc_dev), __func__));
   11014 
   11015 	for (i = 0; i < words; i++) {
   11016 		switch (offset + i) {
   11017 		case NVM_OFF_MACADDR:
   11018 		case NVM_OFF_MACADDR1:
   11019 		case NVM_OFF_MACADDR2:
   11020 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11021 			if (rv != 0) {
   11022 				data[i] = 0xffff;
   11023 				rv = -1;
   11024 			}
   11025 			break;
   11026 		case NVM_OFF_CFG2:
   11027 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11028 			if (rv != 0) {
   11029 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11030 				rv = 0;
   11031 			}
   11032 			break;
   11033 		case NVM_OFF_CFG4:
   11034 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11035 			if (rv != 0) {
   11036 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11037 				rv = 0;
   11038 			}
   11039 			break;
   11040 		case NVM_OFF_LED_1_CFG:
   11041 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11042 			if (rv != 0) {
   11043 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11044 				rv = 0;
   11045 			}
   11046 			break;
   11047 		case NVM_OFF_LED_0_2_CFG:
   11048 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11049 			if (rv != 0) {
   11050 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11051 				rv = 0;
   11052 			}
   11053 			break;
   11054 		case NVM_OFF_ID_LED_SETTINGS:
   11055 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11056 			if (rv != 0) {
   11057 				*data = ID_LED_RESERVED_FFFF;
   11058 				rv = 0;
   11059 			}
   11060 			break;
   11061 		default:
   11062 			DPRINTF(WM_DEBUG_NVM,
   11063 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11064 			*data = NVM_RESERVED_WORD;
   11065 			break;
   11066 		}
   11067 	}
   11068 
   11069 	return rv;
   11070 }
   11071 
   11072 /* Lock, detecting NVM type, validate checksum, version and read */
   11073 
   11074 /*
   11075  * wm_nvm_acquire:
   11076  *
   11077  *	Perform the EEPROM handshake required on some chips.
   11078  */
   11079 static int
   11080 wm_nvm_acquire(struct wm_softc *sc)
   11081 {
   11082 	uint32_t reg;
   11083 	int x;
   11084 	int ret = 0;
   11085 
   11086 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11087 		device_xname(sc->sc_dev), __func__));
   11088 
   11089 	if (sc->sc_type >= WM_T_ICH8) {
   11090 		ret = wm_get_nvm_ich8lan(sc);
   11091 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11092 		ret = wm_get_swfwhw_semaphore(sc);
   11093 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11094 		/* This will also do wm_get_swsm_semaphore() if needed */
   11095 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11096 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11097 		ret = wm_get_swsm_semaphore(sc);
   11098 	}
   11099 
   11100 	if (ret) {
   11101 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11102 			__func__);
   11103 		return 1;
   11104 	}
   11105 
   11106 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11107 		reg = CSR_READ(sc, WMREG_EECD);
   11108 
   11109 		/* Request EEPROM access. */
   11110 		reg |= EECD_EE_REQ;
   11111 		CSR_WRITE(sc, WMREG_EECD, reg);
   11112 
   11113 		/* ..and wait for it to be granted. */
   11114 		for (x = 0; x < 1000; x++) {
   11115 			reg = CSR_READ(sc, WMREG_EECD);
   11116 			if (reg & EECD_EE_GNT)
   11117 				break;
   11118 			delay(5);
   11119 		}
   11120 		if ((reg & EECD_EE_GNT) == 0) {
   11121 			aprint_error_dev(sc->sc_dev,
   11122 			    "could not acquire EEPROM GNT\n");
   11123 			reg &= ~EECD_EE_REQ;
   11124 			CSR_WRITE(sc, WMREG_EECD, reg);
   11125 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11126 				wm_put_swfwhw_semaphore(sc);
   11127 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11128 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11129 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11130 				wm_put_swsm_semaphore(sc);
   11131 			return 1;
   11132 		}
   11133 	}
   11134 
   11135 	return 0;
   11136 }
   11137 
   11138 /*
   11139  * wm_nvm_release:
   11140  *
   11141  *	Release the EEPROM mutex.
   11142  */
   11143 static void
   11144 wm_nvm_release(struct wm_softc *sc)
   11145 {
   11146 	uint32_t reg;
   11147 
   11148 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11149 		device_xname(sc->sc_dev), __func__));
   11150 
   11151 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11152 		reg = CSR_READ(sc, WMREG_EECD);
   11153 		reg &= ~EECD_EE_REQ;
   11154 		CSR_WRITE(sc, WMREG_EECD, reg);
   11155 	}
   11156 
   11157 	if (sc->sc_type >= WM_T_ICH8) {
   11158 		wm_put_nvm_ich8lan(sc);
   11159 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11160 		wm_put_swfwhw_semaphore(sc);
   11161 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11162 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11163 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11164 		wm_put_swsm_semaphore(sc);
   11165 }
   11166 
   11167 static int
   11168 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11169 {
   11170 	uint32_t eecd = 0;
   11171 
   11172 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11173 	    || sc->sc_type == WM_T_82583) {
   11174 		eecd = CSR_READ(sc, WMREG_EECD);
   11175 
   11176 		/* Isolate bits 15 & 16 */
   11177 		eecd = ((eecd >> 15) & 0x03);
   11178 
   11179 		/* If both bits are set, device is Flash type */
   11180 		if (eecd == 0x03)
   11181 			return 0;
   11182 	}
   11183 	return 1;
   11184 }
   11185 
   11186 static int
   11187 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11188 {
   11189 	uint32_t eec;
   11190 
   11191 	eec = CSR_READ(sc, WMREG_EEC);
   11192 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11193 		return 1;
   11194 
   11195 	return 0;
   11196 }
   11197 
   11198 /*
 * wm_nvm_validate_checksum:
   11200  *
   11201  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11202  */
   11203 static int
   11204 wm_nvm_validate_checksum(struct wm_softc *sc)
   11205 {
   11206 	uint16_t checksum;
   11207 	uint16_t eeprom_data;
   11208 #ifdef WM_DEBUG
   11209 	uint16_t csum_wordaddr, valid_checksum;
   11210 #endif
   11211 	int i;
   11212 
   11213 	checksum = 0;
   11214 
   11215 	/* Don't check for I211 */
   11216 	if (sc->sc_type == WM_T_I211)
   11217 		return 0;
   11218 
   11219 #ifdef WM_DEBUG
   11220 	if (sc->sc_type == WM_T_PCH_LPT) {
   11221 		csum_wordaddr = NVM_OFF_COMPAT;
   11222 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11223 	} else {
   11224 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11225 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11226 	}
   11227 
   11228 	/* Dump EEPROM image for debug */
   11229 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11230 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11231 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11232 		/* XXX PCH_SPT? */
   11233 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11234 		if ((eeprom_data & valid_checksum) == 0) {
   11235 			DPRINTF(WM_DEBUG_NVM,
   11236 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11237 				device_xname(sc->sc_dev), eeprom_data,
   11238 				    valid_checksum));
   11239 		}
   11240 	}
   11241 
   11242 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11243 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11244 		for (i = 0; i < NVM_SIZE; i++) {
   11245 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11246 				printf("XXXX ");
   11247 			else
   11248 				printf("%04hx ", eeprom_data);
   11249 			if (i % 8 == 7)
   11250 				printf("\n");
   11251 		}
   11252 	}
   11253 
   11254 #endif /* WM_DEBUG */
   11255 
   11256 	for (i = 0; i < NVM_SIZE; i++) {
   11257 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11258 			return 1;
   11259 		checksum += eeprom_data;
   11260 	}
   11261 
   11262 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11263 #ifdef WM_DEBUG
   11264 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11265 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11266 #endif
   11267 	}
   11268 
   11269 	return 0;
   11270 }
   11271 
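/*
 * Checksum example (assuming NVM_CHECKSUM is the usual Intel 0xBABA):
 * the 16-bit sum of words 0x00-0x3F must equal 0xBABA, and the checksum
 * word itself is chosen at provisioning time to make that hold.  Note
 * that a mismatch above is only logged under WM_DEBUG, not treated as
 * fatal.
 */
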
   11272 static void
   11273 wm_nvm_version_invm(struct wm_softc *sc)
   11274 {
   11275 	uint32_t dword;
   11276 
   11277 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect, though...
   11281 	 *
   11282 	 * Example:
   11283 	 *
   11284 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11285 	 */
   11286 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11287 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11288 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11289 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11290 }
   11291 
   11292 static void
   11293 wm_nvm_version(struct wm_softc *sc)
   11294 {
   11295 	uint16_t major, minor, build, patch;
   11296 	uint16_t uid0, uid1;
   11297 	uint16_t nvm_data;
   11298 	uint16_t off;
   11299 	bool check_version = false;
   11300 	bool check_optionrom = false;
   11301 	bool have_build = false;
   11302 
   11303 	/*
   11304 	 * Version format:
   11305 	 *
   11306 	 * XYYZ
   11307 	 * X0YZ
   11308 	 * X0YY
   11309 	 *
   11310 	 * Example:
   11311 	 *
   11312 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11313 	 *	82571	0x50a6	5.10.6?
   11314 	 *	82572	0x506a	5.6.10?
   11315 	 *	82572EI	0x5069	5.6.9?
   11316 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11317 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11319 	 */
   11320 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11321 	switch (sc->sc_type) {
   11322 	case WM_T_82571:
   11323 	case WM_T_82572:
   11324 	case WM_T_82574:
   11325 	case WM_T_82583:
   11326 		check_version = true;
   11327 		check_optionrom = true;
   11328 		have_build = true;
   11329 		break;
   11330 	case WM_T_82575:
   11331 	case WM_T_82576:
   11332 	case WM_T_82580:
   11333 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11334 			check_version = true;
   11335 		break;
   11336 	case WM_T_I211:
   11337 		wm_nvm_version_invm(sc);
   11338 		goto printver;
   11339 	case WM_T_I210:
   11340 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11341 			wm_nvm_version_invm(sc);
   11342 			goto printver;
   11343 		}
   11344 		/* FALLTHROUGH */
   11345 	case WM_T_I350:
   11346 	case WM_T_I354:
   11347 		check_version = true;
   11348 		check_optionrom = true;
   11349 		break;
   11350 	default:
   11351 		return;
   11352 	}
   11353 	if (check_version) {
   11354 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11355 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11356 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11357 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11358 			build = nvm_data & NVM_BUILD_MASK;
   11359 			have_build = true;
   11360 		} else
   11361 			minor = nvm_data & 0x00ff;
   11362 
		/* Render the hex digits as decimal: 0x12 -> 12, 0x0a -> 10 */
   11364 		minor = (minor / 16) * 10 + (minor % 16);
   11365 		sc->sc_nvm_ver_major = major;
   11366 		sc->sc_nvm_ver_minor = minor;
   11367 
   11368 printver:
   11369 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11370 		    sc->sc_nvm_ver_minor);
   11371 		if (have_build) {
   11372 			sc->sc_nvm_ver_build = build;
   11373 			aprint_verbose(".%d", build);
   11374 		}
   11375 	}
   11376 	if (check_optionrom) {
   11377 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11378 		/* Option ROM Version */
   11379 		if ((off != 0x0000) && (off != 0xffff)) {
   11380 			off += NVM_COMBO_VER_OFF;
   11381 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11382 			wm_nvm_read(sc, off, 1, &uid0);
   11383 			if ((uid0 != 0) && (uid0 != 0xffff)
   11384 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11385 				/* 16bits */
   11386 				major = uid0 >> 8;
   11387 				build = (uid0 << 8) | (uid1 >> 8);
   11388 				patch = uid1 & 0x00ff;
   11389 				aprint_verbose(", option ROM Version %d.%d.%d",
   11390 				    major, build, patch);
   11391 			}
   11392 		}
   11393 	}
   11394 
    11395 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
          	/* Re-read uid1; the option ROM read above may have clobbered it */
          	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
    11396 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11397 }
   11398 
   11399 /*
   11400  * wm_nvm_read:
   11401  *
   11402  *	Read data from the serial EEPROM.
   11403  */
   11404 static int
   11405 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11406 {
   11407 	int rv;
   11408 
   11409 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11410 		device_xname(sc->sc_dev), __func__));
   11411 
   11412 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11413 		return 1;
   11414 
   11415 	if (wm_nvm_acquire(sc))
   11416 		return 1;
   11417 
   11418 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11419 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11420 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11421 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11422 	else if (sc->sc_type == WM_T_PCH_SPT)
   11423 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11424 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11425 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11426 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11427 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11428 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11429 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11430 	else
   11431 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11432 
   11433 	wm_nvm_release(sc);
   11434 	return rv;
   11435 }
   11436 
   11437 /*
   11438  * Hardware semaphores.
    11439  * Very complex; each wm_get_* acquire has a matching wm_put_* release.
   11440  */
   11441 
   11442 static int
   11443 wm_get_null(struct wm_softc *sc)
   11444 {
   11445 
   11446 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11447 		device_xname(sc->sc_dev), __func__));
   11448 	return 0;
   11449 }
   11450 
   11451 static void
   11452 wm_put_null(struct wm_softc *sc)
   11453 {
   11454 
   11455 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11456 		device_xname(sc->sc_dev), __func__));
   11457 	return;
   11458 }
   11459 
   11460 /*
   11461  * Get hardware semaphore.
   11462  * Same as e1000_get_hw_semaphore_generic()
   11463  */
   11464 static int
   11465 wm_get_swsm_semaphore(struct wm_softc *sc)
   11466 {
   11467 	int32_t timeout;
   11468 	uint32_t swsm;
   11469 
   11470 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11471 		device_xname(sc->sc_dev), __func__));
   11472 	KASSERT(sc->sc_nvm_wordsize > 0);
   11473 
   11474 	/* Get the SW semaphore. */
   11475 	timeout = sc->sc_nvm_wordsize + 1;
   11476 	while (timeout) {
   11477 		swsm = CSR_READ(sc, WMREG_SWSM);
   11478 
   11479 		if ((swsm & SWSM_SMBI) == 0)
   11480 			break;
   11481 
   11482 		delay(50);
   11483 		timeout--;
   11484 	}
   11485 
   11486 	if (timeout == 0) {
   11487 		aprint_error_dev(sc->sc_dev,
   11488 		    "could not acquire SWSM SMBI\n");
   11489 		return 1;
   11490 	}
   11491 
   11492 	/* Get the FW semaphore. */
   11493 	timeout = sc->sc_nvm_wordsize + 1;
   11494 	while (timeout) {
   11495 		swsm = CSR_READ(sc, WMREG_SWSM);
   11496 		swsm |= SWSM_SWESMBI;
   11497 		CSR_WRITE(sc, WMREG_SWSM, swsm);
    11498 		/* If we managed to set the bit, we got the semaphore. */
   11499 		swsm = CSR_READ(sc, WMREG_SWSM);
   11500 		if (swsm & SWSM_SWESMBI)
   11501 			break;
   11502 
   11503 		delay(50);
   11504 		timeout--;
   11505 	}
   11506 
   11507 	if (timeout == 0) {
   11508 		aprint_error_dev(sc->sc_dev,
   11509 		    "could not acquire SWSM SWESMBI\n");
   11510 		/* Release semaphores */
   11511 		wm_put_swsm_semaphore(sc);
   11512 		return 1;
   11513 	}
   11514 	return 0;
   11515 }
   11516 
   11517 /*
   11518  * Put hardware semaphore.
   11519  * Same as e1000_put_hw_semaphore_generic()
   11520  */
   11521 static void
   11522 wm_put_swsm_semaphore(struct wm_softc *sc)
   11523 {
   11524 	uint32_t swsm;
   11525 
   11526 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11527 		device_xname(sc->sc_dev), __func__));
   11528 
   11529 	swsm = CSR_READ(sc, WMREG_SWSM);
   11530 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11531 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11532 }
   11533 
   11534 /*
   11535  * Get SW/FW semaphore.
   11536  * Same as e1000_acquire_swfw_sync_82575().
   11537  */
   11538 static int
   11539 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11540 {
   11541 	uint32_t swfw_sync;
   11542 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11543 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11544 	int timeout;
   11545 
   11546 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11547 		device_xname(sc->sc_dev), __func__));
   11548 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11549 
   11550 	for (timeout = 0; timeout < 200; timeout++) {
   11551 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11552 			if (wm_get_swsm_semaphore(sc)) {
   11553 				aprint_error_dev(sc->sc_dev,
   11554 				    "%s: failed to get semaphore\n",
   11555 				    __func__);
   11556 				return 1;
   11557 			}
   11558 		}
   11559 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11560 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11561 			swfw_sync |= swmask;
   11562 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11563 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11564 				wm_put_swsm_semaphore(sc);
   11565 			return 0;
   11566 		}
   11567 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11568 			wm_put_swsm_semaphore(sc);
   11569 		delay(5000);
   11570 	}
   11571 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11572 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11573 	return 1;
   11574 }
   11575 
   11576 static void
   11577 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11578 {
   11579 	uint32_t swfw_sync;
   11580 
   11581 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11582 		device_xname(sc->sc_dev), __func__));
   11583 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11584 
   11585 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11586 		while (wm_get_swsm_semaphore(sc) != 0)
   11587 			continue;
   11588 	}
   11589 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11590 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11591 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11592 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11593 		wm_put_swsm_semaphore(sc);
   11594 }
   11595 
   11596 static int
   11597 wm_get_phy_82575(struct wm_softc *sc)
   11598 {
   11599 
   11600 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11601 		device_xname(sc->sc_dev), __func__));
   11602 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11603 }
   11604 
   11605 static void
   11606 wm_put_phy_82575(struct wm_softc *sc)
   11607 {
   11608 
   11609 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11610 		device_xname(sc->sc_dev), __func__));
    11611 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11612 }
   11613 
   11614 static int
   11615 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11616 {
   11617 	uint32_t ext_ctrl;
    11618 	int timeout;
   11619 
   11620 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11621 		device_xname(sc->sc_dev), __func__));
   11622 
   11623 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11624 	for (timeout = 0; timeout < 200; timeout++) {
   11625 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11626 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11627 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11628 
   11629 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11630 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11631 			return 0;
   11632 		delay(5000);
   11633 	}
   11634 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11635 	    device_xname(sc->sc_dev), ext_ctrl);
   11636 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11637 	return 1;
   11638 }
   11639 
   11640 static void
   11641 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11642 {
   11643 	uint32_t ext_ctrl;
   11644 
   11645 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11646 		device_xname(sc->sc_dev), __func__));
   11647 
   11648 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11649 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11650 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11651 
   11652 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11653 }
   11654 
   11655 static int
   11656 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11657 {
   11658 	uint32_t ext_ctrl;
   11659 	int timeout;
   11660 
   11661 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11662 		device_xname(sc->sc_dev), __func__));
   11663 	mutex_enter(sc->sc_ich_phymtx);
   11664 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11665 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11666 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11667 			break;
   11668 		delay(1000);
   11669 	}
   11670 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11671 		printf("%s: SW has already locked the resource\n",
   11672 		    device_xname(sc->sc_dev));
   11673 		goto out;
   11674 	}
   11675 
   11676 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11677 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11678 	for (timeout = 0; timeout < 1000; timeout++) {
   11679 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11680 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11681 			break;
   11682 		delay(1000);
   11683 	}
   11684 	if (timeout >= 1000) {
   11685 		printf("%s: failed to acquire semaphore\n",
   11686 		    device_xname(sc->sc_dev));
   11687 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11688 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11689 		goto out;
   11690 	}
   11691 	return 0;
   11692 
   11693 out:
   11694 	mutex_exit(sc->sc_ich_phymtx);
   11695 	return 1;
   11696 }
   11697 
   11698 static void
   11699 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11700 {
   11701 	uint32_t ext_ctrl;
   11702 
   11703 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11704 		device_xname(sc->sc_dev), __func__));
   11705 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11706 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11707 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11708 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11709 	} else {
   11710 		printf("%s: Semaphore unexpectedly released\n",
   11711 		    device_xname(sc->sc_dev));
   11712 	}
   11713 
   11714 	mutex_exit(sc->sc_ich_phymtx);
   11715 }
   11716 
   11717 static int
   11718 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11719 {
   11720 
   11721 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11722 		device_xname(sc->sc_dev), __func__));
   11723 	mutex_enter(sc->sc_ich_nvmmtx);
   11724 
   11725 	return 0;
   11726 }
   11727 
   11728 static void
   11729 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11730 {
   11731 
   11732 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11733 		device_xname(sc->sc_dev), __func__));
   11734 	mutex_exit(sc->sc_ich_nvmmtx);
   11735 }
   11736 
   11737 static int
   11738 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11739 {
   11740 	int i = 0;
   11741 	uint32_t reg;
   11742 
   11743 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11744 		device_xname(sc->sc_dev), __func__));
   11745 
   11746 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11747 	do {
   11748 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11749 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11750 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11751 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11752 			break;
   11753 		delay(2*1000);
   11754 		i++;
   11755 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11756 
   11757 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11758 		wm_put_hw_semaphore_82573(sc);
   11759 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11760 		    device_xname(sc->sc_dev));
   11761 		return -1;
   11762 	}
   11763 
   11764 	return 0;
   11765 }
   11766 
   11767 static void
   11768 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11769 {
   11770 	uint32_t reg;
   11771 
   11772 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11773 		device_xname(sc->sc_dev), __func__));
   11774 
   11775 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11776 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11777 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11778 }
   11779 
   11780 /*
   11781  * Management mode and power management related subroutines.
   11782  * BMC, AMT, suspend/resume and EEE.
   11783  */
   11784 
   11785 #ifdef WM_WOL
   11786 static int
   11787 wm_check_mng_mode(struct wm_softc *sc)
   11788 {
   11789 	int rv;
   11790 
   11791 	switch (sc->sc_type) {
   11792 	case WM_T_ICH8:
   11793 	case WM_T_ICH9:
   11794 	case WM_T_ICH10:
   11795 	case WM_T_PCH:
   11796 	case WM_T_PCH2:
   11797 	case WM_T_PCH_LPT:
   11798 	case WM_T_PCH_SPT:
   11799 		rv = wm_check_mng_mode_ich8lan(sc);
   11800 		break;
   11801 	case WM_T_82574:
   11802 	case WM_T_82583:
   11803 		rv = wm_check_mng_mode_82574(sc);
   11804 		break;
   11805 	case WM_T_82571:
   11806 	case WM_T_82572:
   11807 	case WM_T_82573:
   11808 	case WM_T_80003:
   11809 		rv = wm_check_mng_mode_generic(sc);
   11810 		break;
   11811 	default:
    11812 		/* nothing to do */
   11813 		rv = 0;
   11814 		break;
   11815 	}
   11816 
   11817 	return rv;
   11818 }
   11819 
   11820 static int
   11821 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11822 {
   11823 	uint32_t fwsm;
   11824 
   11825 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11826 
   11827 	if (((fwsm & FWSM_FW_VALID) != 0)
   11828 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11829 		return 1;
   11830 
   11831 	return 0;
   11832 }
   11833 
   11834 static int
   11835 wm_check_mng_mode_82574(struct wm_softc *sc)
   11836 {
   11837 	uint16_t data;
   11838 
   11839 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11840 
   11841 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11842 		return 1;
   11843 
   11844 	return 0;
   11845 }
   11846 
   11847 static int
   11848 wm_check_mng_mode_generic(struct wm_softc *sc)
   11849 {
   11850 	uint32_t fwsm;
   11851 
   11852 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11853 
   11854 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11855 		return 1;
   11856 
   11857 	return 0;
   11858 }
   11859 #endif /* WM_WOL */
   11860 
   11861 static int
   11862 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11863 {
   11864 	uint32_t manc, fwsm, factps;
   11865 
   11866 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11867 		return 0;
   11868 
   11869 	manc = CSR_READ(sc, WMREG_MANC);
   11870 
   11871 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11872 		device_xname(sc->sc_dev), manc));
   11873 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11874 		return 0;
   11875 
   11876 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11877 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11878 		factps = CSR_READ(sc, WMREG_FACTPS);
   11879 		if (((factps & FACTPS_MNGCG) == 0)
   11880 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11881 			return 1;
    11882 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11883 		uint16_t data;
   11884 
   11885 		factps = CSR_READ(sc, WMREG_FACTPS);
   11886 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11887 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11888 			device_xname(sc->sc_dev), factps, data));
   11889 		if (((factps & FACTPS_MNGCG) == 0)
   11890 		    && ((data & NVM_CFG2_MNGM_MASK)
   11891 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11892 			return 1;
   11893 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11894 	    && ((manc & MANC_ASF_EN) == 0))
   11895 		return 1;
   11896 
   11897 	return 0;
   11898 }
   11899 
   11900 static bool
   11901 wm_phy_resetisblocked(struct wm_softc *sc)
   11902 {
   11903 	bool blocked = false;
   11904 	uint32_t reg;
   11905 	int i = 0;
   11906 
   11907 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11908 		device_xname(sc->sc_dev), __func__));
   11909 
   11910 	switch (sc->sc_type) {
   11911 	case WM_T_ICH8:
   11912 	case WM_T_ICH9:
   11913 	case WM_T_ICH10:
   11914 	case WM_T_PCH:
   11915 	case WM_T_PCH2:
   11916 	case WM_T_PCH_LPT:
   11917 	case WM_T_PCH_SPT:
   11918 		do {
   11919 			reg = CSR_READ(sc, WMREG_FWSM);
   11920 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11921 				blocked = true;
   11922 				delay(10*1000);
   11923 				continue;
   11924 			}
   11925 			blocked = false;
   11926 		} while (blocked && (i++ < 30));
   11927 		return blocked;
   11929 	case WM_T_82571:
   11930 	case WM_T_82572:
   11931 	case WM_T_82573:
   11932 	case WM_T_82574:
   11933 	case WM_T_82583:
   11934 	case WM_T_80003:
   11935 		reg = CSR_READ(sc, WMREG_MANC);
    11936 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   11941 	default:
   11942 		/* no problem */
   11943 		break;
   11944 	}
   11945 
   11946 	return false;
   11947 }
   11948 
   11949 static void
   11950 wm_get_hw_control(struct wm_softc *sc)
   11951 {
   11952 	uint32_t reg;
   11953 
   11954 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11955 		device_xname(sc->sc_dev), __func__));
   11956 
   11957 	if (sc->sc_type == WM_T_82573) {
   11958 		reg = CSR_READ(sc, WMREG_SWSM);
   11959 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11960 	} else if (sc->sc_type >= WM_T_82571) {
   11961 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11962 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11963 	}
   11964 }
   11965 
   11966 static void
   11967 wm_release_hw_control(struct wm_softc *sc)
   11968 {
   11969 	uint32_t reg;
   11970 
   11971 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11972 		device_xname(sc->sc_dev), __func__));
   11973 
   11974 	if (sc->sc_type == WM_T_82573) {
   11975 		reg = CSR_READ(sc, WMREG_SWSM);
   11976 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11977 	} else if (sc->sc_type >= WM_T_82571) {
   11978 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11979 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11980 	}
   11981 }
   11982 
   11983 static void
   11984 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11985 {
   11986 	uint32_t reg;
   11987 
   11988 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11989 		device_xname(sc->sc_dev), __func__));
   11990 
   11991 	if (sc->sc_type < WM_T_PCH2)
   11992 		return;
   11993 
   11994 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11995 
   11996 	if (gate)
   11997 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11998 	else
   11999 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12000 
   12001 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12002 }
   12003 
   12004 static void
   12005 wm_smbustopci(struct wm_softc *sc)
   12006 {
   12007 	uint32_t fwsm, reg;
   12008 	int rv = 0;
   12009 
   12010 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12011 		device_xname(sc->sc_dev), __func__));
   12012 
   12013 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12014 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12015 
   12016 	/* Disable ULP */
   12017 	wm_ulp_disable(sc);
   12018 
   12019 	/* Acquire PHY semaphore */
   12020 	sc->phy.acquire(sc);
   12021 
   12022 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12023 	switch (sc->sc_type) {
   12024 	case WM_T_PCH_LPT:
   12025 	case WM_T_PCH_SPT:
   12026 		if (wm_phy_is_accessible_pchlan(sc))
   12027 			break;
   12028 
   12029 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12030 		reg |= CTRL_EXT_FORCE_SMBUS;
   12031 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12032 #if 0
   12033 		/* XXX Isn't this required??? */
   12034 		CSR_WRITE_FLUSH(sc);
   12035 #endif
   12036 		delay(50 * 1000);
   12037 		/* FALLTHROUGH */
   12038 	case WM_T_PCH2:
   12039 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12040 			break;
   12041 		/* FALLTHROUGH */
   12042 	case WM_T_PCH:
   12043 		if (sc->sc_type == WM_T_PCH)
   12044 			if ((fwsm & FWSM_FW_VALID) != 0)
   12045 				break;
   12046 
   12047 		if (wm_phy_resetisblocked(sc) == true) {
   12048 			printf("XXX reset is blocked(3)\n");
   12049 			break;
   12050 		}
   12051 
   12052 		wm_toggle_lanphypc_pch_lpt(sc);
   12053 
   12054 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12055 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12056 				break;
   12057 
   12058 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12059 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12060 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12061 
   12062 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12063 				break;
   12064 			rv = -1;
   12065 		}
   12066 		break;
   12067 	default:
   12068 		break;
   12069 	}
   12070 
   12071 	/* Release semaphore */
   12072 	sc->phy.release(sc);
   12073 
   12074 	if (rv == 0) {
   12075 		if (wm_phy_resetisblocked(sc)) {
   12076 			printf("XXX reset is blocked(4)\n");
   12077 			goto out;
   12078 		}
   12079 		wm_reset_phy(sc);
   12080 		if (wm_phy_resetisblocked(sc))
    12081 			printf("XXX reset is blocked(5)\n");
   12082 	}
   12083 
   12084 out:
   12085 	/*
   12086 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12087 	 */
   12088 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12089 		delay(10*1000);
   12090 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12091 	}
   12092 }
   12093 
   12094 static void
   12095 wm_init_manageability(struct wm_softc *sc)
   12096 {
   12097 
   12098 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12099 		device_xname(sc->sc_dev), __func__));
   12100 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12101 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12102 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12103 
   12104 		/* Disable hardware interception of ARP */
   12105 		manc &= ~MANC_ARP_EN;
   12106 
   12107 		/* Enable receiving management packets to the host */
   12108 		if (sc->sc_type >= WM_T_82571) {
   12109 			manc |= MANC_EN_MNG2HOST;
    12110 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12111 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12112 		}
   12113 
   12114 		CSR_WRITE(sc, WMREG_MANC, manc);
   12115 	}
   12116 }
   12117 
   12118 static void
   12119 wm_release_manageability(struct wm_softc *sc)
   12120 {
   12121 
   12122 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12123 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12124 
   12125 		manc |= MANC_ARP_EN;
   12126 		if (sc->sc_type >= WM_T_82571)
   12127 			manc &= ~MANC_EN_MNG2HOST;
   12128 
   12129 		CSR_WRITE(sc, WMREG_MANC, manc);
   12130 	}
   12131 }
   12132 
   12133 static void
   12134 wm_get_wakeup(struct wm_softc *sc)
   12135 {
   12136 
   12137 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12138 	switch (sc->sc_type) {
   12139 	case WM_T_82573:
   12140 	case WM_T_82583:
   12141 		sc->sc_flags |= WM_F_HAS_AMT;
   12142 		/* FALLTHROUGH */
   12143 	case WM_T_80003:
   12144 	case WM_T_82575:
   12145 	case WM_T_82576:
   12146 	case WM_T_82580:
   12147 	case WM_T_I350:
   12148 	case WM_T_I354:
   12149 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12150 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12151 		/* FALLTHROUGH */
   12152 	case WM_T_82541:
   12153 	case WM_T_82541_2:
   12154 	case WM_T_82547:
   12155 	case WM_T_82547_2:
   12156 	case WM_T_82571:
   12157 	case WM_T_82572:
   12158 	case WM_T_82574:
   12159 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12160 		break;
   12161 	case WM_T_ICH8:
   12162 	case WM_T_ICH9:
   12163 	case WM_T_ICH10:
   12164 	case WM_T_PCH:
   12165 	case WM_T_PCH2:
   12166 	case WM_T_PCH_LPT:
   12167 	case WM_T_PCH_SPT:
   12168 		sc->sc_flags |= WM_F_HAS_AMT;
   12169 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12170 		break;
   12171 	default:
   12172 		break;
   12173 	}
   12174 
   12175 	/* 1: HAS_MANAGE */
   12176 	if (wm_enable_mng_pass_thru(sc) != 0)
   12177 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12178 
   12179 #ifdef WM_DEBUG
   12180 	printf("\n");
   12181 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12182 		printf("HAS_AMT,");
   12183 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12184 		printf("ARC_SUBSYS_VALID,");
   12185 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12186 		printf("ASF_FIRMWARE_PRES,");
   12187 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12188 		printf("HAS_MANAGE,");
   12189 	printf("\n");
   12190 #endif
   12191 	/*
    12192 	 * Note that the WOL flags are set after the EEPROM-related state
    12193 	 * has been reset.
   12194 	 */
   12195 }
   12196 
   12197 /*
   12198  * Unconfigure Ultra Low Power mode.
   12199  * Only for I217 and newer (see below).
   12200  */
   12201 static void
   12202 wm_ulp_disable(struct wm_softc *sc)
   12203 {
   12204 	uint32_t reg;
   12205 	int i = 0;
   12206 
   12207 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12208 		device_xname(sc->sc_dev), __func__));
   12209 	/* Exclude old devices */
   12210 	if ((sc->sc_type < WM_T_PCH_LPT)
   12211 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12212 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12213 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12214 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12215 		return;
   12216 
   12217 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12218 		/* Request ME un-configure ULP mode in the PHY */
   12219 		reg = CSR_READ(sc, WMREG_H2ME);
   12220 		reg &= ~H2ME_ULP;
   12221 		reg |= H2ME_ENFORCE_SETTINGS;
   12222 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12223 
   12224 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12225 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12226 			if (i++ == 30) {
    12227 				printf("%s: %s timed out\n",
          				    device_xname(sc->sc_dev), __func__);
   12228 				return;
   12229 			}
   12230 			delay(10 * 1000);
   12231 		}
   12232 		reg = CSR_READ(sc, WMREG_H2ME);
   12233 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12234 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12235 
   12236 		return;
   12237 	}
   12238 
   12239 	/* Acquire semaphore */
   12240 	sc->phy.acquire(sc);
   12241 
   12242 	/* Toggle LANPHYPC */
   12243 	wm_toggle_lanphypc_pch_lpt(sc);
   12244 
   12245 	/* Unforce SMBus mode in PHY */
   12246 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12247 	if (reg == 0x0000 || reg == 0xffff) {
   12248 		uint32_t reg2;
   12249 
   12250 		printf("%s: Force SMBus first.\n", __func__);
   12251 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12252 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12253 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12254 		delay(50 * 1000);
   12255 
   12256 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12257 	}
   12258 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12259 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12260 
   12261 	/* Unforce SMBus mode in MAC */
   12262 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12263 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12264 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12265 
   12266 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12267 	reg |= HV_PM_CTRL_K1_ENA;
   12268 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12269 
   12270 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12271 	reg &= ~(I218_ULP_CONFIG1_IND
   12272 	    | I218_ULP_CONFIG1_STICKY_ULP
   12273 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12274 	    | I218_ULP_CONFIG1_WOL_HOST
   12275 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12276 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12277 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12278 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12279 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12280 	reg |= I218_ULP_CONFIG1_START;
   12281 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12282 
   12283 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12284 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12285 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12286 
   12287 	/* Release semaphore */
   12288 	sc->phy.release(sc);
   12289 	wm_gmii_reset(sc);
   12290 	delay(50 * 1000);
   12291 }
   12292 
   12293 /* WOL in the newer chipset interfaces (pchlan) */
   12294 static void
   12295 wm_enable_phy_wakeup(struct wm_softc *sc)
   12296 {
   12297 #if 0
   12298 	uint16_t preg;
   12299 
   12300 	/* Copy MAC RARs to PHY RARs */
   12301 
   12302 	/* Copy MAC MTA to PHY MTA */
   12303 
   12304 	/* Configure PHY Rx Control register */
   12305 
   12306 	/* Enable PHY wakeup in MAC register */
   12307 
   12308 	/* Configure and enable PHY wakeup in PHY registers */
   12309 
   12310 	/* Activate PHY wakeup */
   12311 
   12312 	/* XXX */
   12313 #endif
   12314 }
   12315 
   12316 /* Power down workaround on D3 */
   12317 static void
   12318 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12319 {
   12320 	uint32_t reg;
   12321 	int i;
   12322 
   12323 	for (i = 0; i < 2; i++) {
   12324 		/* Disable link */
   12325 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12326 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12327 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12328 
   12329 		/*
   12330 		 * Call gig speed drop workaround on Gig disable before
   12331 		 * accessing any PHY registers
   12332 		 */
   12333 		if (sc->sc_type == WM_T_ICH8)
   12334 			wm_gig_downshift_workaround_ich8lan(sc);
   12335 
   12336 		/* Write VR power-down enable */
   12337 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12338 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12339 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12340 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12341 
   12342 		/* Read it back and test */
   12343 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12344 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12345 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12346 			break;
   12347 
   12348 		/* Issue PHY reset and repeat at most one more time */
   12349 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12350 	}
   12351 }
   12352 
   12353 static void
   12354 wm_enable_wakeup(struct wm_softc *sc)
   12355 {
   12356 	uint32_t reg, pmreg;
   12357 	pcireg_t pmode;
   12358 
   12359 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12360 		device_xname(sc->sc_dev), __func__));
   12361 
   12362 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12363 		&pmreg, NULL) == 0)
   12364 		return;
   12365 
   12366 	/* Advertise the wakeup capability */
   12367 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12368 	    | CTRL_SWDPIN(3));
   12369 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12370 
   12371 	/* ICH workaround */
   12372 	switch (sc->sc_type) {
   12373 	case WM_T_ICH8:
   12374 	case WM_T_ICH9:
   12375 	case WM_T_ICH10:
   12376 	case WM_T_PCH:
   12377 	case WM_T_PCH2:
   12378 	case WM_T_PCH_LPT:
   12379 	case WM_T_PCH_SPT:
   12380 		/* Disable gig during WOL */
   12381 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12382 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12383 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12384 		if (sc->sc_type == WM_T_PCH)
   12385 			wm_gmii_reset(sc);
   12386 
   12387 		/* Power down workaround */
   12388 		if (sc->sc_phytype == WMPHY_82577) {
   12389 			struct mii_softc *child;
   12390 
   12391 			/* Assume that the PHY is copper */
   12392 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    12393 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   12394 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12395 				    (768 << 5) | 25, 0x0444); /* magic num */
   12396 		}
   12397 		break;
   12398 	default:
   12399 		break;
   12400 	}
   12401 
   12402 	/* Keep the laser running on fiber adapters */
   12403 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12404 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12405 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12406 		reg |= CTRL_EXT_SWDPIN(3);
   12407 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12408 	}
   12409 
   12410 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    12411 #if 0	/* For multicast packets */
   12412 	reg |= WUFC_MC;
   12413 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12414 #endif
   12415 
   12416 	if (sc->sc_type >= WM_T_PCH)
   12417 		wm_enable_phy_wakeup(sc);
   12418 	else {
   12419 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12420 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12421 	}
   12422 
   12423 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12424 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12425 		|| (sc->sc_type == WM_T_PCH2))
   12426 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12427 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12428 
   12429 	/* Request PME */
   12430 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12431 #if 0
   12432 	/* Disable WOL */
   12433 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12434 #else
   12435 	/* For WOL */
   12436 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12437 #endif
   12438 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12439 }
   12440 
   12441 /* LPLU */
   12442 
   12443 static void
   12444 wm_lplu_d0_disable(struct wm_softc *sc)
   12445 {
   12446 	uint32_t reg;
   12447 
   12448 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12449 		device_xname(sc->sc_dev), __func__));
   12450 
   12451 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12452 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12453 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12454 }
   12455 
   12456 static void
   12457 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12458 {
   12459 	uint32_t reg;
   12460 
   12461 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12462 		device_xname(sc->sc_dev), __func__));
   12463 
   12464 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12465 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12466 	reg |= HV_OEM_BITS_ANEGNOW;
   12467 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12468 }
   12469 
   12470 /* EEE */
   12471 
   12472 static void
   12473 wm_set_eee_i350(struct wm_softc *sc)
   12474 {
   12475 	uint32_t ipcnfg, eeer;
   12476 
   12477 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12478 	eeer = CSR_READ(sc, WMREG_EEER);
   12479 
   12480 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12481 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12482 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12483 		    | EEER_LPI_FC);
   12484 	} else {
   12485 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12486 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12487 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12488 		    | EEER_LPI_FC);
   12489 	}
   12490 
   12491 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12492 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12493 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12494 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12495 }
   12496 
   12497 /*
   12498  * Workarounds (mainly PHY related).
    12499  * Basically, the PHY workarounds live in the PHY drivers.
   12500  */
   12501 
   12502 /* Work-around for 82566 Kumeran PCS lock loss */
   12503 static void
   12504 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12505 {
   12506 #if 0
   12507 	int miistatus, active, i;
   12508 	int reg;
   12509 
   12510 	miistatus = sc->sc_mii.mii_media_status;
   12511 
   12512 	/* If the link is not up, do nothing */
   12513 	if ((miistatus & IFM_ACTIVE) == 0)
   12514 		return;
   12515 
   12516 	active = sc->sc_mii.mii_media_active;
   12517 
   12518 	/* Nothing to do if the link is other than 1Gbps */
   12519 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12520 		return;
   12521 
   12522 	for (i = 0; i < 10; i++) {
   12523 		/* read twice */
   12524 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12525 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12526 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12527 			goto out;	/* GOOD! */
   12528 
   12529 		/* Reset the PHY */
   12530 		wm_gmii_reset(sc);
   12531 		delay(5*1000);
   12532 	}
   12533 
   12534 	/* Disable GigE link negotiation */
   12535 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12536 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12537 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12538 
   12539 	/*
   12540 	 * Call gig speed drop workaround on Gig disable before accessing
   12541 	 * any PHY registers.
   12542 	 */
   12543 	wm_gig_downshift_workaround_ich8lan(sc);
   12544 
   12545 out:
   12546 	return;
   12547 #endif
   12548 }
   12549 
   12550 /* WOL from S5 stops working */
   12551 static void
   12552 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12553 {
   12554 	uint16_t kmrn_reg;
   12555 
   12556 	/* Only for igp3 */
   12557 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12558 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12559 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12560 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12561 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12562 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12563 	}
   12564 }
   12565 
   12566 /*
   12567  * Workaround for pch's PHYs
   12568  * XXX should be moved to new PHY driver?
   12569  */
   12570 static void
   12571 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12572 {
   12573 
   12574 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12575 		device_xname(sc->sc_dev), __func__));
   12576 	KASSERT(sc->sc_type == WM_T_PCH);
   12577 
   12578 	if (sc->sc_phytype == WMPHY_82577)
   12579 		wm_set_mdio_slow_mode_hv(sc);
   12580 
   12581 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12582 
   12583 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12584 
   12585 	/* 82578 */
   12586 	if (sc->sc_phytype == WMPHY_82578) {
   12587 		struct mii_softc *child;
   12588 
   12589 		/*
   12590 		 * Return registers to default by doing a soft reset then
   12591 		 * writing 0x3140 to the control register
   12592 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12593 		 */
   12594 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12595 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12596 			PHY_RESET(child);
   12597 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12598 			    0x3140);
   12599 		}
   12600 	}
   12601 
   12602 	/* Select page 0 */
   12603 	sc->phy.acquire(sc);
   12604 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12605 	sc->phy.release(sc);
   12606 
   12607 	/*
   12608 	 * Configure the K1 Si workaround during phy reset assuming there is
   12609 	 * link so that it disables K1 if link is in 1Gbps.
   12610 	 */
   12611 	wm_k1_gig_workaround_hv(sc, 1);
   12612 }
   12613 
   12614 static void
   12615 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12616 {
   12617 
   12618 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12619 		device_xname(sc->sc_dev), __func__));
   12620 	KASSERT(sc->sc_type == WM_T_PCH2);
   12621 
   12622 	wm_set_mdio_slow_mode_hv(sc);
   12623 }
   12624 
   12625 static int
   12626 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12627 {
   12628 	int k1_enable = sc->sc_nvm_k1_enabled;
   12629 
   12630 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12631 		device_xname(sc->sc_dev), __func__));
   12632 
   12633 	if (sc->phy.acquire(sc) != 0)
   12634 		return -1;
   12635 
   12636 	if (link) {
   12637 		k1_enable = 0;
   12638 
   12639 		/* Link stall fix for link up */
   12640 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12641 	} else {
   12642 		/* Link stall fix for link down */
   12643 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12644 	}
   12645 
   12646 	wm_configure_k1_ich8lan(sc, k1_enable);
   12647 	sc->phy.release(sc);
   12648 
   12649 	return 0;
   12650 }
   12651 
   12652 static void
   12653 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12654 {
   12655 	uint32_t reg;
   12656 
   12657 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12658 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12659 	    reg | HV_KMRN_MDIO_SLOW);
   12660 }
   12661 
   12662 static void
   12663 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12664 {
   12665 	uint32_t ctrl, ctrl_ext, tmp;
   12666 	uint16_t kmrn_reg;
   12667 
   12668 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12669 
   12670 	if (k1_enable)
   12671 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12672 	else
   12673 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12674 
   12675 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12676 
   12677 	delay(20);
   12678 
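          	/*
          	 * The block below briefly forces the MAC speed setting
          	 * (CTRL_FRCSPD together with CTRL_EXT_SPD_BYPS), presumably so
          	 * that the K1 configuration change takes effect, and then
          	 * restores the original CTRL/CTRL_EXT values.
          	 */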
   12679 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12680 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12681 
   12682 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12683 	tmp |= CTRL_FRCSPD;
   12684 
   12685 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12686 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12687 	CSR_WRITE_FLUSH(sc);
   12688 	delay(20);
   12689 
   12690 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12691 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12692 	CSR_WRITE_FLUSH(sc);
   12693 	delay(20);
   12694 }
   12695 
    12696 /* Special case: the 82575 needs manual initialization ... */
   12697 static void
   12698 wm_reset_init_script_82575(struct wm_softc *sc)
   12699 {
   12700 	/*
    12701 	 * Remark: this is untested code - we have no board without EEPROM.
    12702 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   12703 	 */
   12704 
   12705 	/* SerDes configuration via SERDESCTRL */
   12706 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12707 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12708 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12709 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12710 
   12711 	/* CCM configuration via CCMCTL register */
   12712 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12713 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12714 
   12715 	/* PCIe lanes configuration */
   12716 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12717 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12718 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12719 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12720 
   12721 	/* PCIe PLL Configuration */
   12722 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12723 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12724 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12725 }
   12726 
   12727 static void
   12728 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12729 {
   12730 	uint32_t reg;
   12731 	uint16_t nvmword;
   12732 	int rv;
   12733 
   12734 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12735 		return;
   12736 
   12737 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12738 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12739 	if (rv != 0) {
   12740 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12741 		    __func__);
   12742 		return;
   12743 	}
   12744 
   12745 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12746 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12747 		reg |= MDICNFG_DEST;
   12748 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12749 		reg |= MDICNFG_COM_MDIO;
   12750 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12751 }
   12752 
   12753 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   12754 
   12755 static bool
   12756 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   12757 {
   12758 	int i;
   12759 	uint32_t reg;
   12760 	uint16_t id1, id2;
   12761 
   12762 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12763 		device_xname(sc->sc_dev), __func__));
   12764 	id1 = id2 = 0xffff;
   12765 	for (i = 0; i < 2; i++) {
   12766 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   12767 		if (MII_INVALIDID(id1))
   12768 			continue;
   12769 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   12770 		if (MII_INVALIDID(id2))
   12771 			continue;
   12772 		break;
   12773 	}
    12774 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    12775 		goto out;
   12777 
   12778 	if (sc->sc_type < WM_T_PCH_LPT) {
   12779 		sc->phy.release(sc);
   12780 		wm_set_mdio_slow_mode_hv(sc);
   12781 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   12782 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   12783 		sc->phy.acquire(sc);
   12784 	}
   12785 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   12786 		printf("XXX return with false\n");
   12787 		return false;
   12788 	}
   12789 out:
   12790 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   12791 		/* Only unforce SMBus if ME is not active */
   12792 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   12793 			/* Unforce SMBus mode in PHY */
   12794 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   12795 			    CV_SMB_CTRL);
   12796 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12797 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   12798 			    CV_SMB_CTRL, reg);
   12799 
   12800 			/* Unforce SMBus mode in MAC */
   12801 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12802 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12803 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12804 		}
   12805 	}
   12806 	return true;
   12807 }
   12808 
   12809 static void
   12810 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   12811 {
   12812 	uint32_t reg;
   12813 	int i;
   12814 
   12815 	/* Set PHY Config Counter to 50msec */
   12816 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   12817 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   12818 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   12819 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   12820 
   12821 	/* Toggle LANPHYPC */
   12822 	reg = CSR_READ(sc, WMREG_CTRL);
   12823 	reg |= CTRL_LANPHYPC_OVERRIDE;
   12824 	reg &= ~CTRL_LANPHYPC_VALUE;
   12825 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12826 	CSR_WRITE_FLUSH(sc);
   12827 	delay(1000);
   12828 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   12829 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12830 	CSR_WRITE_FLUSH(sc);
   12831 
   12832 	if (sc->sc_type < WM_T_PCH_LPT)
   12833 		delay(50 * 1000);
   12834 	else {
   12835 		i = 20;
   12836 
   12837 		do {
   12838 			delay(5 * 1000);
   12839 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   12840 		    && i--);
   12841 
   12842 		delay(30 * 1000);
   12843 	}
   12844 }
   12845 
   12846 static int
   12847 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   12848 {
   12849 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   12850 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   12851 	uint32_t rxa;
   12852 	uint16_t scale = 0, lat_enc = 0;
   12853 	int64_t lat_ns, value;
   12854 
   12855 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12856 		device_xname(sc->sc_dev), __func__));
   12857 
   12858 	if (link) {
   12859 		pcireg_t preg;
   12860 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   12861 
   12862 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   12863 
   12864 		/*
   12865 		 * Determine the maximum latency tolerated by the device.
   12866 		 *
   12867 		 * Per the PCIe spec, the tolerated latencies are encoded as
   12868 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   12869 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   12870 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   12871 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   12872 		 */
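          		/*
          		 * Worked example for the encoding loop below (numbers
          		 * are illustrative only): lat_ns = 50000 encodes as
          		 * scale = 2, value = 49, since
          		 * ceil(ceil(50000 / 32) / 32) = 49 <= 1023, and the
          		 * encoded latency 49 * 2^10 ns = 50176 ns covers the
          		 * requirement.
          		 */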
   12873 		lat_ns = ((int64_t)rxa * 1024 -
   12874 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   12875 		if (lat_ns < 0)
   12876 			lat_ns = 0;
   12877 		else {
   12878 			uint32_t status;
   12879 			uint16_t speed;
   12880 
   12881 			status = CSR_READ(sc, WMREG_STATUS);
   12882 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   12883 			case STATUS_SPEED_10:
   12884 				speed = 10;
   12885 				break;
   12886 			case STATUS_SPEED_100:
   12887 				speed = 100;
   12888 				break;
   12889 			case STATUS_SPEED_1000:
   12890 				speed = 1000;
   12891 				break;
   12892 			default:
   12893 				printf("%s: Unknown speed (status = %08x)\n",
   12894 				    device_xname(sc->sc_dev), status);
   12895 				return -1;
   12896 			}
   12897 			lat_ns /= speed;
   12898 		}
   12899 		value = lat_ns;
   12900 
   12901 		while (value > LTRV_VALUE) {
    12902 			scale++;
   12903 			value = howmany(value, __BIT(5));
   12904 		}
   12905 		if (scale > LTRV_SCALE_MAX) {
   12906 			printf("%s: Invalid LTR latency scale %d\n",
   12907 			    device_xname(sc->sc_dev), scale);
   12908 			return -1;
   12909 		}
   12910 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   12911 
   12912 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12913 		    WM_PCI_LTR_CAP_LPT);
   12914 		max_snoop = preg & 0xffff;
   12915 		max_nosnoop = preg >> 16;
   12916 
   12917 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   12918 
    12919 		if (lat_enc > max_ltr_enc)
    12920 			lat_enc = max_ltr_enc;
   12922 	}
    12923 	/* Program the Snoop and No-Snoop latencies to the same value */
   12924 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   12925 	CSR_WRITE(sc, WMREG_LTRV, reg);
   12926 
   12927 	return 0;
   12928 }
   12929 
   12930 /*
   12931  * I210 Errata 25 and I211 Errata 10
   12932  * Slow System Clock.
   12933  */
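          /*
           * Outline of the workaround as implemented below: if the internal
           * PHY's PLL reports an unconfigured state, reset the PHY, bounce
           * the device through D3/D0 with a patched iNVM autoload word in
           * EEARBC, and retry up to WM_MAX_PLL_TRIES times.
           */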
   12934 static void
   12935 wm_pll_workaround_i210(struct wm_softc *sc)
   12936 {
   12937 	uint32_t mdicnfg, wuc;
   12938 	uint32_t reg;
   12939 	pcireg_t pcireg;
   12940 	uint32_t pmreg;
   12941 	uint16_t nvmword, tmp_nvmword;
   12942 	int phyval;
   12943 	bool wa_done = false;
   12944 	int i;
   12945 
   12946 	/* Save WUC and MDICNFG registers */
   12947 	wuc = CSR_READ(sc, WMREG_WUC);
   12948 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12949 
   12950 	reg = mdicnfg & ~MDICNFG_DEST;
   12951 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12952 
   12953 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12954 		nvmword = INVM_DEFAULT_AL;
   12955 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12956 
   12957 	/* Get Power Management cap offset */
   12958 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12959 		&pmreg, NULL) == 0)
   12960 		return;
   12961 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12962 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12963 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12964 
   12965 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12966 			break; /* OK */
   12967 		}
   12968 
   12969 		wa_done = true;
   12970 		/* Directly reset the internal PHY */
   12971 		reg = CSR_READ(sc, WMREG_CTRL);
   12972 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12973 
   12974 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12975 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12976 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12977 
   12978 		CSR_WRITE(sc, WMREG_WUC, 0);
   12979 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12980 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12981 
   12982 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12983 		    pmreg + PCI_PMCSR);
   12984 		pcireg |= PCI_PMCSR_STATE_D3;
   12985 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12986 		    pmreg + PCI_PMCSR, pcireg);
   12987 		delay(1000);
   12988 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12989 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12990 		    pmreg + PCI_PMCSR, pcireg);
   12991 
   12992 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12993 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12994 
   12995 		/* Restore WUC register */
   12996 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12997 	}
   12998 
   12999 	/* Restore MDICNFG setting */
   13000 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13001 	if (wa_done)
   13002 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13003 }
   13004