      1 /*	$NetBSD: if_wm.c,v 1.448 2016/11/16 09:27:49 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
      79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.448 2016/11/16 09:27:49 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
     157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
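
         /*
          * A usage sketch (illustrative, not compiled): the second argument
          * to DPRINTF() is a complete, parenthesized printf() argument list,
          * so a typical call site looks like
          *
          *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n",
          *	    device_xname(sc->sc_dev)));
          *
          * The doubled parentheses let the macro pass a variable number of
          * arguments through without relying on variadic macros.
          */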
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
     167  * The maximum number of interrupts supported by this driver.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
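
         /*
          * A minimal sketch of why the masks above require power-of-two
          * ring sizes: for a ring of 2^n entries, "(x + 1) & (size - 1)"
          * equals "(x + 1) % size" but avoids an integer division.  With
          * WM_NTXDESC_82544 (4096) descriptors, index 4095 wraps to 0:
          *
          *	(4095 + 1) & (4096 - 1) == 0
          */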
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
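
         /*
          * Worked example for the sizing comment above (assuming a jumbo
          * frame of about 9k, e.g. 9018 bytes): ceil(9018 / 2048) = 5
          * MCLBYTES buffers per packet, and 256 / 5 = 51, i.e. room for
          * roughly 50 full-sized jumbo packets.
          */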
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
     219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
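
         /*
          * Assumed usage sketch (not a quote from the code below): the
          * per-function PHY semaphore mask is selected by the chip's
          * function number (sc_funcid, 0 to 3):
          *
          *	uint16_t mask = swfwphysem[sc->sc_funcid];
          *	wm_get_swfw_semaphore(sc, mask);
          */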
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
     270 	} while (/*CONSTCOND*/0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
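
         /*
          * A sketch of what these expand to, assuming a queue prefix "txq",
          * an event "txdw" and queue number 0:
          *
          *	WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
          *
          * becomes roughly
          *
          *	snprintf((q)->txq_txdw_evcnt_name,
          *	    sizeof((q)->txq_txdw_evcnt_name),
          *	    "%s%02d%s", "txq", 0, "txdw");	-> "txq00txdw"
          *	evcnt_attach_dynamic(&(q)->txq_ev_txdw, EVCNT_TYPE_INTR,
          *	    NULL, xname, (q)->txq_txdw_evcnt_name);
          */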
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
     292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segment */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     316 	 * CPUs. This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
     321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     338 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    365 	int rxq_desc_rseg;		/* real number of control segment */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
    390 	int wmq_intr_idx;		/* index of MSI-X tables */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 	int reset_delay_us;
    400 };
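
         /*
          * A minimal wiring sketch (assumed, based on the prototypes below):
          * chip attach code points these ops at the matching semaphore
          * routines, e.g. for the 82575 family:
          *
          *	sc->phy.acquire = wm_get_phy_82575;
          *	sc->phy.release = wm_put_phy_82575;
          *	sc->phy.reset_delay_us = 100;	(delay value is hypothetical)
          */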
    401 
    402 /*
    403  * Software state per device.
    404  */
    405 struct wm_softc {
    406 	device_t sc_dev;		/* generic device information */
    407 	bus_space_tag_t sc_st;		/* bus space tag */
    408 	bus_space_handle_t sc_sh;	/* bus space handle */
    409 	bus_size_t sc_ss;		/* bus space size */
    410 	bus_space_tag_t sc_iot;		/* I/O space tag */
    411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    412 	bus_size_t sc_ios;		/* I/O space size */
    413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    415 	bus_size_t sc_flashs;		/* flash registers space size */
    416 	off_t sc_flashreg_offset;	/*
    417 					 * offset to flash registers from
    418 					 * start of BAR
    419 					 */
    420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    421 
    422 	struct ethercom sc_ethercom;	/* ethernet common data */
    423 	struct mii_data sc_mii;		/* MII/media information */
    424 
    425 	pci_chipset_tag_t sc_pc;
    426 	pcitag_t sc_pcitag;
    427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    429 
    430 	uint16_t sc_pcidevid;		/* PCI device ID */
    431 	wm_chip_type sc_type;		/* MAC type */
    432 	int sc_rev;			/* MAC revision */
    433 	wm_phy_type sc_phytype;		/* PHY type */
    434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    435 #define	WM_MEDIATYPE_UNKNOWN		0x00
    436 #define	WM_MEDIATYPE_FIBER		0x01
    437 #define	WM_MEDIATYPE_COPPER		0x02
    438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    440 	int sc_flags;			/* flags; see below */
    441 	int sc_if_flags;		/* last if_flags */
    442 	int sc_flowflags;		/* 802.3x flow control flags */
    443 	int sc_align_tweak;
    444 
    445 	void *sc_ihs[WM_MAX_NINTR];	/*
    446 					 * interrupt cookie.
    447 					 * legacy and msi use sc_ihs[0].
    448 					 */
    449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    450 	int sc_nintrs;			/* number of interrupts */
    451 
    452 	int sc_link_intr_idx;		/* index of MSI-X tables */
    453 
    454 	callout_t sc_tick_ch;		/* tick callout */
    455 	bool sc_core_stopping;
    456 
    457 	int sc_nvm_ver_major;
    458 	int sc_nvm_ver_minor;
    459 	int sc_nvm_ver_build;
    460 	int sc_nvm_addrbits;		/* NVM address bits */
    461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    462 	int sc_ich8_flash_base;
    463 	int sc_ich8_flash_bank_size;
    464 	int sc_nvm_k1_enabled;
    465 
    466 	int sc_nqueues;
    467 	struct wm_queue *sc_queue;
    468 
    469 	int sc_affinity_offset;
    470 
    471 #ifdef WM_EVENT_COUNTERS
    472 	/* Event counters. */
    473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    474 
     475 	/* WM_T_82542_2_1 only */
    476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    481 #endif /* WM_EVENT_COUNTERS */
    482 
     483 	/* This variable is used only on the 82547. */
    484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    485 
    486 	uint32_t sc_ctrl;		/* prototype CTRL register */
    487 #if 0
    488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    489 #endif
    490 	uint32_t sc_icr;		/* prototype interrupt bits */
    491 	uint32_t sc_itr;		/* prototype intr throttling reg */
    492 	uint32_t sc_tctl;		/* prototype TCTL register */
    493 	uint32_t sc_rctl;		/* prototype RCTL register */
    494 	uint32_t sc_txcw;		/* prototype TXCW register */
    495 	uint32_t sc_tipg;		/* prototype TIPG register */
    496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    497 	uint32_t sc_pba;		/* prototype PBA register */
    498 
    499 	int sc_tbi_linkup;		/* TBI link status */
    500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    502 
    503 	int sc_mchash_type;		/* multicast filter offset */
    504 
    505 	krndsource_t rnd_source;	/* random source */
    506 
    507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    508 
    509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    510 	kmutex_t *sc_ich_phymtx;	/*
    511 					 * 82574/82583/ICH/PCH specific PHY
    512 					 * mutex. For 82574/82583, the mutex
    513 					 * is used for both PHY and NVM.
    514 					 */
    515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    516 
    517 	struct wm_phyop phy;
    518 };
    519 
    520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
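
         /*
          * Note on the NULL checks above: sc_core_lock is optional; when it
          * is NULL these macros degrade to no-ops and WM_CORE_LOCKED()
          * always evaluates true.  A typical locked section:
          *
          *	WM_CORE_LOCK(sc);
          *	... modify shared softc state ...
          *	WM_CORE_UNLOCK(sc);
          */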
    523 
    524 #ifdef WM_MPSAFE
    525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    526 #else
    527 #define CALLOUT_FLAGS	0
    528 #endif
    529 
    530 #define	WM_RXCHAIN_RESET(rxq)						\
    531 do {									\
    532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    533 	*(rxq)->rxq_tailp = NULL;					\
    534 	(rxq)->rxq_len = 0;						\
    535 } while (/*CONSTCOND*/0)
    536 
    537 #define	WM_RXCHAIN_LINK(rxq, m)						\
    538 do {									\
    539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    540 	(rxq)->rxq_tailp = &(m)->m_next;				\
    541 } while (/*CONSTCOND*/0)
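
         /*
          * Illustrative walk-through: rxq_tailp always addresses the m_next
          * slot of the last mbuf (or rxq_head while the chain is empty), so
          * appending each buffer of a multi-descriptor packet is O(1):
          *
          *	WM_RXCHAIN_RESET(rxq);		rxq_head = NULL
          *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head = m1
          *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2
          */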
    542 
    543 #ifdef WM_EVENT_COUNTERS
    544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    546 
    547 #define WM_Q_EVCNT_INCR(qname, evname)			\
    548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    551 #else /* !WM_EVENT_COUNTERS */
    552 #define	WM_EVCNT_INCR(ev)	/* nothing */
    553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    554 
    555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    557 #endif /* !WM_EVENT_COUNTERS */
    558 
    559 #define	CSR_READ(sc, reg)						\
    560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    561 #define	CSR_WRITE(sc, reg, val)						\
    562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    563 #define	CSR_WRITE_FLUSH(sc)						\
    564 	(void) CSR_READ((sc), WMREG_STATUS)
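
         /*
          * Assumed usage sketch: CSR_WRITE_FLUSH() reads STATUS purely for
          * its side effect, forcing posted PCI writes out to the device
          * before the driver proceeds, e.g.
          *
          *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
          *	CSR_WRITE_FLUSH(sc);	the write has reached the chip here
          */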
    565 
    566 #define ICH8_FLASH_READ32(sc, reg)					\
    567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    568 	    (reg) + sc->sc_flashreg_offset)
    569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    571 	    (reg) + sc->sc_flashreg_offset, (data))
    572 
    573 #define ICH8_FLASH_READ16(sc, reg)					\
    574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    575 	    (reg) + sc->sc_flashreg_offset)
    576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset, (data))
    579 
    580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    582 
    583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    584 #define	WM_CDTXADDR_HI(txq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    587 
    588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    589 #define	WM_CDRXADDR_HI(rxq, x)						\
    590 	(sizeof(bus_addr_t) == 8 ?					\
    591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
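
         /*
          * Worked example for the HI/LO split above: with a 64-bit
          * bus_addr_t of 0x0000000123456000, LO is 0x23456000 and HI is
          * 0x00000001; with a 32-bit bus_addr_t the HI half is always 0.
          */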
    592 
    593 /*
    594  * Register read/write functions.
    595  * Other than CSR_{READ|WRITE}().
    596  */
    597 #if 0
    598 static inline uint32_t wm_io_read(struct wm_softc *, int);
    599 #endif
    600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    602 	uint32_t, uint32_t);
    603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    604 
    605 /*
    606  * Descriptor sync/init functions.
    607  */
    608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    611 
    612 /*
    613  * Device driver interface functions and commonly used functions.
    614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    615  */
    616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    617 static int	wm_match(device_t, cfdata_t, void *);
    618 static void	wm_attach(device_t, device_t, void *);
    619 static int	wm_detach(device_t, int);
    620 static bool	wm_suspend(device_t, const pmf_qual_t *);
    621 static bool	wm_resume(device_t, const pmf_qual_t *);
    622 static void	wm_watchdog(struct ifnet *);
    623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    624 static void	wm_tick(void *);
    625 static int	wm_ifflags_cb(struct ethercom *);
    626 static int	wm_ioctl(struct ifnet *, u_long, void *);
    627 /* MAC address related */
    628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    632 static void	wm_set_filter(struct wm_softc *);
    633 /* Reset and init related */
    634 static void	wm_set_vlan(struct wm_softc *);
    635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    636 static void	wm_get_auto_rd_done(struct wm_softc *);
    637 static void	wm_lan_init_done(struct wm_softc *);
    638 static void	wm_get_cfg_done(struct wm_softc *);
    639 static void	wm_initialize_hardware_bits(struct wm_softc *);
    640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    641 static void	wm_reset_phy(struct wm_softc *);
    642 static void	wm_flush_desc_rings(struct wm_softc *);
    643 static void	wm_reset(struct wm_softc *);
    644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    645 static void	wm_rxdrain(struct wm_rxqueue *);
    646 static void	wm_rss_getkey(uint8_t *);
    647 static void	wm_init_rss(struct wm_softc *);
    648 static void	wm_adjust_qnum(struct wm_softc *, int);
    649 static int	wm_setup_legacy(struct wm_softc *);
    650 static int	wm_setup_msix(struct wm_softc *);
    651 static int	wm_init(struct ifnet *);
    652 static int	wm_init_locked(struct ifnet *);
    653 static void	wm_turnon(struct wm_softc *);
    654 static void	wm_turnoff(struct wm_softc *);
    655 static void	wm_stop(struct ifnet *, int);
    656 static void	wm_stop_locked(struct ifnet *, int);
    657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    658 static void	wm_82547_txfifo_stall(void *);
    659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    660 /* DMA related */
    661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    665     struct wm_txqueue *);
    666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    669     struct wm_rxqueue *);
    670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    677     struct wm_txqueue *);
    678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_txrx_queues(struct wm_softc *);
    681 static void	wm_free_txrx_queues(struct wm_softc *);
    682 static int	wm_init_txrx_queues(struct wm_softc *);
    683 /* Start */
    684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    685     uint32_t *, uint8_t *);
    686 static void	wm_start(struct ifnet *);
    687 static void	wm_start_locked(struct ifnet *);
    688 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    689     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    690 static void	wm_nq_start(struct ifnet *);
    691 static void	wm_nq_start_locked(struct ifnet *);
    692 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    693 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
    694 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    695 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    696 /* Interrupt */
    697 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    698 static void	wm_rxeof(struct wm_rxqueue *);
    699 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    700 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    701 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    702 static void	wm_linkintr(struct wm_softc *, uint32_t);
    703 static int	wm_intr_legacy(void *);
    704 static int	wm_txrxintr_msix(void *);
    705 static int	wm_linkintr_msix(void *);
    706 
    707 /*
    708  * Media related.
    709  * GMII, SGMII, TBI, SERDES and SFP.
    710  */
    711 /* Common */
    712 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    713 /* GMII related */
    714 static void	wm_gmii_reset(struct wm_softc *);
    715 static int	wm_get_phy_id_82575(struct wm_softc *);
    716 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    717 static int	wm_gmii_mediachange(struct ifnet *);
    718 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    719 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    720 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    721 static int	wm_gmii_i82543_readreg(device_t, int, int);
    722 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    723 static int	wm_gmii_mdic_readreg(device_t, int, int);
    724 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    725 static int	wm_gmii_i82544_readreg(device_t, int, int);
    726 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    727 static int	wm_gmii_i80003_readreg(device_t, int, int);
    728 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    729 static int	wm_gmii_bm_readreg(device_t, int, int);
    730 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    731 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    732 static int	wm_gmii_hv_readreg(device_t, int, int);
    733 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    734 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    735 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    736 static int	wm_gmii_82580_readreg(device_t, int, int);
    737 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    738 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    739 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    740 static void	wm_gmii_statchg(struct ifnet *);
    741 static int	wm_kmrn_readreg(struct wm_softc *, int);
    742 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    743 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    744 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    745 /* SGMII */
    746 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    747 static int	wm_sgmii_readreg(device_t, int, int);
    748 static void	wm_sgmii_writereg(device_t, int, int, int);
    749 /* TBI related */
    750 static void	wm_tbi_mediainit(struct wm_softc *);
    751 static int	wm_tbi_mediachange(struct ifnet *);
    752 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    753 static int	wm_check_for_link(struct wm_softc *);
    754 static void	wm_tbi_tick(struct wm_softc *);
    755 /* SERDES related */
    756 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    757 static int	wm_serdes_mediachange(struct ifnet *);
    758 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    759 static void	wm_serdes_tick(struct wm_softc *);
    760 /* SFP related */
    761 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    762 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    763 
    764 /*
    765  * NVM related.
    766  * Microwire, SPI (w/wo EERD) and Flash.
    767  */
    768 /* Misc functions */
    769 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    770 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    771 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    772 /* Microwire */
    773 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    774 /* SPI */
    775 static int	wm_nvm_ready_spi(struct wm_softc *);
    776 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    777 /* Using with EERD */
    778 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    779 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    780 /* Flash */
    781 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    782     unsigned int *);
    783 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    784 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    785 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    786 	uint32_t *);
    787 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    788 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    789 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    790 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    791 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    792 /* iNVM */
    793 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    794 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    795 /* Lock, detecting NVM type, validate checksum and read */
    796 static int	wm_nvm_acquire(struct wm_softc *);
    797 static void	wm_nvm_release(struct wm_softc *);
    798 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    799 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    800 static int	wm_nvm_validate_checksum(struct wm_softc *);
    801 static void	wm_nvm_version_invm(struct wm_softc *);
    802 static void	wm_nvm_version(struct wm_softc *);
    803 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    804 
    805 /*
    806  * Hardware semaphores.
     807  * Very complex...
    808  */
    809 static int	wm_get_null(struct wm_softc *);
    810 static void	wm_put_null(struct wm_softc *);
    811 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    812 static void	wm_put_swsm_semaphore(struct wm_softc *);
    813 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    814 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    815 static int	wm_get_phy_82575(struct wm_softc *);
    816 static void	wm_put_phy_82575(struct wm_softc *);
    817 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    818 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    819 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    820 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    821 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    822 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    823 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    824 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    825 
    826 /*
    827  * Management mode and power management related subroutines.
    828  * BMC, AMT, suspend/resume and EEE.
    829  */
    830 #if 0
    831 static int	wm_check_mng_mode(struct wm_softc *);
    832 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    833 static int	wm_check_mng_mode_82574(struct wm_softc *);
    834 static int	wm_check_mng_mode_generic(struct wm_softc *);
    835 #endif
    836 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    837 static bool	wm_phy_resetisblocked(struct wm_softc *);
    838 static void	wm_get_hw_control(struct wm_softc *);
    839 static void	wm_release_hw_control(struct wm_softc *);
    840 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    841 static void	wm_smbustopci(struct wm_softc *);
    842 static void	wm_init_manageability(struct wm_softc *);
    843 static void	wm_release_manageability(struct wm_softc *);
    844 static void	wm_get_wakeup(struct wm_softc *);
    845 static void	wm_ulp_disable(struct wm_softc *);
    846 static void	wm_enable_phy_wakeup(struct wm_softc *);
    847 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    848 static void	wm_enable_wakeup(struct wm_softc *);
    849 /* LPLU (Low Power Link Up) */
    850 static void	wm_lplu_d0_disable(struct wm_softc *);
    851 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    852 /* EEE */
    853 static void	wm_set_eee_i350(struct wm_softc *);
    854 
    855 /*
    856  * Workarounds (mainly PHY related).
    857  * Basically, PHY's workarounds are in the PHY drivers.
    858  */
    859 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    860 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    861 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    862 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    863 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    864 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    865 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    866 static void	wm_reset_init_script_82575(struct wm_softc *);
    867 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    868 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    869 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    870 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    871 static void	wm_pll_workaround_i210(struct wm_softc *);
    872 
    873 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    874     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    875 
    876 /*
    877  * Devices supported by this driver.
    878  */
    879 static const struct wm_product {
    880 	pci_vendor_id_t		wmp_vendor;
    881 	pci_product_id_t	wmp_product;
    882 	const char		*wmp_name;
    883 	wm_chip_type		wmp_type;
    884 	uint32_t		wmp_flags;
    885 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    886 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    887 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    888 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    889 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    890 } wm_products[] = {
    891 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    892 	  "Intel i82542 1000BASE-X Ethernet",
    893 	  WM_T_82542_2_1,	WMP_F_FIBER },
    894 
    895 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    896 	  "Intel i82543GC 1000BASE-X Ethernet",
    897 	  WM_T_82543,		WMP_F_FIBER },
    898 
    899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    900 	  "Intel i82543GC 1000BASE-T Ethernet",
    901 	  WM_T_82543,		WMP_F_COPPER },
    902 
    903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    904 	  "Intel i82544EI 1000BASE-T Ethernet",
    905 	  WM_T_82544,		WMP_F_COPPER },
    906 
    907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    908 	  "Intel i82544EI 1000BASE-X Ethernet",
    909 	  WM_T_82544,		WMP_F_FIBER },
    910 
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    912 	  "Intel i82544GC 1000BASE-T Ethernet",
    913 	  WM_T_82544,		WMP_F_COPPER },
    914 
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    916 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    917 	  WM_T_82544,		WMP_F_COPPER },
    918 
    919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    920 	  "Intel i82540EM 1000BASE-T Ethernet",
    921 	  WM_T_82540,		WMP_F_COPPER },
    922 
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    924 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    925 	  WM_T_82540,		WMP_F_COPPER },
    926 
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    928 	  "Intel i82540EP 1000BASE-T Ethernet",
    929 	  WM_T_82540,		WMP_F_COPPER },
    930 
    931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    932 	  "Intel i82540EP 1000BASE-T Ethernet",
    933 	  WM_T_82540,		WMP_F_COPPER },
    934 
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    936 	  "Intel i82540EP 1000BASE-T Ethernet",
    937 	  WM_T_82540,		WMP_F_COPPER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    940 	  "Intel i82545EM 1000BASE-T Ethernet",
    941 	  WM_T_82545,		WMP_F_COPPER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    944 	  "Intel i82545GM 1000BASE-T Ethernet",
    945 	  WM_T_82545_3,		WMP_F_COPPER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    948 	  "Intel i82545GM 1000BASE-X Ethernet",
    949 	  WM_T_82545_3,		WMP_F_FIBER },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    952 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    953 	  WM_T_82545_3,		WMP_F_SERDES },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    956 	  "Intel i82546EB 1000BASE-T Ethernet",
    957 	  WM_T_82546,		WMP_F_COPPER },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    960 	  "Intel i82546EB 1000BASE-T Ethernet",
    961 	  WM_T_82546,		WMP_F_COPPER },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    964 	  "Intel i82545EM 1000BASE-X Ethernet",
    965 	  WM_T_82545,		WMP_F_FIBER },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    968 	  "Intel i82546EB 1000BASE-X Ethernet",
    969 	  WM_T_82546,		WMP_F_FIBER },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    972 	  "Intel i82546GB 1000BASE-T Ethernet",
    973 	  WM_T_82546_3,		WMP_F_COPPER },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    976 	  "Intel i82546GB 1000BASE-X Ethernet",
    977 	  WM_T_82546_3,		WMP_F_FIBER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    980 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    981 	  WM_T_82546_3,		WMP_F_SERDES },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    984 	  "i82546GB quad-port Gigabit Ethernet",
    985 	  WM_T_82546_3,		WMP_F_COPPER },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    988 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    989 	  WM_T_82546_3,		WMP_F_COPPER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    992 	  "Intel PRO/1000MT (82546GB)",
    993 	  WM_T_82546_3,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    996 	  "Intel i82541EI 1000BASE-T Ethernet",
    997 	  WM_T_82541,		WMP_F_COPPER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1000 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1001 	  WM_T_82541,		WMP_F_COPPER },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1004 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1005 	  WM_T_82541,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1008 	  "Intel i82541ER 1000BASE-T Ethernet",
   1009 	  WM_T_82541_2,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1012 	  "Intel i82541GI 1000BASE-T Ethernet",
   1013 	  WM_T_82541_2,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1016 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1017 	  WM_T_82541_2,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1020 	  "Intel i82541PI 1000BASE-T Ethernet",
   1021 	  WM_T_82541_2,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1024 	  "Intel i82547EI 1000BASE-T Ethernet",
   1025 	  WM_T_82547,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1028 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1029 	  WM_T_82547,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1032 	  "Intel i82547GI 1000BASE-T Ethernet",
   1033 	  WM_T_82547_2,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1036 	  "Intel PRO/1000 PT (82571EB)",
   1037 	  WM_T_82571,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1040 	  "Intel PRO/1000 PF (82571EB)",
   1041 	  WM_T_82571,		WMP_F_FIBER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1044 	  "Intel PRO/1000 PB (82571EB)",
   1045 	  WM_T_82571,		WMP_F_SERDES },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1048 	  "Intel PRO/1000 QT (82571EB)",
   1049 	  WM_T_82571,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1052 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1053 	  WM_T_82571,		WMP_F_COPPER, },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1056 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1057 	  WM_T_82571,		WMP_F_COPPER, },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1060 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1061 	  WM_T_82571,		WMP_F_SERDES, },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1064 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1065 	  WM_T_82571,		WMP_F_SERDES, },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1068 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1069 	  WM_T_82571,		WMP_F_FIBER, },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1072 	  "Intel i82572EI 1000baseT Ethernet",
   1073 	  WM_T_82572,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1076 	  "Intel i82572EI 1000baseX Ethernet",
   1077 	  WM_T_82572,		WMP_F_FIBER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1080 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1081 	  WM_T_82572,		WMP_F_SERDES },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1084 	  "Intel i82572EI 1000baseT Ethernet",
   1085 	  WM_T_82572,		WMP_F_COPPER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1088 	  "Intel i82573E",
   1089 	  WM_T_82573,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1092 	  "Intel i82573E IAMT",
   1093 	  WM_T_82573,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1096 	  "Intel i82573L Gigabit Ethernet",
   1097 	  WM_T_82573,		WMP_F_COPPER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1100 	  "Intel i82574L",
   1101 	  WM_T_82574,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1104 	  "Intel i82574L",
   1105 	  WM_T_82574,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1108 	  "Intel i82583V",
   1109 	  WM_T_82583,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1112 	  "i80003 dual 1000baseT Ethernet",
   1113 	  WM_T_80003,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1116 	  "i80003 dual 1000baseX Ethernet",
   1117 	  WM_T_80003,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1120 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1121 	  WM_T_80003,		WMP_F_SERDES },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1124 	  "Intel i80003 1000baseT Ethernet",
   1125 	  WM_T_80003,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1128 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1129 	  WM_T_80003,		WMP_F_SERDES },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1132 	  "Intel i82801H (M_AMT) LAN Controller",
   1133 	  WM_T_ICH8,		WMP_F_COPPER },
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1135 	  "Intel i82801H (AMT) LAN Controller",
   1136 	  WM_T_ICH8,		WMP_F_COPPER },
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1138 	  "Intel i82801H LAN Controller",
   1139 	  WM_T_ICH8,		WMP_F_COPPER },
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1141 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1142 	  WM_T_ICH8,		WMP_F_COPPER },
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1144 	  "Intel i82801H (M) LAN Controller",
   1145 	  WM_T_ICH8,		WMP_F_COPPER },
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1147 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1148 	  WM_T_ICH8,		WMP_F_COPPER },
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1150 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1151 	  WM_T_ICH8,		WMP_F_COPPER },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1153 	  "82567V-3 LAN Controller",
   1154 	  WM_T_ICH8,		WMP_F_COPPER },
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1156 	  "82801I (AMT) LAN Controller",
   1157 	  WM_T_ICH9,		WMP_F_COPPER },
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1159 	  "82801I 10/100 LAN Controller",
   1160 	  WM_T_ICH9,		WMP_F_COPPER },
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1162 	  "82801I (G) 10/100 LAN Controller",
   1163 	  WM_T_ICH9,		WMP_F_COPPER },
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1165 	  "82801I (GT) 10/100 LAN Controller",
   1166 	  WM_T_ICH9,		WMP_F_COPPER },
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1168 	  "82801I (C) LAN Controller",
   1169 	  WM_T_ICH9,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1171 	  "82801I mobile LAN Controller",
   1172 	  WM_T_ICH9,		WMP_F_COPPER },
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1174 	  "82801I mobile (V) LAN Controller",
   1175 	  WM_T_ICH9,		WMP_F_COPPER },
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1177 	  "82801I mobile (AMT) LAN Controller",
   1178 	  WM_T_ICH9,		WMP_F_COPPER },
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1180 	  "82567LM-4 LAN Controller",
   1181 	  WM_T_ICH9,		WMP_F_COPPER },
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1183 	  "82567LM-2 LAN Controller",
   1184 	  WM_T_ICH10,		WMP_F_COPPER },
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1186 	  "82567LF-2 LAN Controller",
   1187 	  WM_T_ICH10,		WMP_F_COPPER },
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1189 	  "82567LM-3 LAN Controller",
   1190 	  WM_T_ICH10,		WMP_F_COPPER },
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1192 	  "82567LF-3 LAN Controller",
   1193 	  WM_T_ICH10,		WMP_F_COPPER },
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1195 	  "82567V-2 LAN Controller",
   1196 	  WM_T_ICH10,		WMP_F_COPPER },
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1198 	  "82567V-3? LAN Controller",
   1199 	  WM_T_ICH10,		WMP_F_COPPER },
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1201 	  "HANKSVILLE LAN Controller",
   1202 	  WM_T_ICH10,		WMP_F_COPPER },
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1204 	  "PCH LAN (82577LM) Controller",
   1205 	  WM_T_PCH,		WMP_F_COPPER },
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1207 	  "PCH LAN (82577LC) Controller",
   1208 	  WM_T_PCH,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1210 	  "PCH LAN (82578DM) Controller",
   1211 	  WM_T_PCH,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1213 	  "PCH LAN (82578DC) Controller",
   1214 	  WM_T_PCH,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1216 	  "PCH2 LAN (82579LM) Controller",
   1217 	  WM_T_PCH2,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1219 	  "PCH2 LAN (82579V) Controller",
   1220 	  WM_T_PCH2,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1222 	  "82575EB dual-1000baseT Ethernet",
   1223 	  WM_T_82575,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1225 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1226 	  WM_T_82575,		WMP_F_SERDES },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1228 	  "82575GB quad-1000baseT Ethernet",
   1229 	  WM_T_82575,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1231 	  "82575GB quad-1000baseT Ethernet (PM)",
   1232 	  WM_T_82575,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1234 	  "82576 1000BaseT Ethernet",
   1235 	  WM_T_82576,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1237 	  "82576 1000BaseX Ethernet",
   1238 	  WM_T_82576,		WMP_F_FIBER },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1241 	  "82576 gigabit Ethernet (SERDES)",
   1242 	  WM_T_82576,		WMP_F_SERDES },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1245 	  "82576 quad-1000BaseT Ethernet",
   1246 	  WM_T_82576,		WMP_F_COPPER },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1249 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1250 	  WM_T_82576,		WMP_F_COPPER },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1253 	  "82576 gigabit Ethernet",
   1254 	  WM_T_82576,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1257 	  "82576 gigabit Ethernet (SERDES)",
   1258 	  WM_T_82576,		WMP_F_SERDES },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1260 	  "82576 quad-gigabit Ethernet (SERDES)",
   1261 	  WM_T_82576,		WMP_F_SERDES },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1264 	  "82580 1000BaseT Ethernet",
   1265 	  WM_T_82580,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1267 	  "82580 1000BaseX Ethernet",
   1268 	  WM_T_82580,		WMP_F_FIBER },
   1269 
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1271 	  "82580 1000BaseT Ethernet (SERDES)",
   1272 	  WM_T_82580,		WMP_F_SERDES },
   1273 
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1275 	  "82580 gigabit Ethernet (SGMII)",
   1276 	  WM_T_82580,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1278 	  "82580 dual-1000BaseT Ethernet",
   1279 	  WM_T_82580,		WMP_F_COPPER },
   1280 
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1282 	  "82580 quad-1000BaseX Ethernet",
   1283 	  WM_T_82580,		WMP_F_FIBER },
   1284 
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1286 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1287 	  WM_T_82580,		WMP_F_COPPER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1290 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1291 	  WM_T_82580,		WMP_F_SERDES },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1294 	  "DH89XXCC 1000BASE-KX Ethernet",
   1295 	  WM_T_82580,		WMP_F_SERDES },
   1296 
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1298 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1299 	  WM_T_82580,		WMP_F_SERDES },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1302 	  "I350 Gigabit Network Connection",
   1303 	  WM_T_I350,		WMP_F_COPPER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1306 	  "I350 Gigabit Fiber Network Connection",
   1307 	  WM_T_I350,		WMP_F_FIBER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1310 	  "I350 Gigabit Backplane Connection",
   1311 	  WM_T_I350,		WMP_F_SERDES },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1314 	  "I350 Quad Port Gigabit Ethernet",
   1315 	  WM_T_I350,		WMP_F_SERDES },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1318 	  "I350 Gigabit Connection",
   1319 	  WM_T_I350,		WMP_F_COPPER },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1322 	  "I354 Gigabit Ethernet (KX)",
   1323 	  WM_T_I354,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1326 	  "I354 Gigabit Ethernet (SGMII)",
   1327 	  WM_T_I354,		WMP_F_COPPER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1330 	  "I354 Gigabit Ethernet (2.5G)",
   1331 	  WM_T_I354,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1334 	  "I210-T1 Ethernet Server Adapter",
   1335 	  WM_T_I210,		WMP_F_COPPER },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1338 	  "I210 Ethernet (Copper OEM)",
   1339 	  WM_T_I210,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1342 	  "I210 Ethernet (Copper IT)",
   1343 	  WM_T_I210,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1346 	  "I210 Ethernet (FLASH less)",
   1347 	  WM_T_I210,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1350 	  "I210 Gigabit Ethernet (Fiber)",
   1351 	  WM_T_I210,		WMP_F_FIBER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1354 	  "I210 Gigabit Ethernet (SERDES)",
   1355 	  WM_T_I210,		WMP_F_SERDES },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1358 	  "I210 Gigabit Ethernet (FLASH less)",
   1359 	  WM_T_I210,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1362 	  "I210 Gigabit Ethernet (SGMII)",
   1363 	  WM_T_I210,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1366 	  "I211 Ethernet (COPPER)",
   1367 	  WM_T_I211,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1369 	  "I217 V Ethernet Connection",
   1370 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1372 	  "I217 LM Ethernet Connection",
   1373 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1375 	  "I218 V Ethernet Connection",
   1376 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1378 	  "I218 V Ethernet Connection",
   1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1381 	  "I218 V Ethernet Connection",
   1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1384 	  "I218 LM Ethernet Connection",
   1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1387 	  "I218 LM Ethernet Connection",
   1388 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1390 	  "I218 LM Ethernet Connection",
   1391 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1392 #if 0
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1394 	  "I219 V Ethernet Connection",
   1395 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1397 	  "I219 V Ethernet Connection",
   1398 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1400 	  "I219 V Ethernet Connection",
   1401 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1403 	  "I219 V Ethernet Connection",
   1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1406 	  "I219 LM Ethernet Connection",
   1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1409 	  "I219 LM Ethernet Connection",
   1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1412 	  "I219 LM Ethernet Connection",
   1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1415 	  "I219 LM Ethernet Connection",
   1416 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1418 	  "I219 LM Ethernet Connection",
   1419 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1420 #endif
   1421 	{ 0,			0,
   1422 	  NULL,
   1423 	  0,			0 },
   1424 };
   1425 
   1426 /*
   1427  * Register read/write functions.
   1428  * Other than CSR_{READ|WRITE}().
   1429  */
   1430 
   1431 #if 0 /* Not currently used */
   1432 static inline uint32_t
   1433 wm_io_read(struct wm_softc *sc, int reg)
   1434 {
   1435 
   1436 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1437 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1438 }
   1439 #endif
   1440 
   1441 static inline void
   1442 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1443 {
   1444 
   1445 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1446 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1447 }
   1448 
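/*
 * Illustrative sketch, not part of the driver: the helpers above
 * implement a classic two-register indirect window.  The register
 * offset is latched by writing it to I/O BAR offset 0 (IOADDR) and
 * the data then moves through I/O BAR offset 4 (IODATA).  A
 * read-modify-write helper built on them could look like the
 * following (wm_io_setbits is hypothetical and assumes wm_io_read()
 * above were compiled in):
 */
#if 0
static inline void
wm_io_setbits(struct wm_softc *sc, int reg, uint32_t bits)
{
	uint32_t val;

	/* Latch 'reg' into IOADDR, then fetch the value via IODATA. */
	val = wm_io_read(sc, reg);
	/* Write the modified value back through the same window. */
	wm_io_write(sc, reg, val | bits);
}
#endif
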
   1449 static inline void
   1450 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1451     uint32_t data)
   1452 {
   1453 	uint32_t regval;
   1454 	int i;
   1455 
   1456 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1457 
   1458 	CSR_WRITE(sc, reg, regval);
   1459 
   1460 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1461 		delay(5);
   1462 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1463 			break;
   1464 	}
   1465 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1466 		aprint_error("%s: WARNING:"
   1467 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1468 		    device_xname(sc->sc_dev), reg);
   1469 	}
   1470 }
   1471 
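/*
 * Usage sketch for the helper above (the register and values are
 * hypothetical, shown only to illustrate the write-then-poll
 * protocol): a call such as
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x40);
 *
 * encodes 0x40 in the SCTL_CTL_DATA_MASK field and 0x1b in the
 * address field, writes the combined word, and then busy-waits in
 * 5us steps (up to SCTL_CTL_POLL_TIMEOUT iterations) for the chip to
 * set SCTL_CTL_READY.
 */
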
   1472 static inline void
   1473 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1474 {
   1475 	wa->wa_low = htole32(v & 0xffffffffU);
   1476 	if (sizeof(bus_addr_t) == 8)
   1477 		wa->wa_high = htole32((uint64_t) v >> 32);
   1478 	else
   1479 		wa->wa_high = 0;
   1480 }
   1481 
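/*
 * Worked example for wm_set_dma_addr() above (illustrative value;
 * htole32() is a no-op on little-endian hosts): with a 64-bit
 * bus_addr_t of v = 0x0000001234abcd00,
 *
 *	wa->wa_low  = htole32(0x34abcd00);
 *	wa->wa_high = htole32(0x00000012);
 *
 * With a 32-bit bus_addr_t, wa_high is simply forced to zero.
 */
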
   1482 /*
   1483  * Descriptor sync/init functions.
   1484  */
   1485 static inline void
   1486 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1487 {
   1488 	struct wm_softc *sc = txq->txq_sc;
   1489 
   1490 	/* If it will wrap around, sync to the end of the ring. */
   1491 	if ((start + num) > WM_NTXDESC(txq)) {
   1492 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1493 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1494 		    (WM_NTXDESC(txq) - start), ops);
   1495 		num -= (WM_NTXDESC(txq) - start);
   1496 		start = 0;
   1497 	}
   1498 
   1499 	/* Now sync whatever is left. */
   1500 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1501 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1502 }
   1503 
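/*
 * Worked example of the wrap-around handling above: on a ring with
 * WM_NTXDESC(txq) == 256, a call with start == 250 and num == 10
 * issues two bus_dmamap_sync() calls, one for descriptors 250..255
 * (6 descriptors, up to the end of the ring) and one for the
 * remaining descriptors 0..3.
 */
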
   1504 static inline void
   1505 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1506 {
   1507 	struct wm_softc *sc = rxq->rxq_sc;
   1508 
   1509 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1510 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1511 }
   1512 
   1513 static inline void
   1514 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1515 {
   1516 	struct wm_softc *sc = rxq->rxq_sc;
   1517 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1518 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1519 	struct mbuf *m = rxs->rxs_mbuf;
   1520 
   1521 	/*
   1522 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1523 	 * so that the payload after the Ethernet header is aligned
   1524 	 * to a 4-byte boundary.
    1525 	 *
   1526 	 * XXX BRAINDAMAGE ALERT!
   1527 	 * The stupid chip uses the same size for every buffer, which
   1528 	 * is set in the Receive Control register.  We are using the 2K
   1529 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1530 	 * reason, we can't "scoot" packets longer than the standard
   1531 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1532 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1533 	 * the upper layer copy the headers.
   1534 	 */
   1535 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1536 
   1537 	wm_set_dma_addr(&rxd->wrx_addr,
   1538 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1539 	rxd->wrx_len = 0;
   1540 	rxd->wrx_cksum = 0;
   1541 	rxd->wrx_status = 0;
   1542 	rxd->wrx_errors = 0;
   1543 	rxd->wrx_special = 0;
   1544 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1545 
   1546 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1547 }
   1548 
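/*
 * Why the 2-byte "scoot" above helps: an Ethernet header is
 * ETHER_HDR_LEN == 14 bytes, so receiving into (buffer + 2) places
 * the IP header at offset 2 + 14 = 16, a 4-byte boundary.  The stack
 * can then load 32-bit header fields without unaligned accesses on
 * strict-alignment CPUs.
 */
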
   1549 /*
   1550  * Device driver interface functions and commonly used functions.
   1551  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1552  */
   1553 
   1554 /* Lookup supported device table */
   1555 static const struct wm_product *
   1556 wm_lookup(const struct pci_attach_args *pa)
   1557 {
   1558 	const struct wm_product *wmp;
   1559 
   1560 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1561 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1562 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1563 			return wmp;
   1564 	}
   1565 	return NULL;
   1566 }
   1567 
   1568 /* The match function (ca_match) */
   1569 static int
   1570 wm_match(device_t parent, cfdata_t cf, void *aux)
   1571 {
   1572 	struct pci_attach_args *pa = aux;
   1573 
   1574 	if (wm_lookup(pa) != NULL)
   1575 		return 1;
   1576 
   1577 	return 0;
   1578 }
   1579 
   1580 /* The attach function (ca_attach) */
   1581 static void
   1582 wm_attach(device_t parent, device_t self, void *aux)
   1583 {
   1584 	struct wm_softc *sc = device_private(self);
   1585 	struct pci_attach_args *pa = aux;
   1586 	prop_dictionary_t dict;
   1587 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1588 	pci_chipset_tag_t pc = pa->pa_pc;
   1589 	int counts[PCI_INTR_TYPE_SIZE];
   1590 	pci_intr_type_t max_type;
   1591 	const char *eetype, *xname;
   1592 	bus_space_tag_t memt;
   1593 	bus_space_handle_t memh;
   1594 	bus_size_t memsize;
   1595 	int memh_valid;
   1596 	int i, error;
   1597 	const struct wm_product *wmp;
   1598 	prop_data_t ea;
   1599 	prop_number_t pn;
   1600 	uint8_t enaddr[ETHER_ADDR_LEN];
   1601 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1602 	pcireg_t preg, memtype;
   1603 	uint16_t eeprom_data, apme_mask;
   1604 	bool force_clear_smbi;
   1605 	uint32_t link_mode;
   1606 	uint32_t reg;
   1607 
   1608 	sc->sc_dev = self;
   1609 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1610 	sc->sc_core_stopping = false;
   1611 
   1612 	wmp = wm_lookup(pa);
   1613 #ifdef DIAGNOSTIC
   1614 	if (wmp == NULL) {
   1615 		printf("\n");
   1616 		panic("wm_attach: impossible");
   1617 	}
   1618 #endif
   1619 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1620 
   1621 	sc->sc_pc = pa->pa_pc;
   1622 	sc->sc_pcitag = pa->pa_tag;
   1623 
   1624 	if (pci_dma64_available(pa))
   1625 		sc->sc_dmat = pa->pa_dmat64;
   1626 	else
   1627 		sc->sc_dmat = pa->pa_dmat;
   1628 
   1629 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1630 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1631 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1632 
   1633 	sc->sc_type = wmp->wmp_type;
   1634 
   1635 	/* Set default function pointers */
   1636 	sc->phy.acquire = wm_get_null;
   1637 	sc->phy.release = wm_put_null;
   1638 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1639 
   1640 	if (sc->sc_type < WM_T_82543) {
   1641 		if (sc->sc_rev < 2) {
   1642 			aprint_error_dev(sc->sc_dev,
   1643 			    "i82542 must be at least rev. 2\n");
   1644 			return;
   1645 		}
   1646 		if (sc->sc_rev < 3)
   1647 			sc->sc_type = WM_T_82542_2_0;
   1648 	}
   1649 
   1650 	/*
   1651 	 * Disable MSI for Errata:
   1652 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1653 	 *
   1654 	 *  82544: Errata 25
   1655 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1656 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1657 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1658 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1659 	 *
   1660 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1661 	 *
   1662 	 *  82571 & 82572: Errata 63
   1663 	 */
   1664 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1665 	    || (sc->sc_type == WM_T_82572))
   1666 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1667 
   1668 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1669 	    || (sc->sc_type == WM_T_82580)
   1670 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1671 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1672 		sc->sc_flags |= WM_F_NEWQUEUE;
   1673 
   1674 	/* Set device properties (mactype) */
   1675 	dict = device_properties(sc->sc_dev);
   1676 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1677 
   1678 	/*
    1679 	 * Map the device.  All devices support memory-mapped access,
   1680 	 * and it is really required for normal operation.
   1681 	 */
   1682 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1683 	switch (memtype) {
   1684 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1685 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1686 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1687 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1688 		break;
   1689 	default:
   1690 		memh_valid = 0;
   1691 		break;
   1692 	}
   1693 
   1694 	if (memh_valid) {
   1695 		sc->sc_st = memt;
   1696 		sc->sc_sh = memh;
   1697 		sc->sc_ss = memsize;
   1698 	} else {
   1699 		aprint_error_dev(sc->sc_dev,
   1700 		    "unable to map device registers\n");
   1701 		return;
   1702 	}
   1703 
   1704 	/*
   1705 	 * In addition, i82544 and later support I/O mapped indirect
   1706 	 * register access.  It is not desirable (nor supported in
   1707 	 * this driver) to use it for normal operation, though it is
   1708 	 * required to work around bugs in some chip versions.
   1709 	 */
   1710 	if (sc->sc_type >= WM_T_82544) {
   1711 		/* First we have to find the I/O BAR. */
   1712 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1713 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1714 			if (memtype == PCI_MAPREG_TYPE_IO)
   1715 				break;
   1716 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1717 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1718 				i += 4;	/* skip high bits, too */
   1719 		}
   1720 		if (i < PCI_MAPREG_END) {
   1721 			/*
   1722 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
    1723 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1724 			 * That's not a problem, because those newer
    1725 			 * chips don't have this bug.
    1726 			 *
    1727 			 * The i8254x doesn't apparently respond when the
    1728 			 * I/O BAR is 0, which suggests that it hasn't
    1729 			 * been configured.
   1730 			 */
   1731 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1732 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1733 				aprint_error_dev(sc->sc_dev,
   1734 				    "WARNING: I/O BAR at zero.\n");
   1735 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1736 					0, &sc->sc_iot, &sc->sc_ioh,
   1737 					NULL, &sc->sc_ios) == 0) {
   1738 				sc->sc_flags |= WM_F_IOH_VALID;
   1739 			} else {
   1740 				aprint_error_dev(sc->sc_dev,
   1741 				    "WARNING: unable to map I/O space\n");
   1742 			}
   1743 		}
   1744 
   1745 	}
   1746 
   1747 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1748 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1749 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1750 	if (sc->sc_type < WM_T_82542_2_1)
   1751 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1752 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1753 
   1754 	/* power up chip */
   1755 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1756 	    NULL)) && error != EOPNOTSUPP) {
   1757 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1758 		return;
   1759 	}
   1760 
   1761 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1762 
   1763 	/* Allocation settings */
   1764 	max_type = PCI_INTR_TYPE_MSIX;
   1765 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1766 	counts[PCI_INTR_TYPE_MSI] = 1;
   1767 	counts[PCI_INTR_TYPE_INTX] = 1;
   1768 
   1769 alloc_retry:
   1770 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1771 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1772 		return;
   1773 	}
   1774 
   1775 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1776 		error = wm_setup_msix(sc);
   1777 		if (error) {
   1778 			pci_intr_release(pc, sc->sc_intrs,
   1779 			    counts[PCI_INTR_TYPE_MSIX]);
   1780 
   1781 			/* Setup for MSI: Disable MSI-X */
   1782 			max_type = PCI_INTR_TYPE_MSI;
   1783 			counts[PCI_INTR_TYPE_MSI] = 1;
   1784 			counts[PCI_INTR_TYPE_INTX] = 1;
   1785 			goto alloc_retry;
   1786 		}
    1787 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1788 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1789 		error = wm_setup_legacy(sc);
   1790 		if (error) {
   1791 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1792 			    counts[PCI_INTR_TYPE_MSI]);
   1793 
   1794 			/* The next try is for INTx: Disable MSI */
   1795 			max_type = PCI_INTR_TYPE_INTX;
   1796 			counts[PCI_INTR_TYPE_INTX] = 1;
   1797 			goto alloc_retry;
   1798 		}
   1799 	} else {
   1800 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1801 		error = wm_setup_legacy(sc);
   1802 		if (error) {
   1803 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1804 			    counts[PCI_INTR_TYPE_INTX]);
   1805 			return;
   1806 		}
   1807 	}
   1808 
   1809 	/*
   1810 	 * Check the function ID (unit number of the chip).
   1811 	 */
   1812 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1813 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1814 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1815 	    || (sc->sc_type == WM_T_82580)
   1816 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1817 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1818 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1819 	else
   1820 		sc->sc_funcid = 0;
   1821 
   1822 	/*
   1823 	 * Determine a few things about the bus we're connected to.
   1824 	 */
   1825 	if (sc->sc_type < WM_T_82543) {
   1826 		/* We don't really know the bus characteristics here. */
   1827 		sc->sc_bus_speed = 33;
   1828 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1829 		/*
   1830 		 * CSA (Communication Streaming Architecture) is about as fast
    1831 		 * as a 32-bit 66MHz PCI bus.
   1832 		 */
   1833 		sc->sc_flags |= WM_F_CSA;
   1834 		sc->sc_bus_speed = 66;
   1835 		aprint_verbose_dev(sc->sc_dev,
   1836 		    "Communication Streaming Architecture\n");
   1837 		if (sc->sc_type == WM_T_82547) {
   1838 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1839 			callout_setfunc(&sc->sc_txfifo_ch,
   1840 					wm_82547_txfifo_stall, sc);
   1841 			aprint_verbose_dev(sc->sc_dev,
   1842 			    "using 82547 Tx FIFO stall work-around\n");
   1843 		}
   1844 	} else if (sc->sc_type >= WM_T_82571) {
   1845 		sc->sc_flags |= WM_F_PCIE;
   1846 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1847 		    && (sc->sc_type != WM_T_ICH10)
   1848 		    && (sc->sc_type != WM_T_PCH)
   1849 		    && (sc->sc_type != WM_T_PCH2)
   1850 		    && (sc->sc_type != WM_T_PCH_LPT)
   1851 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1852 			/* ICH* and PCH* have no PCIe capability registers */
   1853 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1854 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1855 				NULL) == 0)
   1856 				aprint_error_dev(sc->sc_dev,
   1857 				    "unable to find PCIe capability\n");
   1858 		}
   1859 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1860 	} else {
   1861 		reg = CSR_READ(sc, WMREG_STATUS);
   1862 		if (reg & STATUS_BUS64)
   1863 			sc->sc_flags |= WM_F_BUS64;
   1864 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1865 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1866 
   1867 			sc->sc_flags |= WM_F_PCIX;
   1868 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1869 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1870 				aprint_error_dev(sc->sc_dev,
   1871 				    "unable to find PCIX capability\n");
   1872 			else if (sc->sc_type != WM_T_82545_3 &&
   1873 				 sc->sc_type != WM_T_82546_3) {
   1874 				/*
   1875 				 * Work around a problem caused by the BIOS
   1876 				 * setting the max memory read byte count
   1877 				 * incorrectly.
   1878 				 */
   1879 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1880 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1881 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1882 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1883 
   1884 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1885 				    PCIX_CMD_BYTECNT_SHIFT;
   1886 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1887 				    PCIX_STATUS_MAXB_SHIFT;
   1888 				if (bytecnt > maxb) {
   1889 					aprint_verbose_dev(sc->sc_dev,
   1890 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1891 					    512 << bytecnt, 512 << maxb);
   1892 					pcix_cmd = (pcix_cmd &
   1893 					    ~PCIX_CMD_BYTECNT_MASK) |
   1894 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1895 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1896 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1897 					    pcix_cmd);
   1898 				}
   1899 			}
   1900 		}
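		/*
		 * Worked example of the MMRBC fix above (hypothetical
		 * values): if the BIOS left bytecnt == 3, i.e.
		 * 512 << 3 == 4096 bytes, but the bus only supports
		 * maxb == 1, i.e. 512 << 1 == 1024 bytes, the
		 * byte-count field in PCIX_CMD is rewritten to 1,
		 * capping the maximum memory read byte count at 1024.
		 */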
   1901 		/*
   1902 		 * The quad port adapter is special; it has a PCIX-PCIX
   1903 		 * bridge on the board, and can run the secondary bus at
   1904 		 * a higher speed.
   1905 		 */
   1906 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1907 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1908 								      : 66;
   1909 		} else if (sc->sc_flags & WM_F_PCIX) {
   1910 			switch (reg & STATUS_PCIXSPD_MASK) {
   1911 			case STATUS_PCIXSPD_50_66:
   1912 				sc->sc_bus_speed = 66;
   1913 				break;
   1914 			case STATUS_PCIXSPD_66_100:
   1915 				sc->sc_bus_speed = 100;
   1916 				break;
   1917 			case STATUS_PCIXSPD_100_133:
   1918 				sc->sc_bus_speed = 133;
   1919 				break;
   1920 			default:
   1921 				aprint_error_dev(sc->sc_dev,
   1922 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1923 				    reg & STATUS_PCIXSPD_MASK);
   1924 				sc->sc_bus_speed = 66;
   1925 				break;
   1926 			}
   1927 		} else
   1928 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1929 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1930 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1931 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1932 	}
   1933 
   1934 	/* clear interesting stat counters */
   1935 	CSR_READ(sc, WMREG_COLC);
   1936 	CSR_READ(sc, WMREG_RXERRC);
   1937 
   1938 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1939 	    || (sc->sc_type >= WM_T_ICH8))
   1940 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1941 	if (sc->sc_type >= WM_T_ICH8)
   1942 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1943 
   1944 	/* Set PHY, NVM mutex related stuff */
   1945 	switch (sc->sc_type) {
   1946 	case WM_T_82542_2_0:
   1947 	case WM_T_82542_2_1:
   1948 	case WM_T_82543:
   1949 	case WM_T_82544:
   1950 		/* Microwire */
   1951 		sc->sc_nvm_wordsize = 64;
   1952 		sc->sc_nvm_addrbits = 6;
   1953 		break;
   1954 	case WM_T_82540:
   1955 	case WM_T_82545:
   1956 	case WM_T_82545_3:
   1957 	case WM_T_82546:
   1958 	case WM_T_82546_3:
   1959 		/* Microwire */
   1960 		reg = CSR_READ(sc, WMREG_EECD);
   1961 		if (reg & EECD_EE_SIZE) {
   1962 			sc->sc_nvm_wordsize = 256;
   1963 			sc->sc_nvm_addrbits = 8;
   1964 		} else {
   1965 			sc->sc_nvm_wordsize = 64;
   1966 			sc->sc_nvm_addrbits = 6;
   1967 		}
   1968 		sc->sc_flags |= WM_F_LOCK_EECD;
   1969 		break;
   1970 	case WM_T_82541:
   1971 	case WM_T_82541_2:
   1972 	case WM_T_82547:
   1973 	case WM_T_82547_2:
   1974 		sc->sc_flags |= WM_F_LOCK_EECD;
   1975 		reg = CSR_READ(sc, WMREG_EECD);
   1976 		if (reg & EECD_EE_TYPE) {
   1977 			/* SPI */
   1978 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1979 			wm_nvm_set_addrbits_size_eecd(sc);
   1980 		} else {
   1981 			/* Microwire */
   1982 			if ((reg & EECD_EE_ABITS) != 0) {
   1983 				sc->sc_nvm_wordsize = 256;
   1984 				sc->sc_nvm_addrbits = 8;
   1985 			} else {
   1986 				sc->sc_nvm_wordsize = 64;
   1987 				sc->sc_nvm_addrbits = 6;
   1988 			}
   1989 		}
   1990 		break;
   1991 	case WM_T_82571:
   1992 	case WM_T_82572:
   1993 		/* SPI */
   1994 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1995 		wm_nvm_set_addrbits_size_eecd(sc);
   1996 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1997 		sc->phy.acquire = wm_get_swsm_semaphore;
   1998 		sc->phy.release = wm_put_swsm_semaphore;
   1999 		break;
   2000 	case WM_T_82573:
   2001 	case WM_T_82574:
   2002 	case WM_T_82583:
   2003 		if (sc->sc_type == WM_T_82573) {
   2004 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2005 			sc->phy.acquire = wm_get_swsm_semaphore;
   2006 			sc->phy.release = wm_put_swsm_semaphore;
   2007 		} else {
   2008 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2009 			/* Both PHY and NVM use the same semaphore. */
   2010 			sc->phy.acquire
   2011 			    = wm_get_swfwhw_semaphore;
   2012 			sc->phy.release
   2013 			    = wm_put_swfwhw_semaphore;
   2014 		}
   2015 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2016 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2017 			sc->sc_nvm_wordsize = 2048;
   2018 		} else {
   2019 			/* SPI */
   2020 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2021 			wm_nvm_set_addrbits_size_eecd(sc);
   2022 		}
   2023 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2024 		break;
   2025 	case WM_T_82575:
   2026 	case WM_T_82576:
   2027 	case WM_T_82580:
   2028 	case WM_T_I350:
   2029 	case WM_T_I354:
   2030 	case WM_T_80003:
   2031 		/* SPI */
   2032 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2033 		wm_nvm_set_addrbits_size_eecd(sc);
   2034 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2035 		    | WM_F_LOCK_SWSM;
   2036 		sc->phy.acquire = wm_get_phy_82575;
   2037 		sc->phy.release = wm_put_phy_82575;
   2038 		break;
   2039 	case WM_T_ICH8:
   2040 	case WM_T_ICH9:
   2041 	case WM_T_ICH10:
   2042 	case WM_T_PCH:
   2043 	case WM_T_PCH2:
   2044 	case WM_T_PCH_LPT:
   2045 		/* FLASH */
   2046 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2047 		sc->sc_nvm_wordsize = 2048;
   2048 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2049 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2050 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2051 			aprint_error_dev(sc->sc_dev,
   2052 			    "can't map FLASH registers\n");
   2053 			goto out;
   2054 		}
   2055 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2056 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2057 		    ICH_FLASH_SECTOR_SIZE;
   2058 		sc->sc_ich8_flash_bank_size =
   2059 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2060 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2061 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2062 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2063 		sc->sc_flashreg_offset = 0;
   2064 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2065 		sc->phy.release = wm_put_swflag_ich8lan;
   2066 		break;
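		/*
		 * Worked example of the GFPREG decode above, with a
		 * hypothetical GFPREG of 0x00070000 and the usual 4KB
		 * ICH_FLASH_SECTOR_SIZE: base == 0 and limit == 7, so
		 * the NVM region spans 8 sectors == 32KB.  The single
		 * division by (2 * sizeof(uint16_t)) both converts
		 * bytes to 16-bit words and splits the region over the
		 * two banks, giving 8192 words per bank.
		 */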
   2067 	case WM_T_PCH_SPT:
   2068 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2069 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2070 		sc->sc_flasht = sc->sc_st;
   2071 		sc->sc_flashh = sc->sc_sh;
   2072 		sc->sc_ich8_flash_base = 0;
   2073 		sc->sc_nvm_wordsize =
   2074 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2075 			* NVM_SIZE_MULTIPLIER;
    2076 		/* That is the size in bytes; we want it in words */
   2077 		sc->sc_nvm_wordsize /= 2;
   2078 		/* assume 2 banks */
   2079 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2080 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2081 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2082 		sc->phy.release = wm_put_swflag_ich8lan;
   2083 		break;
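		/*
		 * Worked example of the STRAP decode above (assuming
		 * NVM_SIZE_MULTIPLIER is a 4KB sector): if bits [5:1]
		 * of WMREG_STRAP read back as 7, the flash region is
		 * (7 + 1) * 4096 == 32768 bytes == 16384 16-bit words,
		 * and with the assumed 2 banks each bank holds 8192
		 * words.
		 */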
   2084 	case WM_T_I210:
   2085 	case WM_T_I211:
   2086 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2087 			wm_nvm_set_addrbits_size_eecd(sc);
   2088 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2089 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2090 		} else {
   2091 			sc->sc_nvm_wordsize = INVM_SIZE;
   2092 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2093 		}
   2094 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2095 		sc->phy.acquire = wm_get_phy_82575;
   2096 		sc->phy.release = wm_put_phy_82575;
   2097 		break;
   2098 	default:
   2099 		break;
   2100 	}
   2101 
   2102 	/* Reset the chip to a known state. */
   2103 	wm_reset(sc);
   2104 
   2105 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2106 	switch (sc->sc_type) {
   2107 	case WM_T_82571:
   2108 	case WM_T_82572:
   2109 		reg = CSR_READ(sc, WMREG_SWSM2);
   2110 		if ((reg & SWSM2_LOCK) == 0) {
   2111 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2112 			force_clear_smbi = true;
   2113 		} else
   2114 			force_clear_smbi = false;
   2115 		break;
   2116 	case WM_T_82573:
   2117 	case WM_T_82574:
   2118 	case WM_T_82583:
   2119 		force_clear_smbi = true;
   2120 		break;
   2121 	default:
   2122 		force_clear_smbi = false;
   2123 		break;
   2124 	}
   2125 	if (force_clear_smbi) {
   2126 		reg = CSR_READ(sc, WMREG_SWSM);
   2127 		if ((reg & SWSM_SMBI) != 0)
   2128 			aprint_error_dev(sc->sc_dev,
   2129 			    "Please update the Bootagent\n");
   2130 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2131 	}
   2132 
   2133 	/*
    2134 	 * Defer printing the EEPROM type until after verifying the checksum.
   2135 	 * This allows the EEPROM type to be printed correctly in the case
   2136 	 * that no EEPROM is attached.
   2137 	 */
   2138 	/*
   2139 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2140 	 * this for later, so we can fail future reads from the EEPROM.
   2141 	 */
   2142 	if (wm_nvm_validate_checksum(sc)) {
   2143 		/*
    2144 		 * Retry the read, because some PCIe parts fail the
    2145 		 * first check when the link is in a sleep state.
   2146 		 */
   2147 		if (wm_nvm_validate_checksum(sc))
   2148 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2149 	}
   2150 
   2151 	/* Set device properties (macflags) */
   2152 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2153 
   2154 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2155 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2156 	else {
   2157 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2158 		    sc->sc_nvm_wordsize);
   2159 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2160 			aprint_verbose("iNVM");
   2161 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2162 			aprint_verbose("FLASH(HW)");
   2163 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2164 			aprint_verbose("FLASH");
   2165 		else {
   2166 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2167 				eetype = "SPI";
   2168 			else
   2169 				eetype = "MicroWire";
   2170 			aprint_verbose("(%d address bits) %s EEPROM",
   2171 			    sc->sc_nvm_addrbits, eetype);
   2172 		}
   2173 	}
   2174 	wm_nvm_version(sc);
   2175 	aprint_verbose("\n");
   2176 
   2177 	/* Check for I21[01] PLL workaround */
   2178 	if (sc->sc_type == WM_T_I210)
   2179 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2180 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2181 		/* NVM image release 3.25 has a workaround */
   2182 		if ((sc->sc_nvm_ver_major < 3)
   2183 		    || ((sc->sc_nvm_ver_major == 3)
   2184 			&& (sc->sc_nvm_ver_minor < 25))) {
   2185 			aprint_verbose_dev(sc->sc_dev,
   2186 			    "ROM image version %d.%d is older than 3.25\n",
   2187 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2188 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2189 		}
   2190 	}
   2191 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2192 		wm_pll_workaround_i210(sc);
   2193 
   2194 	wm_get_wakeup(sc);
   2195 
   2196 	/* Non-AMT based hardware can now take control from firmware */
   2197 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2198 		wm_get_hw_control(sc);
   2199 
   2200 	/*
    2201 	 * Read the Ethernet address from the EEPROM, unless it was
    2202 	 * already found in the device properties.
   2203 	 */
   2204 	ea = prop_dictionary_get(dict, "mac-address");
   2205 	if (ea != NULL) {
   2206 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2207 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2208 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2209 	} else {
   2210 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2211 			aprint_error_dev(sc->sc_dev,
   2212 			    "unable to read Ethernet address\n");
   2213 			goto out;
   2214 		}
   2215 	}
   2216 
   2217 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2218 	    ether_sprintf(enaddr));
   2219 
   2220 	/*
   2221 	 * Read the config info from the EEPROM, and set up various
   2222 	 * bits in the control registers based on their contents.
   2223 	 */
   2224 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2225 	if (pn != NULL) {
   2226 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2227 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2228 	} else {
   2229 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2230 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2231 			goto out;
   2232 		}
   2233 	}
   2234 
   2235 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2236 	if (pn != NULL) {
   2237 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2238 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2239 	} else {
   2240 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2241 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2242 			goto out;
   2243 		}
   2244 	}
   2245 
   2246 	/* check for WM_F_WOL */
   2247 	switch (sc->sc_type) {
   2248 	case WM_T_82542_2_0:
   2249 	case WM_T_82542_2_1:
   2250 	case WM_T_82543:
   2251 		/* dummy? */
   2252 		eeprom_data = 0;
   2253 		apme_mask = NVM_CFG3_APME;
   2254 		break;
   2255 	case WM_T_82544:
   2256 		apme_mask = NVM_CFG2_82544_APM_EN;
   2257 		eeprom_data = cfg2;
   2258 		break;
   2259 	case WM_T_82546:
   2260 	case WM_T_82546_3:
   2261 	case WM_T_82571:
   2262 	case WM_T_82572:
   2263 	case WM_T_82573:
   2264 	case WM_T_82574:
   2265 	case WM_T_82583:
   2266 	case WM_T_80003:
   2267 	default:
   2268 		apme_mask = NVM_CFG3_APME;
   2269 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2270 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2271 		break;
   2272 	case WM_T_82575:
   2273 	case WM_T_82576:
   2274 	case WM_T_82580:
   2275 	case WM_T_I350:
   2276 	case WM_T_I354: /* XXX ok? */
   2277 	case WM_T_ICH8:
   2278 	case WM_T_ICH9:
   2279 	case WM_T_ICH10:
   2280 	case WM_T_PCH:
   2281 	case WM_T_PCH2:
   2282 	case WM_T_PCH_LPT:
   2283 	case WM_T_PCH_SPT:
   2284 		/* XXX The funcid should be checked on some devices */
   2285 		apme_mask = WUC_APME;
   2286 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2287 		break;
   2288 	}
   2289 
   2290 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2291 	if ((eeprom_data & apme_mask) != 0)
   2292 		sc->sc_flags |= WM_F_WOL;
   2293 #ifdef WM_DEBUG
   2294 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2295 		printf("WOL\n");
   2296 #endif
   2297 
   2298 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2299 		/* Check NVM for autonegotiation */
   2300 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2301 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2302 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2303 		}
   2304 	}
   2305 
   2306 	/*
    2307 	 * XXX need special handling for some multi-port cards
    2308 	 * to disable a particular port.
   2309 	 */
   2310 
   2311 	if (sc->sc_type >= WM_T_82544) {
   2312 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2313 		if (pn != NULL) {
   2314 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2315 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2316 		} else {
   2317 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2318 				aprint_error_dev(sc->sc_dev,
   2319 				    "unable to read SWDPIN\n");
   2320 				goto out;
   2321 			}
   2322 		}
   2323 	}
   2324 
   2325 	if (cfg1 & NVM_CFG1_ILOS)
   2326 		sc->sc_ctrl |= CTRL_ILOS;
   2327 
   2328 	/*
   2329 	 * XXX
    2330 	 * This code isn't correct, because pins 2 and 3 are located
    2331 	 * in different positions on newer chips. Check all datasheets.
    2332 	 *
    2333 	 * Until that is resolved, apply this only to the 82580 and older.
   2334 	 */
   2335 	if (sc->sc_type <= WM_T_82580) {
   2336 		if (sc->sc_type >= WM_T_82544) {
   2337 			sc->sc_ctrl |=
   2338 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2339 			    CTRL_SWDPIO_SHIFT;
   2340 			sc->sc_ctrl |=
   2341 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2342 			    CTRL_SWDPINS_SHIFT;
   2343 		} else {
   2344 			sc->sc_ctrl |=
   2345 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2346 			    CTRL_SWDPIO_SHIFT;
   2347 		}
   2348 	}
   2349 
    2350 	/* XXX Should this also be done for chips other than the 82580? */
   2351 	if (sc->sc_type == WM_T_82580) {
   2352 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2353 		if (nvmword & __BIT(13))
   2354 			sc->sc_ctrl |= CTRL_ILOS;
   2355 	}
   2356 
   2357 #if 0
   2358 	if (sc->sc_type >= WM_T_82544) {
   2359 		if (cfg1 & NVM_CFG1_IPS0)
   2360 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2361 		if (cfg1 & NVM_CFG1_IPS1)
   2362 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2363 		sc->sc_ctrl_ext |=
   2364 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2365 		    CTRL_EXT_SWDPIO_SHIFT;
   2366 		sc->sc_ctrl_ext |=
   2367 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2368 		    CTRL_EXT_SWDPINS_SHIFT;
   2369 	} else {
   2370 		sc->sc_ctrl_ext |=
   2371 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2372 		    CTRL_EXT_SWDPIO_SHIFT;
   2373 	}
   2374 #endif
   2375 
   2376 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2377 #if 0
   2378 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2379 #endif
   2380 
   2381 	if (sc->sc_type == WM_T_PCH) {
   2382 		uint16_t val;
   2383 
   2384 		/* Save the NVM K1 bit setting */
   2385 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2386 
   2387 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2388 			sc->sc_nvm_k1_enabled = 1;
   2389 		else
   2390 			sc->sc_nvm_k1_enabled = 0;
   2391 	}
   2392 
   2393 	/*
    2394 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2395 	 * media structures accordingly.
   2396 	 */
   2397 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2398 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2399 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2400 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2401 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2402 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2403 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2404 	} else if (sc->sc_type < WM_T_82543 ||
   2405 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2406 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2407 			aprint_error_dev(sc->sc_dev,
   2408 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2409 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2410 		}
   2411 		wm_tbi_mediainit(sc);
   2412 	} else {
   2413 		switch (sc->sc_type) {
   2414 		case WM_T_82575:
   2415 		case WM_T_82576:
   2416 		case WM_T_82580:
   2417 		case WM_T_I350:
   2418 		case WM_T_I354:
   2419 		case WM_T_I210:
   2420 		case WM_T_I211:
   2421 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2422 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2423 			switch (link_mode) {
   2424 			case CTRL_EXT_LINK_MODE_1000KX:
   2425 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2426 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2427 				break;
   2428 			case CTRL_EXT_LINK_MODE_SGMII:
   2429 				if (wm_sgmii_uses_mdio(sc)) {
   2430 					aprint_verbose_dev(sc->sc_dev,
   2431 					    "SGMII(MDIO)\n");
   2432 					sc->sc_flags |= WM_F_SGMII;
   2433 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2434 					break;
   2435 				}
   2436 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2437 				/*FALLTHROUGH*/
   2438 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2439 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2440 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2441 					if (link_mode
   2442 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2443 						sc->sc_mediatype
   2444 						    = WM_MEDIATYPE_COPPER;
   2445 						sc->sc_flags |= WM_F_SGMII;
   2446 					} else {
   2447 						sc->sc_mediatype
   2448 						    = WM_MEDIATYPE_SERDES;
   2449 						aprint_verbose_dev(sc->sc_dev,
   2450 						    "SERDES\n");
   2451 					}
   2452 					break;
   2453 				}
   2454 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2455 					aprint_verbose_dev(sc->sc_dev,
   2456 					    "SERDES\n");
   2457 
   2458 				/* Change current link mode setting */
   2459 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2460 				switch (sc->sc_mediatype) {
   2461 				case WM_MEDIATYPE_COPPER:
   2462 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2463 					break;
   2464 				case WM_MEDIATYPE_SERDES:
   2465 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2466 					break;
   2467 				default:
   2468 					break;
   2469 				}
   2470 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2471 				break;
   2472 			case CTRL_EXT_LINK_MODE_GMII:
   2473 			default:
   2474 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2475 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2476 				break;
   2477 			}
   2478 
    2479 			/* Enable the I2C interface only when using SGMII */
    2480 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2481 				reg |= CTRL_EXT_I2C_ENA;
    2482 			else
    2483 				reg &= ~CTRL_EXT_I2C_ENA;
   2484 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2485 
   2486 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2487 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2488 			else
   2489 				wm_tbi_mediainit(sc);
   2490 			break;
   2491 		default:
   2492 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2493 				aprint_error_dev(sc->sc_dev,
   2494 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2495 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2496 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2497 		}
   2498 	}
   2499 
   2500 	ifp = &sc->sc_ethercom.ec_if;
   2501 	xname = device_xname(sc->sc_dev);
   2502 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2503 	ifp->if_softc = sc;
   2504 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2505 	ifp->if_extflags = IFEF_START_MPSAFE;
   2506 	ifp->if_ioctl = wm_ioctl;
   2507 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2508 		ifp->if_start = wm_nq_start;
   2509 		if (sc->sc_nqueues > 1)
   2510 			ifp->if_transmit = wm_nq_transmit;
   2511 	} else
   2512 		ifp->if_start = wm_start;
   2513 	ifp->if_watchdog = wm_watchdog;
   2514 	ifp->if_init = wm_init;
   2515 	ifp->if_stop = wm_stop;
   2516 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2517 	IFQ_SET_READY(&ifp->if_snd);
   2518 
   2519 	/* Check for jumbo frame */
   2520 	switch (sc->sc_type) {
   2521 	case WM_T_82573:
   2522 		/* XXX limited to 9234 if ASPM is disabled */
   2523 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2524 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2525 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2526 		break;
   2527 	case WM_T_82571:
   2528 	case WM_T_82572:
   2529 	case WM_T_82574:
   2530 	case WM_T_82575:
   2531 	case WM_T_82576:
   2532 	case WM_T_82580:
   2533 	case WM_T_I350:
   2534 	case WM_T_I354: /* XXXX ok? */
   2535 	case WM_T_I210:
   2536 	case WM_T_I211:
   2537 	case WM_T_80003:
   2538 	case WM_T_ICH9:
   2539 	case WM_T_ICH10:
   2540 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2541 	case WM_T_PCH_LPT:
   2542 	case WM_T_PCH_SPT:
   2543 		/* XXX limited to 9234 */
   2544 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2545 		break;
   2546 	case WM_T_PCH:
   2547 		/* XXX limited to 4096 */
   2548 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2549 		break;
   2550 	case WM_T_82542_2_0:
   2551 	case WM_T_82542_2_1:
   2552 	case WM_T_82583:
   2553 	case WM_T_ICH8:
   2554 		/* No support for jumbo frame */
   2555 		break;
   2556 	default:
   2557 		/* ETHER_MAX_LEN_JUMBO */
   2558 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2559 		break;
   2560 	}
   2561 
    2562 	/* If we're an i82543 or greater, we can support VLANs. */
   2563 	if (sc->sc_type >= WM_T_82543)
   2564 		sc->sc_ethercom.ec_capabilities |=
   2565 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2566 
   2567 	/*
    2568 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2569 	 * on i82543 and later.
   2570 	 */
   2571 	if (sc->sc_type >= WM_T_82543) {
   2572 		ifp->if_capabilities |=
   2573 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2574 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2575 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2576 		    IFCAP_CSUM_TCPv6_Tx |
   2577 		    IFCAP_CSUM_UDPv6_Tx;
   2578 	}
   2579 
   2580 	/*
   2581 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2582 	 *
   2583 	 *	82541GI (8086:1076) ... no
   2584 	 *	82572EI (8086:10b9) ... yes
   2585 	 */
   2586 	if (sc->sc_type >= WM_T_82571) {
   2587 		ifp->if_capabilities |=
   2588 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2589 	}
   2590 
   2591 	/*
    2592 	 * If we're an i82544 or greater (except i82547), we can do
   2593 	 * TCP segmentation offload.
   2594 	 */
   2595 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2596 		ifp->if_capabilities |= IFCAP_TSOv4;
   2597 	}
   2598 
   2599 	if (sc->sc_type >= WM_T_82571) {
   2600 		ifp->if_capabilities |= IFCAP_TSOv6;
   2601 	}
   2602 
   2603 #ifdef WM_MPSAFE
   2604 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2605 #else
   2606 	sc->sc_core_lock = NULL;
   2607 #endif
   2608 
   2609 	/* Attach the interface. */
   2610 	if_initialize(ifp);
   2611 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2612 	ether_ifattach(ifp, enaddr);
   2613 	if_register(ifp);
   2614 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2615 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2616 			  RND_FLAG_DEFAULT);
   2617 
   2618 #ifdef WM_EVENT_COUNTERS
   2619 	/* Attach event counters. */
   2620 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2621 	    NULL, xname, "linkintr");
   2622 
   2623 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2624 	    NULL, xname, "tx_xoff");
   2625 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2626 	    NULL, xname, "tx_xon");
   2627 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2628 	    NULL, xname, "rx_xoff");
   2629 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2630 	    NULL, xname, "rx_xon");
   2631 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2632 	    NULL, xname, "rx_macctl");
   2633 #endif /* WM_EVENT_COUNTERS */
   2634 
   2635 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2636 		pmf_class_network_register(self, ifp);
   2637 	else
   2638 		aprint_error_dev(self, "couldn't establish power handler\n");
   2639 
   2640 	sc->sc_flags |= WM_F_ATTACHED;
   2641  out:
   2642 	return;
   2643 }
   2644 
   2645 /* The detach function (ca_detach) */
   2646 static int
   2647 wm_detach(device_t self, int flags __unused)
   2648 {
   2649 	struct wm_softc *sc = device_private(self);
   2650 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2651 	int i;
   2652 
   2653 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2654 		return 0;
   2655 
   2656 	/* Stop the interface. Callouts are stopped in it. */
   2657 	wm_stop(ifp, 1);
   2658 
   2659 	pmf_device_deregister(self);
   2660 
   2661 	/* Tell the firmware about the release */
   2662 	WM_CORE_LOCK(sc);
   2663 	wm_release_manageability(sc);
   2664 	wm_release_hw_control(sc);
   2665 	wm_enable_wakeup(sc);
   2666 	WM_CORE_UNLOCK(sc);
   2667 
   2668 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2669 
   2670 	/* Delete all remaining media. */
   2671 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2672 
   2673 	ether_ifdetach(ifp);
   2674 	if_detach(ifp);
   2675 	if_percpuq_destroy(sc->sc_ipq);
   2676 
   2677 	/* Unload RX dmamaps and free mbufs */
   2678 	for (i = 0; i < sc->sc_nqueues; i++) {
   2679 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2680 		mutex_enter(rxq->rxq_lock);
   2681 		wm_rxdrain(rxq);
   2682 		mutex_exit(rxq->rxq_lock);
   2683 	}
   2684 	/* Must unlock here */
   2685 
   2686 	/* Disestablish the interrupt handler */
   2687 	for (i = 0; i < sc->sc_nintrs; i++) {
   2688 		if (sc->sc_ihs[i] != NULL) {
   2689 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2690 			sc->sc_ihs[i] = NULL;
   2691 		}
   2692 	}
   2693 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2694 
   2695 	wm_free_txrx_queues(sc);
   2696 
   2697 	/* Unmap the registers */
   2698 	if (sc->sc_ss) {
   2699 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2700 		sc->sc_ss = 0;
   2701 	}
   2702 	if (sc->sc_ios) {
   2703 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2704 		sc->sc_ios = 0;
   2705 	}
   2706 	if (sc->sc_flashs) {
   2707 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2708 		sc->sc_flashs = 0;
   2709 	}
   2710 
   2711 	if (sc->sc_core_lock)
   2712 		mutex_obj_free(sc->sc_core_lock);
   2713 	if (sc->sc_ich_phymtx)
   2714 		mutex_obj_free(sc->sc_ich_phymtx);
   2715 	if (sc->sc_ich_nvmmtx)
   2716 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2717 
   2718 	return 0;
   2719 }
   2720 
   2721 static bool
   2722 wm_suspend(device_t self, const pmf_qual_t *qual)
   2723 {
   2724 	struct wm_softc *sc = device_private(self);
   2725 
   2726 	wm_release_manageability(sc);
   2727 	wm_release_hw_control(sc);
   2728 	wm_enable_wakeup(sc);
   2729 
   2730 	return true;
   2731 }
   2732 
   2733 static bool
   2734 wm_resume(device_t self, const pmf_qual_t *qual)
   2735 {
   2736 	struct wm_softc *sc = device_private(self);
   2737 
   2738 	wm_init_manageability(sc);
   2739 
   2740 	return true;
   2741 }
   2742 
   2743 /*
   2744  * wm_watchdog:		[ifnet interface function]
   2745  *
   2746  *	Watchdog timer handler.
   2747  */
   2748 static void
   2749 wm_watchdog(struct ifnet *ifp)
   2750 {
   2751 	int qid;
   2752 	struct wm_softc *sc = ifp->if_softc;
   2753 
   2754 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2755 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2756 
   2757 		wm_watchdog_txq(ifp, txq);
   2758 	}
   2759 
   2760 	/* Reset the interface. */
   2761 	(void) wm_init(ifp);
   2762 
   2763 	/*
    2764 	 * There is still some upper-layer processing that calls
    2765 	 * ifp->if_start(), e.g. ALTQ.
   2766 	 */
   2767 	/* Try to get more packets going. */
   2768 	ifp->if_start(ifp);
   2769 }
   2770 
   2771 static void
   2772 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2773 {
   2774 	struct wm_softc *sc = ifp->if_softc;
   2775 
   2776 	/*
   2777 	 * Since we're using delayed interrupts, sweep up
   2778 	 * before we report an error.
   2779 	 */
   2780 	mutex_enter(txq->txq_lock);
   2781 	wm_txeof(sc, txq);
   2782 	mutex_exit(txq->txq_lock);
   2783 
   2784 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2785 #ifdef WM_DEBUG
   2786 		int i, j;
   2787 		struct wm_txsoft *txs;
   2788 #endif
   2789 		log(LOG_ERR,
   2790 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2791 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2792 		    txq->txq_next);
   2793 		ifp->if_oerrors++;
   2794 #ifdef WM_DEBUG
    2795 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2796 		    i = WM_NEXTTXS(txq, i)) {
    2797 			txs = &txq->txq_soft[i];
    2798 			printf("txs %d tx %d -> %d\n",
    2799 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2800 			for (j = txs->txs_firstdesc; ;
    2801 			    j = WM_NEXTTX(txq, j)) {
    2802 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2803 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2804 				printf("\t %#08x%08x\n",
    2805 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2806 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2807 				if (j == txs->txs_lastdesc)
    2808 					break;
    2809 			}
    2810 		}
   2811 #endif
   2812 	}
   2813 }
   2814 
   2815 /*
   2816  * wm_tick:
   2817  *
   2818  *	One second timer, used to check link status, sweep up
   2819  *	completed transmit jobs, etc.
   2820  */
   2821 static void
   2822 wm_tick(void *arg)
   2823 {
   2824 	struct wm_softc *sc = arg;
   2825 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2826 #ifndef WM_MPSAFE
   2827 	int s = splnet();
   2828 #endif
   2829 
   2830 	WM_CORE_LOCK(sc);
   2831 
   2832 	if (sc->sc_core_stopping)
   2833 		goto out;
   2834 
   2835 	if (sc->sc_type >= WM_T_82542_2_1) {
   2836 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2837 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2838 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2839 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2840 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2841 	}
   2842 
   2843 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2844 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    2845 	    + CSR_READ(sc, WMREG_CRCERRS)
   2846 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2847 	    + CSR_READ(sc, WMREG_SYMERRC)
   2848 	    + CSR_READ(sc, WMREG_RXERRC)
   2849 	    + CSR_READ(sc, WMREG_SEC)
   2850 	    + CSR_READ(sc, WMREG_CEXTERR)
   2851 	    + CSR_READ(sc, WMREG_RLEC);
    2852 	/*
    2853 	 * WMREG_RNBC is incremented when no receive buffer is available in
    2854 	 * host memory. It is not the number of dropped packets, because the
    2855 	 * Ethernet controller can still receive packets in that case as long
    2856 	 * as there is space in the PHY's FIFO.
    2857 	 *
    2858 	 * To count WMREG_RNBC events, use a dedicated EVCNT instead of
    2859 	 * if_iqdrops.
    2860 	 */
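	/*
	 * A minimal sketch of such a counter (sc_ev_rnbc is hypothetical,
	 * not a field of this driver):
	 *
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */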
   2861 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2862 
   2863 	if (sc->sc_flags & WM_F_HAS_MII)
   2864 		mii_tick(&sc->sc_mii);
   2865 	else if ((sc->sc_type >= WM_T_82575)
   2866 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2867 		wm_serdes_tick(sc);
   2868 	else
   2869 		wm_tbi_tick(sc);
   2870 
   2871 out:
   2872 	WM_CORE_UNLOCK(sc);
   2873 #ifndef WM_MPSAFE
   2874 	splx(s);
   2875 #endif
   2876 
   2877 	if (!sc->sc_core_stopping)
   2878 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2879 }
   2880 
   2881 static int
   2882 wm_ifflags_cb(struct ethercom *ec)
   2883 {
   2884 	struct ifnet *ifp = &ec->ec_if;
   2885 	struct wm_softc *sc = ifp->if_softc;
   2886 	int rc = 0;
   2887 
   2888 	WM_CORE_LOCK(sc);
   2889 
   2890 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2891 	sc->sc_if_flags = ifp->if_flags;
   2892 
   2893 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2894 		rc = ENETRESET;
   2895 		goto out;
   2896 	}
   2897 
   2898 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2899 		wm_set_filter(sc);
   2900 
   2901 	wm_set_vlan(sc);
   2902 
   2903 out:
   2904 	WM_CORE_UNLOCK(sc);
   2905 
   2906 	return rc;
   2907 }
   2908 
   2909 /*
   2910  * wm_ioctl:		[ifnet interface function]
   2911  *
   2912  *	Handle control requests from the operator.
   2913  */
   2914 static int
   2915 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2916 {
   2917 	struct wm_softc *sc = ifp->if_softc;
   2918 	struct ifreq *ifr = (struct ifreq *) data;
   2919 	struct ifaddr *ifa = (struct ifaddr *)data;
   2920 	struct sockaddr_dl *sdl;
   2921 	int s, error;
   2922 
   2923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2924 		device_xname(sc->sc_dev), __func__));
   2925 
   2926 #ifndef WM_MPSAFE
   2927 	s = splnet();
   2928 #endif
   2929 	switch (cmd) {
   2930 	case SIOCSIFMEDIA:
   2931 	case SIOCGIFMEDIA:
   2932 		WM_CORE_LOCK(sc);
   2933 		/* Flow control requires full-duplex mode. */
   2934 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2935 		    (ifr->ifr_media & IFM_FDX) == 0)
   2936 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2937 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2938 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2939 				/* We can do both TXPAUSE and RXPAUSE. */
   2940 				ifr->ifr_media |=
   2941 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2942 			}
   2943 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2944 		}
   2945 		WM_CORE_UNLOCK(sc);
   2946 #ifdef WM_MPSAFE
   2947 		s = splnet();
   2948 #endif
   2949 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2950 #ifdef WM_MPSAFE
   2951 		splx(s);
   2952 #endif
   2953 		break;
   2954 	case SIOCINITIFADDR:
   2955 		WM_CORE_LOCK(sc);
   2956 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2957 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2958 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2959 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2960 			/* unicast address is first multicast entry */
   2961 			wm_set_filter(sc);
   2962 			error = 0;
   2963 			WM_CORE_UNLOCK(sc);
   2964 			break;
   2965 		}
   2966 		WM_CORE_UNLOCK(sc);
   2967 		/*FALLTHROUGH*/
   2968 	default:
   2969 #ifdef WM_MPSAFE
   2970 		s = splnet();
   2971 #endif
   2972 		/* It may call wm_start, so unlock here */
   2973 		error = ether_ioctl(ifp, cmd, data);
   2974 #ifdef WM_MPSAFE
   2975 		splx(s);
   2976 #endif
   2977 		if (error != ENETRESET)
   2978 			break;
   2979 
   2980 		error = 0;
   2981 
   2982 		if (cmd == SIOCSIFCAP) {
   2983 			error = (*ifp->if_init)(ifp);
   2984 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2985 			;
   2986 		else if (ifp->if_flags & IFF_RUNNING) {
   2987 			/*
   2988 			 * Multicast list has changed; set the hardware filter
   2989 			 * accordingly.
   2990 			 */
   2991 			WM_CORE_LOCK(sc);
   2992 			wm_set_filter(sc);
   2993 			WM_CORE_UNLOCK(sc);
   2994 		}
   2995 		break;
   2996 	}
   2997 
   2998 #ifndef WM_MPSAFE
   2999 	splx(s);
   3000 #endif
   3001 	return error;
   3002 }
   3003 
   3004 /* MAC address related */
   3005 
   3006 /*
    3007  * Get the offset of the MAC address and return it.
    3008  * If an error occurs, use offset 0.
   3009  */
   3010 static uint16_t
   3011 wm_check_alt_mac_addr(struct wm_softc *sc)
   3012 {
   3013 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3014 	uint16_t offset = NVM_OFF_MACADDR;
   3015 
   3016 	/* Try to read alternative MAC address pointer */
   3017 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3018 		return 0;
   3019 
    3020 	/* Check whether the pointer is valid. */
   3021 	if ((offset == 0x0000) || (offset == 0xffff))
   3022 		return 0;
   3023 
   3024 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3025 	/*
    3026 	 * Check whether the alternative MAC address is valid.
    3027 	 * Some cards have a non-0xffff pointer but don't actually use
    3028 	 * an alternative MAC address.
    3029 	 *
    3030 	 * A valid unicast address must have the broadcast bit clear.
    3031 	 */
   3032 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3033 		if (((myea[0] & 0xff) & 0x01) == 0)
   3034 			return offset; /* Found */
   3035 
   3036 	/* Not found */
   3037 	return 0;
   3038 }
   3039 
   3040 static int
   3041 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3042 {
   3043 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3044 	uint16_t offset = NVM_OFF_MACADDR;
   3045 	int do_invert = 0;
   3046 
   3047 	switch (sc->sc_type) {
   3048 	case WM_T_82580:
   3049 	case WM_T_I350:
   3050 	case WM_T_I354:
   3051 		/* EEPROM Top Level Partitioning */
   3052 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3053 		break;
   3054 	case WM_T_82571:
   3055 	case WM_T_82575:
   3056 	case WM_T_82576:
   3057 	case WM_T_80003:
   3058 	case WM_T_I210:
   3059 	case WM_T_I211:
   3060 		offset = wm_check_alt_mac_addr(sc);
   3061 		if (offset == 0)
   3062 			if ((sc->sc_funcid & 0x01) == 1)
   3063 				do_invert = 1;
   3064 		break;
   3065 	default:
   3066 		if ((sc->sc_funcid & 0x01) == 1)
   3067 			do_invert = 1;
   3068 		break;
   3069 	}
   3070 
   3071 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3072 		goto bad;
   3073 
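	/*
	 * The NVM stores the MAC address as three little-endian 16-bit
	 * words.  For illustration, myea[] = { 0x1100, 0x3322, 0x5544 }
	 * (hypothetical contents) unpacks to 00:11:22:33:44:55.
	 */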
   3074 	enaddr[0] = myea[0] & 0xff;
   3075 	enaddr[1] = myea[0] >> 8;
   3076 	enaddr[2] = myea[1] & 0xff;
   3077 	enaddr[3] = myea[1] >> 8;
   3078 	enaddr[4] = myea[2] & 0xff;
   3079 	enaddr[5] = myea[2] >> 8;
   3080 
   3081 	/*
   3082 	 * Toggle the LSB of the MAC address on the second port
   3083 	 * of some dual port cards.
   3084 	 */
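	/*
	 * For illustration: if the NVM holds 00:11:22:33:44:54, LAN
	 * function 1 would use 00:11:22:33:44:55.
	 */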
   3085 	if (do_invert != 0)
   3086 		enaddr[5] ^= 1;
   3087 
   3088 	return 0;
   3089 
   3090  bad:
   3091 	return -1;
   3092 }
   3093 
   3094 /*
   3095  * wm_set_ral:
   3096  *
    3097  *	Set an entry in the receive address list.
   3098  */
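/*
 * For illustration: with enaddr 00:11:22:33:44:55, RAL_LO is written as
 * 0x33221100 and RAL_HI as 0x5544 with RAL_AV or'ed in to mark the
 * entry valid.
 */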
   3099 static void
   3100 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3101 {
   3102 	uint32_t ral_lo, ral_hi;
   3103 
   3104 	if (enaddr != NULL) {
   3105 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3106 		    (enaddr[3] << 24);
   3107 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3108 		ral_hi |= RAL_AV;
   3109 	} else {
   3110 		ral_lo = 0;
   3111 		ral_hi = 0;
   3112 	}
   3113 
   3114 	if (sc->sc_type >= WM_T_82544) {
   3115 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3116 		    ral_lo);
   3117 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3118 		    ral_hi);
   3119 	} else {
   3120 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3121 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3122 	}
   3123 }
   3124 
   3125 /*
   3126  * wm_mchash:
   3127  *
   3128  *	Compute the hash of the multicast address for the 4096-bit
   3129  *	multicast filter.
   3130  */
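/*
 * For illustration (non-ICH case, assuming sc_mchash_type == 0): for
 * the multicast address 01:00:5e:00:00:01, enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010,
 * i.e. bit 16 of the 4096-bit filter.
 */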
   3131 static uint32_t
   3132 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3133 {
   3134 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3135 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3136 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3137 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3138 	uint32_t hash;
   3139 
   3140 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3141 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3142 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3143 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3144 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3145 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3146 		return (hash & 0x3ff);
   3147 	}
   3148 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3149 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3150 
   3151 	return (hash & 0xfff);
   3152 }
   3153 
   3154 /*
   3155  * wm_set_filter:
   3156  *
   3157  *	Set up the receive filter.
   3158  */
   3159 static void
   3160 wm_set_filter(struct wm_softc *sc)
   3161 {
   3162 	struct ethercom *ec = &sc->sc_ethercom;
   3163 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3164 	struct ether_multi *enm;
   3165 	struct ether_multistep step;
   3166 	bus_addr_t mta_reg;
   3167 	uint32_t hash, reg, bit;
   3168 	int i, size, ralmax;
   3169 
   3170 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3171 		device_xname(sc->sc_dev), __func__));
   3172 
   3173 	if (sc->sc_type >= WM_T_82544)
   3174 		mta_reg = WMREG_CORDOVA_MTA;
   3175 	else
   3176 		mta_reg = WMREG_MTA;
   3177 
   3178 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3179 
   3180 	if (ifp->if_flags & IFF_BROADCAST)
   3181 		sc->sc_rctl |= RCTL_BAM;
   3182 	if (ifp->if_flags & IFF_PROMISC) {
   3183 		sc->sc_rctl |= RCTL_UPE;
   3184 		goto allmulti;
   3185 	}
   3186 
   3187 	/*
   3188 	 * Set the station address in the first RAL slot, and
   3189 	 * clear the remaining slots.
   3190 	 */
   3191 	if (sc->sc_type == WM_T_ICH8)
    3192 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3193 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3194 	    || (sc->sc_type == WM_T_PCH))
   3195 		size = WM_RAL_TABSIZE_ICH8;
   3196 	else if (sc->sc_type == WM_T_PCH2)
   3197 		size = WM_RAL_TABSIZE_PCH2;
    3198 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3199 		size = WM_RAL_TABSIZE_PCH_LPT;
   3200 	else if (sc->sc_type == WM_T_82575)
   3201 		size = WM_RAL_TABSIZE_82575;
   3202 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3203 		size = WM_RAL_TABSIZE_82576;
   3204 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3205 		size = WM_RAL_TABSIZE_I350;
   3206 	else
   3207 		size = WM_RAL_TABSIZE;
   3208 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3209 
   3210 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3211 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3212 		switch (i) {
   3213 		case 0:
   3214 			/* We can use all entries */
   3215 			ralmax = size;
   3216 			break;
   3217 		case 1:
   3218 			/* Only RAR[0] */
   3219 			ralmax = 1;
   3220 			break;
   3221 		default:
   3222 			/* available SHRA + RAR[0] */
   3223 			ralmax = i + 1;
   3224 		}
   3225 	} else
   3226 		ralmax = size;
   3227 	for (i = 1; i < size; i++) {
   3228 		if (i < ralmax)
   3229 			wm_set_ral(sc, NULL, i);
   3230 	}
   3231 
   3232 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3233 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3234 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3235 	    || (sc->sc_type == WM_T_PCH_SPT))
   3236 		size = WM_ICH8_MC_TABSIZE;
   3237 	else
   3238 		size = WM_MC_TABSIZE;
   3239 	/* Clear out the multicast table. */
   3240 	for (i = 0; i < size; i++)
   3241 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3242 
   3243 	ETHER_FIRST_MULTI(step, ec, enm);
   3244 	while (enm != NULL) {
   3245 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3246 			/*
   3247 			 * We must listen to a range of multicast addresses.
   3248 			 * For now, just accept all multicasts, rather than
   3249 			 * trying to set only those filter bits needed to match
   3250 			 * the range.  (At this time, the only use of address
   3251 			 * ranges is for IP multicast routing, for which the
   3252 			 * range is big enough to require all bits set.)
   3253 			 */
   3254 			goto allmulti;
   3255 		}
   3256 
   3257 		hash = wm_mchash(sc, enm->enm_addrlo);
   3258 
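		/*
		 * The hash selects a single filter bit: the upper bits pick
		 * the 32-bit MTA register and the low 5 bits pick the bit
		 * within it, e.g. hash 0x3ff is MTA[31] bit 31 on ICH parts.
		 */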
   3259 		reg = (hash >> 5);
   3260 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3261 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3262 		    || (sc->sc_type == WM_T_PCH2)
   3263 		    || (sc->sc_type == WM_T_PCH_LPT)
   3264 		    || (sc->sc_type == WM_T_PCH_SPT))
   3265 			reg &= 0x1f;
   3266 		else
   3267 			reg &= 0x7f;
   3268 		bit = hash & 0x1f;
   3269 
   3270 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3271 		hash |= 1U << bit;
   3272 
   3273 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3274 			/*
   3275 			 * 82544 Errata 9: Certain register cannot be written
   3276 			 * with particular alignments in PCI-X bus operation
   3277 			 * (FCAH, MTA and VFTA).
   3278 			 */
   3279 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3280 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3281 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3282 		} else
   3283 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3284 
   3285 		ETHER_NEXT_MULTI(step, enm);
   3286 	}
   3287 
   3288 	ifp->if_flags &= ~IFF_ALLMULTI;
   3289 	goto setit;
   3290 
   3291  allmulti:
   3292 	ifp->if_flags |= IFF_ALLMULTI;
   3293 	sc->sc_rctl |= RCTL_MPE;
   3294 
   3295  setit:
   3296 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3297 }
   3298 
   3299 /* Reset and init related */
   3300 
   3301 static void
   3302 wm_set_vlan(struct wm_softc *sc)
   3303 {
   3304 
   3305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3306 		device_xname(sc->sc_dev), __func__));
   3307 
   3308 	/* Deal with VLAN enables. */
   3309 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3310 		sc->sc_ctrl |= CTRL_VME;
   3311 	else
   3312 		sc->sc_ctrl &= ~CTRL_VME;
   3313 
   3314 	/* Write the control registers. */
   3315 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3316 }
   3317 
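/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its zero default, set
 *	it to 10ms via GCR on capability-version-1 devices, or to 16ms
 *	via the PCIe Device Control 2 register otherwise.  Completion
 *	timeout resend is disabled in either case.
 */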
   3318 static void
   3319 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3320 {
   3321 	uint32_t gcr;
   3322 	pcireg_t ctrl2;
   3323 
   3324 	gcr = CSR_READ(sc, WMREG_GCR);
   3325 
    3326 	/* Only take action if the timeout value is at its default of 0 */
   3327 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3328 		goto out;
   3329 
   3330 	if ((gcr & GCR_CAP_VER2) == 0) {
   3331 		gcr |= GCR_CMPL_TMOUT_10MS;
   3332 		goto out;
   3333 	}
   3334 
   3335 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3336 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3337 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3338 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3339 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3340 
   3341 out:
   3342 	/* Disable completion timeout resend */
   3343 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3344 
   3345 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3346 }
   3347 
   3348 void
   3349 wm_get_auto_rd_done(struct wm_softc *sc)
   3350 {
   3351 	int i;
   3352 
   3353 	/* wait for eeprom to reload */
   3354 	switch (sc->sc_type) {
   3355 	case WM_T_82571:
   3356 	case WM_T_82572:
   3357 	case WM_T_82573:
   3358 	case WM_T_82574:
   3359 	case WM_T_82583:
   3360 	case WM_T_82575:
   3361 	case WM_T_82576:
   3362 	case WM_T_82580:
   3363 	case WM_T_I350:
   3364 	case WM_T_I354:
   3365 	case WM_T_I210:
   3366 	case WM_T_I211:
   3367 	case WM_T_80003:
   3368 	case WM_T_ICH8:
   3369 	case WM_T_ICH9:
   3370 		for (i = 0; i < 10; i++) {
   3371 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3372 				break;
   3373 			delay(1000);
   3374 		}
   3375 		if (i == 10) {
   3376 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3377 			    "complete\n", device_xname(sc->sc_dev));
   3378 		}
   3379 		break;
   3380 	default:
   3381 		break;
   3382 	}
   3383 }
   3384 
   3385 void
   3386 wm_lan_init_done(struct wm_softc *sc)
   3387 {
   3388 	uint32_t reg = 0;
   3389 	int i;
   3390 
   3391 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3392 		device_xname(sc->sc_dev), __func__));
   3393 
   3394 	/* Wait for eeprom to reload */
   3395 	switch (sc->sc_type) {
   3396 	case WM_T_ICH10:
   3397 	case WM_T_PCH:
   3398 	case WM_T_PCH2:
   3399 	case WM_T_PCH_LPT:
   3400 	case WM_T_PCH_SPT:
   3401 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3402 			reg = CSR_READ(sc, WMREG_STATUS);
   3403 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3404 				break;
   3405 			delay(100);
   3406 		}
   3407 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3408 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3409 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3410 		}
   3411 		break;
   3412 	default:
   3413 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3414 		    __func__);
   3415 		break;
   3416 	}
   3417 
   3418 	reg &= ~STATUS_LAN_INIT_DONE;
   3419 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3420 }
   3421 
   3422 void
   3423 wm_get_cfg_done(struct wm_softc *sc)
   3424 {
   3425 	int mask;
   3426 	uint32_t reg;
   3427 	int i;
   3428 
   3429 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3430 		device_xname(sc->sc_dev), __func__));
   3431 
   3432 	/* Wait for eeprom to reload */
   3433 	switch (sc->sc_type) {
   3434 	case WM_T_82542_2_0:
   3435 	case WM_T_82542_2_1:
   3436 		/* null */
   3437 		break;
   3438 	case WM_T_82543:
   3439 	case WM_T_82544:
   3440 	case WM_T_82540:
   3441 	case WM_T_82545:
   3442 	case WM_T_82545_3:
   3443 	case WM_T_82546:
   3444 	case WM_T_82546_3:
   3445 	case WM_T_82541:
   3446 	case WM_T_82541_2:
   3447 	case WM_T_82547:
   3448 	case WM_T_82547_2:
   3449 	case WM_T_82573:
   3450 	case WM_T_82574:
   3451 	case WM_T_82583:
   3452 		/* generic */
   3453 		delay(10*1000);
   3454 		break;
   3455 	case WM_T_80003:
   3456 	case WM_T_82571:
   3457 	case WM_T_82572:
   3458 	case WM_T_82575:
   3459 	case WM_T_82576:
   3460 	case WM_T_82580:
   3461 	case WM_T_I350:
   3462 	case WM_T_I354:
   3463 	case WM_T_I210:
   3464 	case WM_T_I211:
   3465 		if (sc->sc_type == WM_T_82571) {
   3466 			/* Only 82571 shares port 0 */
   3467 			mask = EEMNGCTL_CFGDONE_0;
   3468 		} else
   3469 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
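		/*
		 * For illustration: LAN function 1 on a non-82571 part
		 * polls EEMNGCTL_CFGDONE_0 << 1, i.e. the CFGDONE bit of
		 * its own port.
		 */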
   3470 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3471 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3472 				break;
   3473 			delay(1000);
   3474 		}
   3475 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3476 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3477 				device_xname(sc->sc_dev), __func__));
   3478 		}
   3479 		break;
   3480 	case WM_T_ICH8:
   3481 	case WM_T_ICH9:
   3482 	case WM_T_ICH10:
   3483 	case WM_T_PCH:
   3484 	case WM_T_PCH2:
   3485 	case WM_T_PCH_LPT:
   3486 	case WM_T_PCH_SPT:
   3487 		delay(10*1000);
   3488 		if (sc->sc_type >= WM_T_ICH10)
   3489 			wm_lan_init_done(sc);
   3490 		else
   3491 			wm_get_auto_rd_done(sc);
   3492 
   3493 		reg = CSR_READ(sc, WMREG_STATUS);
   3494 		if ((reg & STATUS_PHYRA) != 0)
   3495 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3496 		break;
   3497 	default:
   3498 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3499 		    __func__);
   3500 		break;
   3501 	}
   3502 }
   3503 
   3504 /* Init hardware bits */
   3505 void
   3506 wm_initialize_hardware_bits(struct wm_softc *sc)
   3507 {
   3508 	uint32_t tarc0, tarc1, reg;
   3509 
   3510 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3511 		device_xname(sc->sc_dev), __func__));
   3512 
   3513 	/* For 82571 variant, 80003 and ICHs */
   3514 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3515 	    || (sc->sc_type >= WM_T_80003)) {
   3516 
   3517 		/* Transmit Descriptor Control 0 */
   3518 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3519 		reg |= TXDCTL_COUNT_DESC;
   3520 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3521 
   3522 		/* Transmit Descriptor Control 1 */
   3523 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3524 		reg |= TXDCTL_COUNT_DESC;
   3525 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3526 
   3527 		/* TARC0 */
   3528 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3529 		switch (sc->sc_type) {
   3530 		case WM_T_82571:
   3531 		case WM_T_82572:
   3532 		case WM_T_82573:
   3533 		case WM_T_82574:
   3534 		case WM_T_82583:
   3535 		case WM_T_80003:
   3536 			/* Clear bits 30..27 */
   3537 			tarc0 &= ~__BITS(30, 27);
   3538 			break;
   3539 		default:
   3540 			break;
   3541 		}
   3542 
   3543 		switch (sc->sc_type) {
   3544 		case WM_T_82571:
   3545 		case WM_T_82572:
   3546 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3547 
   3548 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3549 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3550 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3551 			/* 8257[12] Errata No.7 */
   3552 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   3553 
   3554 			/* TARC1 bit 28 */
   3555 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3556 				tarc1 &= ~__BIT(28);
   3557 			else
   3558 				tarc1 |= __BIT(28);
   3559 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3560 
   3561 			/*
   3562 			 * 8257[12] Errata No.13
    3563 			 * Disable Dynamic Clock Gating.
   3564 			 */
   3565 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3566 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3567 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3568 			break;
   3569 		case WM_T_82573:
   3570 		case WM_T_82574:
   3571 		case WM_T_82583:
   3572 			if ((sc->sc_type == WM_T_82574)
   3573 			    || (sc->sc_type == WM_T_82583))
   3574 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3575 
   3576 			/* Extended Device Control */
   3577 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3578 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3579 			reg |= __BIT(22);	/* Set bit 22 */
   3580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3581 
   3582 			/* Device Control */
   3583 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3584 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3585 
   3586 			/* PCIe Control Register */
   3587 			/*
   3588 			 * 82573 Errata (unknown).
   3589 			 *
   3590 			 * 82574 Errata 25 and 82583 Errata 12
   3591 			 * "Dropped Rx Packets":
    3592 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3593 			 */
   3594 			reg = CSR_READ(sc, WMREG_GCR);
   3595 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3596 			CSR_WRITE(sc, WMREG_GCR, reg);
   3597 
   3598 			if ((sc->sc_type == WM_T_82574)
   3599 			    || (sc->sc_type == WM_T_82583)) {
   3600 				/*
   3601 				 * Document says this bit must be set for
   3602 				 * proper operation.
   3603 				 */
   3604 				reg = CSR_READ(sc, WMREG_GCR);
   3605 				reg |= __BIT(22);
   3606 				CSR_WRITE(sc, WMREG_GCR, reg);
   3607 
   3608 				/*
    3609 				 * Apply a workaround for the hardware
    3610 				 * errata documented in the errata sheets.
    3611 				 * It fixes an issue where unreliable PCIe
    3612 				 * completions can occur, particularly with
    3613 				 * ASPM enabled. Without the fix, the issue
    3614 				 * can cause Tx timeouts.
   3615 				 */
   3616 				reg = CSR_READ(sc, WMREG_GCR2);
   3617 				reg |= __BIT(0);
   3618 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3619 			}
   3620 			break;
   3621 		case WM_T_80003:
   3622 			/* TARC0 */
   3623 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3624 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3625 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   3626 
   3627 			/* TARC1 bit 28 */
   3628 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3629 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3630 				tarc1 &= ~__BIT(28);
   3631 			else
   3632 				tarc1 |= __BIT(28);
   3633 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3634 			break;
   3635 		case WM_T_ICH8:
   3636 		case WM_T_ICH9:
   3637 		case WM_T_ICH10:
   3638 		case WM_T_PCH:
   3639 		case WM_T_PCH2:
   3640 		case WM_T_PCH_LPT:
   3641 		case WM_T_PCH_SPT:
   3642 			/* TARC0 */
   3643 			if ((sc->sc_type == WM_T_ICH8)
   3644 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3645 				/* Set TARC0 bits 29 and 28 */
   3646 				tarc0 |= __BITS(29, 28);
   3647 			}
   3648 			/* Set TARC0 bits 23,24,26,27 */
   3649 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3650 
   3651 			/* CTRL_EXT */
   3652 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3653 			reg |= __BIT(22);	/* Set bit 22 */
   3654 			/*
   3655 			 * Enable PHY low-power state when MAC is at D3
   3656 			 * w/o WoL
   3657 			 */
   3658 			if (sc->sc_type >= WM_T_PCH)
   3659 				reg |= CTRL_EXT_PHYPDEN;
   3660 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3661 
   3662 			/* TARC1 */
   3663 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3664 			/* bit 28 */
   3665 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3666 				tarc1 &= ~__BIT(28);
   3667 			else
   3668 				tarc1 |= __BIT(28);
   3669 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3670 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3671 
   3672 			/* Device Status */
   3673 			if (sc->sc_type == WM_T_ICH8) {
   3674 				reg = CSR_READ(sc, WMREG_STATUS);
   3675 				reg &= ~__BIT(31);
   3676 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3677 
   3678 			}
   3679 
   3680 			/* IOSFPC */
   3681 			if (sc->sc_type == WM_T_PCH_SPT) {
   3682 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3683 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3684 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3685 			}
   3686 			/*
    3687 			 * To work around a descriptor data corruption issue
    3688 			 * seen with NFS v2 UDP traffic, simply disable the
    3689 			 * NFS filtering capability.
   3690 			 */
   3691 			reg = CSR_READ(sc, WMREG_RFCTL);
   3692 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3693 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3694 			break;
   3695 		default:
   3696 			break;
   3697 		}
   3698 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3699 
   3700 		/*
   3701 		 * 8257[12] Errata No.52 and some others.
   3702 		 * Avoid RSS Hash Value bug.
   3703 		 */
   3704 		switch (sc->sc_type) {
   3705 		case WM_T_82571:
   3706 		case WM_T_82572:
   3707 		case WM_T_82573:
   3708 		case WM_T_80003:
   3709 		case WM_T_ICH8:
   3710 			reg = CSR_READ(sc, WMREG_RFCTL);
    3711 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3712 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3713 			break;
   3714 		default:
   3715 			break;
   3716 		}
   3717 	}
   3718 }
   3719 
   3720 static uint32_t
   3721 wm_rxpbs_adjust_82580(uint32_t val)
   3722 {
   3723 	uint32_t rv = 0;
   3724 
   3725 	if (val < __arraycount(wm_82580_rxpbs_table))
   3726 		rv = wm_82580_rxpbs_table[val];
   3727 
   3728 	return rv;
   3729 }
   3730 
   3731 /*
   3732  * wm_reset_phy:
   3733  *
   3734  *	generic PHY reset function.
   3735  *	Same as e1000_phy_hw_reset_generic()
   3736  */
   3737 static void
   3738 wm_reset_phy(struct wm_softc *sc)
   3739 {
   3740 	uint32_t reg;
   3741 
   3742 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3743 		device_xname(sc->sc_dev), __func__));
   3744 	if (wm_phy_resetisblocked(sc))
   3745 		return;
   3746 
   3747 	sc->phy.acquire(sc);
   3748 
   3749 	reg = CSR_READ(sc, WMREG_CTRL);
   3750 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3751 	CSR_WRITE_FLUSH(sc);
   3752 
   3753 	delay(sc->phy.reset_delay_us);
   3754 
   3755 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3756 	CSR_WRITE_FLUSH(sc);
   3757 
   3758 	delay(150);
   3759 
   3760 	sc->phy.release(sc);
   3761 
   3762 	wm_get_cfg_done(sc);
   3763 }
   3764 
   3765 static void
   3766 wm_flush_desc_rings(struct wm_softc *sc)
   3767 {
   3768 	pcireg_t preg;
   3769 	uint32_t reg;
   3770 	int nexttx;
   3771 
   3772 	/* First, disable MULR fix in FEXTNVM11 */
   3773 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3774 	reg |= FEXTNVM11_DIS_MULRFIX;
   3775 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3776 
   3777 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3778 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3779 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3780 		struct wm_txqueue *txq;
   3781 		wiseman_txdesc_t *txd;
   3782 
   3783 		/* TX */
   3784 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3785 		    device_xname(sc->sc_dev), preg, reg);
   3786 		reg = CSR_READ(sc, WMREG_TCTL);
   3787 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3788 
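		/*
		 * Queue a single dummy 512-byte descriptor and advance TDT
		 * so the hardware drains its internal descriptor cache.
		 */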
   3789 		txq = &sc->sc_queue[0].wmq_txq;
   3790 		nexttx = txq->txq_next;
   3791 		txd = &txq->txq_descs[nexttx];
   3792 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3793 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3794 		txd->wtx_fields.wtxu_status = 0;
   3795 		txd->wtx_fields.wtxu_options = 0;
   3796 		txd->wtx_fields.wtxu_vlan = 0;
   3797 
   3798 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3799 			BUS_SPACE_BARRIER_WRITE);
   3800 
   3801 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3802 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3803 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3804 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3805 		delay(250);
   3806 	}
   3807 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3808 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3809 		uint32_t rctl;
   3810 
   3811 		/* RX */
   3812 		printf("%s: Need RX flush (reg = %08x)\n",
   3813 		    device_xname(sc->sc_dev), preg);
   3814 		rctl = CSR_READ(sc, WMREG_RCTL);
   3815 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3816 		CSR_WRITE_FLUSH(sc);
   3817 		delay(150);
   3818 
   3819 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3820 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3821 		reg &= 0xffffc000;
   3822 		/*
   3823 		 * update thresholds: prefetch threshold to 31, host threshold
   3824 		 * to 1 and make sure the granularity is "descriptors" and not
   3825 		 * "cache lines"
   3826 		 */
   3827 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3828 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3829 
   3830 		/*
   3831 		 * momentarily enable the RX ring for the changes to take
   3832 		 * effect
   3833 		 */
   3834 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3835 		CSR_WRITE_FLUSH(sc);
   3836 		delay(150);
   3837 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3838 	}
   3839 }
   3840 
   3841 /*
   3842  * wm_reset:
   3843  *
   3844  *	Reset the i82542 chip.
   3845  */
   3846 static void
   3847 wm_reset(struct wm_softc *sc)
   3848 {
   3849 	int phy_reset = 0;
   3850 	int i, error = 0;
   3851 	uint32_t reg;
   3852 
   3853 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3854 		device_xname(sc->sc_dev), __func__));
   3855 	KASSERT(sc->sc_type != 0);
   3856 
   3857 	/*
   3858 	 * Allocate on-chip memory according to the MTU size.
   3859 	 * The Packet Buffer Allocation register must be written
   3860 	 * before the chip is reset.
   3861 	 */
   3862 	switch (sc->sc_type) {
   3863 	case WM_T_82547:
   3864 	case WM_T_82547_2:
   3865 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3866 		    PBA_22K : PBA_30K;
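		/*
		 * The 82547 packet buffer is 40KB; the PBA value is the RX
		 * share in KB and the remainder becomes the TX FIFO, so
		 * e.g. PBA_30K leaves a 10KB TX FIFO.
		 */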
   3867 		for (i = 0; i < sc->sc_nqueues; i++) {
   3868 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3869 			txq->txq_fifo_head = 0;
   3870 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3871 			txq->txq_fifo_size =
   3872 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3873 			txq->txq_fifo_stall = 0;
   3874 		}
   3875 		break;
   3876 	case WM_T_82571:
   3877 	case WM_T_82572:
   3878 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3879 	case WM_T_80003:
   3880 		sc->sc_pba = PBA_32K;
   3881 		break;
   3882 	case WM_T_82573:
   3883 		sc->sc_pba = PBA_12K;
   3884 		break;
   3885 	case WM_T_82574:
   3886 	case WM_T_82583:
   3887 		sc->sc_pba = PBA_20K;
   3888 		break;
   3889 	case WM_T_82576:
   3890 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3891 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3892 		break;
   3893 	case WM_T_82580:
   3894 	case WM_T_I350:
   3895 	case WM_T_I354:
   3896 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3897 		break;
   3898 	case WM_T_I210:
   3899 	case WM_T_I211:
   3900 		sc->sc_pba = PBA_34K;
   3901 		break;
   3902 	case WM_T_ICH8:
   3903 		/* Workaround for a bit corruption issue in FIFO memory */
   3904 		sc->sc_pba = PBA_8K;
   3905 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3906 		break;
   3907 	case WM_T_ICH9:
   3908 	case WM_T_ICH10:
   3909 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3910 		    PBA_14K : PBA_10K;
   3911 		break;
   3912 	case WM_T_PCH:
   3913 	case WM_T_PCH2:
   3914 	case WM_T_PCH_LPT:
   3915 	case WM_T_PCH_SPT:
   3916 		sc->sc_pba = PBA_26K;
   3917 		break;
   3918 	default:
   3919 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3920 		    PBA_40K : PBA_48K;
   3921 		break;
   3922 	}
   3923 	/*
   3924 	 * Only old or non-multiqueue devices have the PBA register
   3925 	 * XXX Need special handling for 82575.
   3926 	 */
   3927 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3928 	    || (sc->sc_type == WM_T_82575))
   3929 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3930 
   3931 	/* Prevent the PCI-E bus from sticking */
   3932 	if (sc->sc_flags & WM_F_PCIE) {
   3933 		int timeout = 800;
   3934 
   3935 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3936 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3937 
   3938 		while (timeout--) {
   3939 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3940 			    == 0)
   3941 				break;
   3942 			delay(100);
   3943 		}
   3944 	}
   3945 
   3946 	/* Set the completion timeout for interface */
   3947 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3948 	    || (sc->sc_type == WM_T_82580)
   3949 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3950 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3951 		wm_set_pcie_completion_timeout(sc);
   3952 
   3953 	/* Clear interrupt */
   3954 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3955 	if (sc->sc_nintrs > 1) {
   3956 		if (sc->sc_type != WM_T_82574) {
   3957 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3958 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3959 		} else {
   3960 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3961 		}
   3962 	}
   3963 
   3964 	/* Stop the transmit and receive processes. */
   3965 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3966 	sc->sc_rctl &= ~RCTL_EN;
   3967 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3968 	CSR_WRITE_FLUSH(sc);
   3969 
   3970 	/* XXX set_tbi_sbp_82543() */
   3971 
   3972 	delay(10*1000);
   3973 
   3974 	/* Must acquire the MDIO ownership before MAC reset */
   3975 	switch (sc->sc_type) {
   3976 	case WM_T_82573:
   3977 	case WM_T_82574:
   3978 	case WM_T_82583:
   3979 		error = wm_get_hw_semaphore_82573(sc);
   3980 		break;
   3981 	default:
   3982 		break;
   3983 	}
   3984 
   3985 	/*
   3986 	 * 82541 Errata 29? & 82547 Errata 28?
   3987 	 * See also the description about PHY_RST bit in CTRL register
   3988 	 * in 8254x_GBe_SDM.pdf.
   3989 	 */
   3990 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3991 		CSR_WRITE(sc, WMREG_CTRL,
   3992 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3993 		CSR_WRITE_FLUSH(sc);
   3994 		delay(5000);
   3995 	}
   3996 
   3997 	switch (sc->sc_type) {
   3998 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3999 	case WM_T_82541:
   4000 	case WM_T_82541_2:
   4001 	case WM_T_82547:
   4002 	case WM_T_82547_2:
   4003 		/*
   4004 		 * On some chipsets, a reset through a memory-mapped write
   4005 		 * cycle can cause the chip to reset before completing the
   4006 		 * write cycle.  This causes major headache that can be
   4007 		 * avoided by issuing the reset via indirect register writes
   4008 		 * through I/O space.
   4009 		 *
   4010 		 * So, if we successfully mapped the I/O BAR at attach time,
   4011 		 * use that.  Otherwise, try our luck with a memory-mapped
   4012 		 * reset.
   4013 		 */
   4014 		if (sc->sc_flags & WM_F_IOH_VALID)
   4015 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4016 		else
   4017 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4018 		break;
   4019 	case WM_T_82545_3:
   4020 	case WM_T_82546_3:
   4021 		/* Use the shadow control register on these chips. */
   4022 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4023 		break;
   4024 	case WM_T_80003:
   4025 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4026 		sc->phy.acquire(sc);
   4027 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4028 		sc->phy.release(sc);
   4029 		break;
   4030 	case WM_T_ICH8:
   4031 	case WM_T_ICH9:
   4032 	case WM_T_ICH10:
   4033 	case WM_T_PCH:
   4034 	case WM_T_PCH2:
   4035 	case WM_T_PCH_LPT:
   4036 	case WM_T_PCH_SPT:
   4037 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4038 		if (wm_phy_resetisblocked(sc) == false) {
   4039 			/*
   4040 			 * Gate automatic PHY configuration by hardware on
   4041 			 * non-managed 82579
   4042 			 */
   4043 			if ((sc->sc_type == WM_T_PCH2)
   4044 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4045 				== 0))
   4046 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4047 
   4048 			reg |= CTRL_PHY_RESET;
   4049 			phy_reset = 1;
   4050 		} else
   4051 			printf("XXX reset is blocked!!!\n");
   4052 		sc->phy.acquire(sc);
   4053 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4054 		/* Don't insert a completion barrier during reset */
   4055 		delay(20*1000);
   4056 		mutex_exit(sc->sc_ich_phymtx);
   4057 		break;
   4058 	case WM_T_82580:
   4059 	case WM_T_I350:
   4060 	case WM_T_I354:
   4061 	case WM_T_I210:
   4062 	case WM_T_I211:
   4063 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4064 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4065 			CSR_WRITE_FLUSH(sc);
   4066 		delay(5000);
   4067 		break;
   4068 	case WM_T_82542_2_0:
   4069 	case WM_T_82542_2_1:
   4070 	case WM_T_82543:
   4071 	case WM_T_82540:
   4072 	case WM_T_82545:
   4073 	case WM_T_82546:
   4074 	case WM_T_82571:
   4075 	case WM_T_82572:
   4076 	case WM_T_82573:
   4077 	case WM_T_82574:
   4078 	case WM_T_82575:
   4079 	case WM_T_82576:
   4080 	case WM_T_82583:
   4081 	default:
   4082 		/* Everything else can safely use the documented method. */
   4083 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4084 		break;
   4085 	}
   4086 
   4087 	/* Must release the MDIO ownership after MAC reset */
   4088 	switch (sc->sc_type) {
   4089 	case WM_T_82573:
   4090 	case WM_T_82574:
   4091 	case WM_T_82583:
   4092 		if (error == 0)
   4093 			wm_put_hw_semaphore_82573(sc);
   4094 		break;
   4095 	default:
   4096 		break;
   4097 	}
   4098 
   4099 	if (phy_reset != 0)
   4100 		wm_get_cfg_done(sc);
   4101 
   4102 	/* reload EEPROM */
   4103 	switch (sc->sc_type) {
   4104 	case WM_T_82542_2_0:
   4105 	case WM_T_82542_2_1:
   4106 	case WM_T_82543:
   4107 	case WM_T_82544:
   4108 		delay(10);
   4109 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4110 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4111 		CSR_WRITE_FLUSH(sc);
   4112 		delay(2000);
   4113 		break;
   4114 	case WM_T_82540:
   4115 	case WM_T_82545:
   4116 	case WM_T_82545_3:
   4117 	case WM_T_82546:
   4118 	case WM_T_82546_3:
   4119 		delay(5*1000);
   4120 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4121 		break;
   4122 	case WM_T_82541:
   4123 	case WM_T_82541_2:
   4124 	case WM_T_82547:
   4125 	case WM_T_82547_2:
   4126 		delay(20000);
   4127 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4128 		break;
   4129 	case WM_T_82571:
   4130 	case WM_T_82572:
   4131 	case WM_T_82573:
   4132 	case WM_T_82574:
   4133 	case WM_T_82583:
   4134 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4135 			delay(10);
   4136 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4137 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4138 			CSR_WRITE_FLUSH(sc);
   4139 		}
   4140 		/* check EECD_EE_AUTORD */
   4141 		wm_get_auto_rd_done(sc);
   4142 		/*
   4143 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4144 		 * is set.
   4145 		 */
   4146 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4147 		    || (sc->sc_type == WM_T_82583))
   4148 			delay(25*1000);
   4149 		break;
   4150 	case WM_T_82575:
   4151 	case WM_T_82576:
   4152 	case WM_T_82580:
   4153 	case WM_T_I350:
   4154 	case WM_T_I354:
   4155 	case WM_T_I210:
   4156 	case WM_T_I211:
   4157 	case WM_T_80003:
   4158 		/* check EECD_EE_AUTORD */
   4159 		wm_get_auto_rd_done(sc);
   4160 		break;
   4161 	case WM_T_ICH8:
   4162 	case WM_T_ICH9:
   4163 	case WM_T_ICH10:
   4164 	case WM_T_PCH:
   4165 	case WM_T_PCH2:
   4166 	case WM_T_PCH_LPT:
   4167 	case WM_T_PCH_SPT:
   4168 		break;
   4169 	default:
   4170 		panic("%s: unknown type\n", __func__);
   4171 	}
   4172 
   4173 	/* Check whether EEPROM is present or not */
   4174 	switch (sc->sc_type) {
   4175 	case WM_T_82575:
   4176 	case WM_T_82576:
   4177 	case WM_T_82580:
   4178 	case WM_T_I350:
   4179 	case WM_T_I354:
   4180 	case WM_T_ICH8:
   4181 	case WM_T_ICH9:
   4182 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4183 			/* Not found */
   4184 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4185 			if (sc->sc_type == WM_T_82575)
   4186 				wm_reset_init_script_82575(sc);
   4187 		}
   4188 		break;
   4189 	default:
   4190 		break;
   4191 	}
   4192 
   4193 	if ((sc->sc_type == WM_T_82580)
   4194 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4195 		/* clear global device reset status bit */
   4196 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4197 	}
   4198 
   4199 	/* Clear any pending interrupt events. */
   4200 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4201 	reg = CSR_READ(sc, WMREG_ICR);
   4202 	if (sc->sc_nintrs > 1) {
   4203 		if (sc->sc_type != WM_T_82574) {
   4204 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4205 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4206 		} else
   4207 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4208 	}
   4209 
   4210 	/* reload sc_ctrl */
   4211 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4212 
   4213 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4214 		wm_set_eee_i350(sc);
   4215 
   4216 	/* Clear the host wakeup bit after lcd reset */
   4217 	if (sc->sc_type >= WM_T_PCH) {
   4218 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4219 		    BM_PORT_GEN_CFG);
   4220 		reg &= ~BM_WUC_HOST_WU_BIT;
   4221 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4222 		    BM_PORT_GEN_CFG, reg);
   4223 	}
   4224 
   4225 	/*
   4226 	 * For PCH, this write will make sure that any noise will be detected
   4227 	 * as a CRC error and be dropped rather than show up as a bad packet
   4228 	 * to the DMA engine
   4229 	 */
   4230 	if (sc->sc_type == WM_T_PCH)
   4231 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4232 
   4233 	if (sc->sc_type >= WM_T_82544)
   4234 		CSR_WRITE(sc, WMREG_WUC, 0);
   4235 
   4236 	wm_reset_mdicnfg_82580(sc);
   4237 
   4238 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4239 		wm_pll_workaround_i210(sc);
   4240 }
   4241 
   4242 /*
   4243  * wm_add_rxbuf:
   4244  *
    4245  *	Add a receive buffer to the indicated descriptor.
   4246  */
   4247 static int
   4248 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4249 {
   4250 	struct wm_softc *sc = rxq->rxq_sc;
   4251 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4252 	struct mbuf *m;
   4253 	int error;
   4254 
   4255 	KASSERT(mutex_owned(rxq->rxq_lock));
   4256 
   4257 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4258 	if (m == NULL)
   4259 		return ENOBUFS;
   4260 
   4261 	MCLGET(m, M_DONTWAIT);
   4262 	if ((m->m_flags & M_EXT) == 0) {
   4263 		m_freem(m);
   4264 		return ENOBUFS;
   4265 	}
   4266 
   4267 	if (rxs->rxs_mbuf != NULL)
   4268 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4269 
   4270 	rxs->rxs_mbuf = m;
   4271 
   4272 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4273 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4274 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4275 	if (error) {
   4276 		/* XXX XXX XXX */
   4277 		aprint_error_dev(sc->sc_dev,
   4278 		    "unable to load rx DMA map %d, error = %d\n",
   4279 		    idx, error);
   4280 		panic("wm_add_rxbuf");
   4281 	}
   4282 
   4283 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4284 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4285 
   4286 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4287 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4288 			wm_init_rxdesc(rxq, idx);
   4289 	} else
   4290 		wm_init_rxdesc(rxq, idx);
   4291 
   4292 	return 0;
   4293 }
   4294 
   4295 /*
   4296  * wm_rxdrain:
   4297  *
   4298  *	Drain the receive queue.
   4299  */
   4300 static void
   4301 wm_rxdrain(struct wm_rxqueue *rxq)
   4302 {
   4303 	struct wm_softc *sc = rxq->rxq_sc;
   4304 	struct wm_rxsoft *rxs;
   4305 	int i;
   4306 
   4307 	KASSERT(mutex_owned(rxq->rxq_lock));
   4308 
   4309 	for (i = 0; i < WM_NRXDESC; i++) {
   4310 		rxs = &rxq->rxq_soft[i];
   4311 		if (rxs->rxs_mbuf != NULL) {
   4312 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4313 			m_freem(rxs->rxs_mbuf);
   4314 			rxs->rxs_mbuf = NULL;
   4315 		}
   4316 	}
   4317 }
   4318 
   4319 
   4320 /*
   4321  * XXX copy from FreeBSD's sys/net/rss_config.c
   4322  */
   4323 /*
   4324  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4325  * effectiveness may be limited by algorithm choice and available entropy
   4326  * during the boot.
   4327  *
   4328  * XXXRW: And that we don't randomize it yet!
   4329  *
   4330  * This is the default Microsoft RSS specification key which is also
   4331  * the Chelsio T5 firmware default key.
   4332  */
   4333 #define RSS_KEYSIZE 40
   4334 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4335 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4336 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4337 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4338 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4339 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4340 };
   4341 
   4342 /*
   4343  * Caller must pass an array of size sizeof(rss_key).
   4344  *
   4345  * XXX
    4346  * As if_ixgbe may also use this function, it should not be an
    4347  * if_wm specific function.
   4348  */
   4349 static void
   4350 wm_rss_getkey(uint8_t *key)
   4351 {
   4352 
   4353 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4354 }
   4355 
   4356 /*
   4357  * Setup registers for RSS.
   4358  *
    4359  * XXX VMDq is not yet supported
   4360  */
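/*
 * For illustration: with sc_nqueues = 4, the redirection table (RETA)
 * below is filled round-robin (qid = i % sc_nqueues: 0,1,2,3,0,...),
 * so the low bits of a packet's Toeplitz hash select one of the four
 * RX queues.
 */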
   4361 static void
   4362 wm_init_rss(struct wm_softc *sc)
   4363 {
   4364 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4365 	int i;
   4366 
   4367 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4368 
   4369 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4370 		int qid, reta_ent;
   4371 
   4372 		qid  = i % sc->sc_nqueues;
    4373 		switch (sc->sc_type) {
   4374 		case WM_T_82574:
   4375 			reta_ent = __SHIFTIN(qid,
   4376 			    RETA_ENT_QINDEX_MASK_82574);
   4377 			break;
   4378 		case WM_T_82575:
   4379 			reta_ent = __SHIFTIN(qid,
   4380 			    RETA_ENT_QINDEX1_MASK_82575);
   4381 			break;
   4382 		default:
   4383 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4384 			break;
   4385 		}
   4386 
   4387 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4388 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4389 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4390 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4391 	}
   4392 
   4393 	wm_rss_getkey((uint8_t *)rss_key);
   4394 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4395 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4396 
   4397 	if (sc->sc_type == WM_T_82574)
   4398 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4399 	else
   4400 		mrqc = MRQC_ENABLE_RSS_MQ;
   4401 
   4402 	/* XXXX
   4403 	 * The same as FreeBSD igb.
    4404 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4405 	 */
   4406 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4407 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4408 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4409 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4410 
   4411 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4412 }
   4413 
   4414 /*
    4415  * Adjust the number of TX and RX queues that the system actually uses.
    4416  *
    4417  * The result is limited by the following parameters:
    4418  *     - The number of hardware queues
   4419  *     - The number of MSI-X vectors (= "nvectors" argument)
   4420  *     - ncpu
   4421  */
   4422 static void
   4423 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4424 {
   4425 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4426 
   4427 	if (nvectors < 2) {
   4428 		sc->sc_nqueues = 1;
   4429 		return;
   4430 	}
   4431 
    4432 	switch (sc->sc_type) {
   4433 	case WM_T_82572:
   4434 		hw_ntxqueues = 2;
   4435 		hw_nrxqueues = 2;
   4436 		break;
   4437 	case WM_T_82574:
   4438 		hw_ntxqueues = 2;
   4439 		hw_nrxqueues = 2;
   4440 		break;
   4441 	case WM_T_82575:
   4442 		hw_ntxqueues = 4;
   4443 		hw_nrxqueues = 4;
   4444 		break;
   4445 	case WM_T_82576:
   4446 		hw_ntxqueues = 16;
   4447 		hw_nrxqueues = 16;
   4448 		break;
   4449 	case WM_T_82580:
   4450 	case WM_T_I350:
   4451 	case WM_T_I354:
   4452 		hw_ntxqueues = 8;
   4453 		hw_nrxqueues = 8;
   4454 		break;
   4455 	case WM_T_I210:
   4456 		hw_ntxqueues = 4;
   4457 		hw_nrxqueues = 4;
   4458 		break;
   4459 	case WM_T_I211:
   4460 		hw_ntxqueues = 2;
   4461 		hw_nrxqueues = 2;
   4462 		break;
   4463 		/*
    4464 		 * As the Ethernet controllers below do not support MSI-X,
    4465 		 * this driver does not use multiqueue for them.
   4466 		 *     - WM_T_80003
   4467 		 *     - WM_T_ICH8
   4468 		 *     - WM_T_ICH9
   4469 		 *     - WM_T_ICH10
   4470 		 *     - WM_T_PCH
   4471 		 *     - WM_T_PCH2
   4472 		 *     - WM_T_PCH_LPT
   4473 		 */
   4474 	default:
   4475 		hw_ntxqueues = 1;
   4476 		hw_nrxqueues = 1;
   4477 		break;
   4478 	}
   4479 
   4480 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4481 
   4482 	/*
    4483 	 * Using more queues than MSI-X vectors cannot improve scaling, so
    4484 	 * limit the number of queues actually used.
   4485 	 */
   4486 	if (nvectors < hw_nqueues + 1) {
   4487 		sc->sc_nqueues = nvectors - 1;
   4488 	} else {
   4489 		sc->sc_nqueues = hw_nqueues;
   4490 	}
   4491 
   4492 	/*
    4493 	 * Using more queues than CPUs cannot improve scaling, so limit
    4494 	 * the number of queues actually used.
   4495 	 */
   4496 	if (ncpu < sc->sc_nqueues)
   4497 		sc->sc_nqueues = ncpu;
   4498 }
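/*
 * For illustration: an 82576 (16 hardware queues) attached with 5 MSI-X
 * vectors on a 4-CPU machine starts from min(16, 16) = 16 queues, is
 * limited to nvectors - 1 = 4, and ncpu = 4 leaves it at 4.
 */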
   4499 
   4500 /*
   4501  * Both single interrupt MSI and INTx can use this function.
   4502  */
   4503 static int
   4504 wm_setup_legacy(struct wm_softc *sc)
   4505 {
   4506 	pci_chipset_tag_t pc = sc->sc_pc;
   4507 	const char *intrstr = NULL;
   4508 	char intrbuf[PCI_INTRSTR_LEN];
   4509 	int error;
   4510 
   4511 	error = wm_alloc_txrx_queues(sc);
   4512 	if (error) {
   4513 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4514 		    error);
   4515 		return ENOMEM;
   4516 	}
   4517 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4518 	    sizeof(intrbuf));
   4519 #ifdef WM_MPSAFE
   4520 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4521 #endif
   4522 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4523 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4524 	if (sc->sc_ihs[0] == NULL) {
    4525 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4526 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4527 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4528 		return ENOMEM;
   4529 	}
   4530 
   4531 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4532 	sc->sc_nintrs = 1;
   4533 	return 0;
   4534 }
   4535 
   4536 static int
   4537 wm_setup_msix(struct wm_softc *sc)
   4538 {
   4539 	void *vih;
   4540 	kcpuset_t *affinity;
   4541 	int qidx, error, intr_idx, txrx_established;
   4542 	pci_chipset_tag_t pc = sc->sc_pc;
   4543 	const char *intrstr = NULL;
   4544 	char intrbuf[PCI_INTRSTR_LEN];
   4545 	char intr_xname[INTRDEVNAMEBUF];
   4546 
   4547 	if (sc->sc_nqueues < ncpu) {
   4548 		/*
    4549 		 * To avoid collisions with other devices' interrupts, the
    4550 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   4551 		 */
   4552 		sc->sc_affinity_offset = 1;
   4553 	} else {
   4554 		/*
    4555 		 * In this case, this device uses all CPUs, so for readability
    4556 		 * the affinity cpu_index matches the MSI-X vector number.
   4557 		 */
   4558 		sc->sc_affinity_offset = 0;
   4559 	}
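	/*
	 * For illustration: with 4 queues on an 8-CPU system, the
	 * TXRX0..TXRX3 vectors are bound to CPU#1..CPU#4 below, and the
	 * LINK vector keeps the default affinity.
	 */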
   4560 
   4561 	error = wm_alloc_txrx_queues(sc);
   4562 	if (error) {
   4563 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4564 		    error);
   4565 		return ENOMEM;
   4566 	}
   4567 
   4568 	kcpuset_create(&affinity, false);
   4569 	intr_idx = 0;
   4570 
   4571 	/*
   4572 	 * TX and RX
   4573 	 */
   4574 	txrx_established = 0;
   4575 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4576 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4577 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4578 
   4579 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4580 		    sizeof(intrbuf));
   4581 #ifdef WM_MPSAFE
   4582 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4583 		    PCI_INTR_MPSAFE, true);
   4584 #endif
   4585 		memset(intr_xname, 0, sizeof(intr_xname));
   4586 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4587 		    device_xname(sc->sc_dev), qidx);
   4588 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4589 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4590 		if (vih == NULL) {
   4591 			aprint_error_dev(sc->sc_dev,
   4592 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4593 			    intrstr ? " at " : "",
   4594 			    intrstr ? intrstr : "");
   4595 
   4596 			goto fail;
   4597 		}
   4598 		kcpuset_zero(affinity);
   4599 		/* Round-robin affinity */
   4600 		kcpuset_set(affinity, affinity_to);
   4601 		error = interrupt_distribute(vih, affinity, NULL);
   4602 		if (error == 0) {
   4603 			aprint_normal_dev(sc->sc_dev,
   4604 			    "for TX and RX interrupting at %s affinity to %u\n",
   4605 			    intrstr, affinity_to);
   4606 		} else {
   4607 			aprint_normal_dev(sc->sc_dev,
   4608 			    "for TX and RX interrupting at %s\n", intrstr);
   4609 		}
   4610 		sc->sc_ihs[intr_idx] = vih;
    4611 		wmq->wmq_id = qidx;
   4612 		wmq->wmq_intr_idx = intr_idx;
   4613 
   4614 		txrx_established++;
   4615 		intr_idx++;
   4616 	}
   4617 
   4618 	/*
   4619 	 * LINK
   4620 	 */
   4621 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4622 	    sizeof(intrbuf));
   4623 #ifdef WM_MPSAFE
   4624 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4625 #endif
   4626 	memset(intr_xname, 0, sizeof(intr_xname));
   4627 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4628 	    device_xname(sc->sc_dev));
   4629 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4630 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4631 	if (vih == NULL) {
   4632 		aprint_error_dev(sc->sc_dev,
    4633 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4634 		    intrstr ? " at " : "",
   4635 		    intrstr ? intrstr : "");
   4636 
   4637 		goto fail;
   4638 	}
    4639 	/* Keep the default affinity for the LINK interrupt. */
   4640 	aprint_normal_dev(sc->sc_dev,
   4641 	    "for LINK interrupting at %s\n", intrstr);
   4642 	sc->sc_ihs[intr_idx] = vih;
   4643 	sc->sc_link_intr_idx = intr_idx;
   4644 
   4645 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4646 	kcpuset_destroy(affinity);
   4647 	return 0;
   4648 
   4649  fail:
   4650 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4651 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4652 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4653 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4654 	}
   4655 
   4656 	kcpuset_destroy(affinity);
   4657 	return ENOMEM;
   4658 }
   4659 
   4660 static void
   4661 wm_turnon(struct wm_softc *sc)
   4662 {
   4663 	int i;
   4664 
   4665 	KASSERT(WM_CORE_LOCKED(sc));
   4666 
    4667 	for (i = 0; i < sc->sc_nqueues; i++) {
   4668 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4669 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4670 
   4671 		mutex_enter(txq->txq_lock);
   4672 		txq->txq_stopping = false;
   4673 		mutex_exit(txq->txq_lock);
   4674 
   4675 		mutex_enter(rxq->rxq_lock);
   4676 		rxq->rxq_stopping = false;
   4677 		mutex_exit(rxq->rxq_lock);
   4678 	}
   4679 
   4680 	sc->sc_core_stopping = false;
   4681 }
   4682 
   4683 static void
   4684 wm_turnoff(struct wm_softc *sc)
   4685 {
   4686 	int i;
   4687 
   4688 	KASSERT(WM_CORE_LOCKED(sc));
   4689 
   4690 	sc->sc_core_stopping = true;
   4691 
    4692 	for (i = 0; i < sc->sc_nqueues; i++) {
   4693 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4694 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4695 
   4696 		mutex_enter(rxq->rxq_lock);
   4697 		rxq->rxq_stopping = true;
   4698 		mutex_exit(rxq->rxq_lock);
   4699 
   4700 		mutex_enter(txq->txq_lock);
   4701 		txq->txq_stopping = true;
   4702 		mutex_exit(txq->txq_lock);
   4703 	}
   4704 }
   4705 
   4706 /*
   4707  * wm_init:		[ifnet interface function]
   4708  *
   4709  *	Initialize the interface.
   4710  */
   4711 static int
   4712 wm_init(struct ifnet *ifp)
   4713 {
   4714 	struct wm_softc *sc = ifp->if_softc;
   4715 	int ret;
   4716 
   4717 	WM_CORE_LOCK(sc);
   4718 	ret = wm_init_locked(ifp);
   4719 	WM_CORE_UNLOCK(sc);
   4720 
   4721 	return ret;
   4722 }
   4723 
   4724 static int
   4725 wm_init_locked(struct ifnet *ifp)
   4726 {
   4727 	struct wm_softc *sc = ifp->if_softc;
   4728 	int i, j, trynum, error = 0;
   4729 	uint32_t reg;
   4730 
   4731 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4732 		device_xname(sc->sc_dev), __func__));
   4733 	KASSERT(WM_CORE_LOCKED(sc));
   4734 
   4735 	/*
    4736 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4737 	 * There is a small but measurable benefit to avoiding the adjustment
   4738 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4739 	 * on such platforms.  One possibility is that the DMA itself is
   4740 	 * slightly more efficient if the front of the entire packet (instead
   4741 	 * of the front of the headers) is aligned.
   4742 	 *
   4743 	 * Note we must always set align_tweak to 0 if we are using
   4744 	 * jumbo frames.
   4745 	 */
   4746 #ifdef __NO_STRICT_ALIGNMENT
   4747 	sc->sc_align_tweak = 0;
   4748 #else
   4749 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4750 		sc->sc_align_tweak = 0;
   4751 	else
   4752 		sc->sc_align_tweak = 2;
   4753 #endif /* __NO_STRICT_ALIGNMENT */
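         	/*
         	 * Editor's note: with align_tweak = 2, the 14-byte Ethernet
         	 * header starts 2 bytes into the buffer, so the IP header
         	 * lands on a 4-byte boundary (2 + 14 = 16), which
         	 * strict-alignment platforms require.
         	 */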
   4754 
   4755 	/* Cancel any pending I/O. */
   4756 	wm_stop_locked(ifp, 0);
   4757 
   4758 	/* update statistics before reset */
   4759 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4760 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4761 
   4762 	/* PCH_SPT hardware workaround */
   4763 	if (sc->sc_type == WM_T_PCH_SPT)
   4764 		wm_flush_desc_rings(sc);
   4765 
   4766 	/* Reset the chip to a known state. */
   4767 	wm_reset(sc);
   4768 
   4769 	/* AMT based hardware can now take control from firmware */
   4770 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4771 		wm_get_hw_control(sc);
   4772 
   4773 	/* Init hardware bits */
   4774 	wm_initialize_hardware_bits(sc);
   4775 
   4776 	/* Reset the PHY. */
   4777 	if (sc->sc_flags & WM_F_HAS_MII)
   4778 		wm_gmii_reset(sc);
   4779 
   4780 	/* Calculate (E)ITR value */
   4781 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4782 		sc->sc_itr = 450;	/* For EITR */
   4783 	} else if (sc->sc_type >= WM_T_82543) {
   4784 		/*
   4785 		 * Set up the interrupt throttling register (units of 256ns)
   4786 		 * Note that a footnote in Intel's documentation says this
   4787 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4788 		 * or 10Mbit mode.  Empirically, this also appears to be
    4789 		 * true for the 1024ns units of the other
   4790 		 * interrupt-related timer registers -- so, really, we ought
   4791 		 * to divide this value by 4 when the link speed is low.
   4792 		 *
   4793 		 * XXX implement this division at link speed change!
   4794 		 */
   4795 
   4796 		/*
   4797 		 * For N interrupts/sec, set this value to:
   4798 		 * 1000000000 / (N * 256).  Note that we set the
   4799 		 * absolute and packet timer values to this value
   4800 		 * divided by 4 to get "simple timer" behavior.
   4801 		 */
   4802 
   4803 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4804 	}
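         	/*
         	 * Editor's worked instance of the formula above, assuming
         	 * 256ns ITR units:
         	 *
         	 *	itr = 1000000000 / (N * 256)
         	 *	1000000000 / (2604 * 256) ~= 1500
         	 *
         	 * so the value 1500 set above corresponds to roughly 2604
         	 * interrupts/sec.
         	 */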
   4805 
   4806 	error = wm_init_txrx_queues(sc);
   4807 	if (error)
   4808 		goto out;
   4809 
   4810 	/*
   4811 	 * Clear out the VLAN table -- we don't use it (yet).
   4812 	 */
   4813 	CSR_WRITE(sc, WMREG_VET, 0);
   4814 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4815 		trynum = 10; /* Due to hw errata */
   4816 	else
   4817 		trynum = 1;
   4818 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4819 		for (j = 0; j < trynum; j++)
   4820 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4821 
   4822 	/*
   4823 	 * Set up flow-control parameters.
   4824 	 *
   4825 	 * XXX Values could probably stand some tuning.
   4826 	 */
   4827 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4828 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4829 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4830 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4831 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4832 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4833 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4834 	}
   4835 
   4836 	sc->sc_fcrtl = FCRTL_DFLT;
   4837 	if (sc->sc_type < WM_T_82543) {
   4838 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4839 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4840 	} else {
   4841 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4842 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4843 	}
   4844 
   4845 	if (sc->sc_type == WM_T_80003)
   4846 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4847 	else
   4848 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4849 
   4850 	/* Writes the control register. */
   4851 	wm_set_vlan(sc);
   4852 
   4853 	if (sc->sc_flags & WM_F_HAS_MII) {
   4854 		int val;
   4855 
   4856 		switch (sc->sc_type) {
   4857 		case WM_T_80003:
   4858 		case WM_T_ICH8:
   4859 		case WM_T_ICH9:
   4860 		case WM_T_ICH10:
   4861 		case WM_T_PCH:
   4862 		case WM_T_PCH2:
   4863 		case WM_T_PCH_LPT:
   4864 		case WM_T_PCH_SPT:
   4865 			/*
   4866 			 * Set the mac to wait the maximum time between each
   4867 			 * iteration and increase the max iterations when
   4868 			 * polling the phy; this fixes erroneous timeouts at
   4869 			 * 10Mbps.
   4870 			 */
   4871 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4872 			    0xFFFF);
   4873 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4874 			val |= 0x3F;
   4875 			wm_kmrn_writereg(sc,
   4876 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4877 			break;
   4878 		default:
   4879 			break;
   4880 		}
   4881 
   4882 		if (sc->sc_type == WM_T_80003) {
   4883 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4884 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4885 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4886 
   4887 			/* Bypass RX and TX FIFO's */
   4888 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4889 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4890 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4891 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4892 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4893 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4894 		}
   4895 	}
   4896 #if 0
   4897 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4898 #endif
   4899 
   4900 	/* Set up checksum offload parameters. */
   4901 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4902 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4903 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4904 		reg |= RXCSUM_IPOFL;
   4905 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4906 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4907 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4908 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4909 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4910 
   4911 	/* Set up MSI-X */
   4912 	if (sc->sc_nintrs > 1) {
   4913 		uint32_t ivar;
   4914 		struct wm_queue *wmq;
   4915 		int qid, qintr_idx;
   4916 
   4917 		if (sc->sc_type == WM_T_82575) {
   4918 			/* Interrupt control */
   4919 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4920 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4921 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4922 
   4923 			/* TX and RX */
   4924 			for (i = 0; i < sc->sc_nqueues; i++) {
   4925 				wmq = &sc->sc_queue[i];
   4926 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4927 				    EITR_TX_QUEUE(wmq->wmq_id)
   4928 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4929 			}
   4930 			/* Link status */
   4931 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4932 			    EITR_OTHER);
   4933 		} else if (sc->sc_type == WM_T_82574) {
   4934 			/* Interrupt control */
   4935 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4936 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4937 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4938 
   4939 			ivar = 0;
   4940 			/* TX and RX */
   4941 			for (i = 0; i < sc->sc_nqueues; i++) {
   4942 				wmq = &sc->sc_queue[i];
   4943 				qid = wmq->wmq_id;
   4944 				qintr_idx = wmq->wmq_intr_idx;
   4945 
   4946 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4947 				    IVAR_TX_MASK_Q_82574(qid));
   4948 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4949 				    IVAR_RX_MASK_Q_82574(qid));
   4950 			}
   4951 			/* Link status */
   4952 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4953 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4954 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4955 		} else {
   4956 			/* Interrupt control */
   4957 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4958 			    | GPIE_EIAME | GPIE_PBA);
   4959 
   4960 			switch (sc->sc_type) {
   4961 			case WM_T_82580:
   4962 			case WM_T_I350:
   4963 			case WM_T_I354:
   4964 			case WM_T_I210:
   4965 			case WM_T_I211:
   4966 				/* TX and RX */
   4967 				for (i = 0; i < sc->sc_nqueues; i++) {
   4968 					wmq = &sc->sc_queue[i];
   4969 					qid = wmq->wmq_id;
   4970 					qintr_idx = wmq->wmq_intr_idx;
   4971 
   4972 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4973 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4974 					ivar |= __SHIFTIN((qintr_idx
   4975 						| IVAR_VALID),
   4976 					    IVAR_TX_MASK_Q(qid));
   4977 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4978 					ivar |= __SHIFTIN((qintr_idx
   4979 						| IVAR_VALID),
   4980 					    IVAR_RX_MASK_Q(qid));
   4981 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4982 				}
   4983 				break;
   4984 			case WM_T_82576:
   4985 				/* TX and RX */
   4986 				for (i = 0; i < sc->sc_nqueues; i++) {
   4987 					wmq = &sc->sc_queue[i];
   4988 					qid = wmq->wmq_id;
   4989 					qintr_idx = wmq->wmq_intr_idx;
   4990 
   4991 					ivar = CSR_READ(sc,
   4992 					    WMREG_IVAR_Q_82576(qid));
   4993 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4994 					ivar |= __SHIFTIN((qintr_idx
   4995 						| IVAR_VALID),
   4996 					    IVAR_TX_MASK_Q_82576(qid));
   4997 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4998 					ivar |= __SHIFTIN((qintr_idx
   4999 						| IVAR_VALID),
   5000 					    IVAR_RX_MASK_Q_82576(qid));
   5001 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5002 					    ivar);
   5003 				}
   5004 				break;
   5005 			default:
   5006 				break;
   5007 			}
   5008 
   5009 			/* Link status */
   5010 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5011 			    IVAR_MISC_OTHER);
   5012 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5013 		}
   5014 
   5015 		if (sc->sc_nqueues > 1) {
   5016 			wm_init_rss(sc);
   5017 
    5018 			/*
    5019 			 * NOTE: Receive Full-Packet Checksum Offload is
    5020 			 * mutually exclusive with Multiqueue.  However, this
    5021 			 * is not the same as TCP/IP checksum offload, which
    5022 			 * still works.
    5023 			 */
   5024 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5025 			reg |= RXCSUM_PCSD;
   5026 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5027 		}
   5028 	}
   5029 
   5030 	/* Set up the interrupt registers. */
   5031 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5032 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5033 	    ICR_RXO | ICR_RXT0;
   5034 	if (sc->sc_nintrs > 1) {
   5035 		uint32_t mask;
   5036 		struct wm_queue *wmq;
   5037 
   5038 		switch (sc->sc_type) {
   5039 		case WM_T_82574:
   5040 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5041 			    WMREG_EIAC_82574_MSIX_MASK);
   5042 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5043 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5044 			break;
   5045 		default:
   5046 			if (sc->sc_type == WM_T_82575) {
   5047 				mask = 0;
   5048 				for (i = 0; i < sc->sc_nqueues; i++) {
   5049 					wmq = &sc->sc_queue[i];
   5050 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5051 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5052 				}
   5053 				mask |= EITR_OTHER;
   5054 			} else {
   5055 				mask = 0;
   5056 				for (i = 0; i < sc->sc_nqueues; i++) {
   5057 					wmq = &sc->sc_queue[i];
   5058 					mask |= 1 << wmq->wmq_intr_idx;
   5059 				}
   5060 				mask |= 1 << sc->sc_link_intr_idx;
   5061 			}
   5062 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5063 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5064 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5065 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5066 			break;
   5067 		}
   5068 	} else
   5069 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5070 
   5071 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5072 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5073 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5074 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5075 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5076 		reg |= KABGTXD_BGSQLBIAS;
   5077 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5078 	}
   5079 
   5080 	/* Set up the inter-packet gap. */
   5081 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5082 
   5083 	if (sc->sc_type >= WM_T_82543) {
   5084 		/*
   5085 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   5086 		 * the multi queue function with MSI-X.
   5087 		 */
   5088 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5089 			int qidx;
   5090 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5091 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5092 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5093 				    sc->sc_itr);
   5094 			}
   5095 			/*
   5096 			 * Link interrupts occur much less than TX
   5097 			 * interrupts and RX interrupts. So, we don't
   5098 			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5099 			 * FreeBSD's if_igb.
   5100 			 */
   5101 		} else
   5102 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5103 	}
   5104 
   5105 	/* Set the VLAN ethernetype. */
   5106 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5107 
   5108 	/*
   5109 	 * Set up the transmit control register; we start out with
    5110 	 * a collision distance suitable for FDX, but update it when
   5111 	 * we resolve the media type.
   5112 	 */
   5113 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5114 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5115 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5116 	if (sc->sc_type >= WM_T_82571)
   5117 		sc->sc_tctl |= TCTL_MULR;
   5118 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5119 
   5120 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5121 		/* Write TDT after TCTL.EN is set. See the document. */
   5122 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5123 	}
   5124 
   5125 	if (sc->sc_type == WM_T_80003) {
   5126 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5127 		reg &= ~TCTL_EXT_GCEX_MASK;
   5128 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5129 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5130 	}
   5131 
   5132 	/* Set the media. */
   5133 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5134 		goto out;
   5135 
   5136 	/* Configure for OS presence */
   5137 	wm_init_manageability(sc);
   5138 
   5139 	/*
   5140 	 * Set up the receive control register; we actually program
   5141 	 * the register when we set the receive filter.  Use multicast
   5142 	 * address offset type 0.
   5143 	 *
   5144 	 * Only the i82544 has the ability to strip the incoming
   5145 	 * CRC, so we don't enable that feature.
   5146 	 */
   5147 	sc->sc_mchash_type = 0;
   5148 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5149 	    | RCTL_MO(sc->sc_mchash_type);
   5150 
   5151 	/*
    5152 	 * The I350 has a bug where it always strips the CRC whether
    5153 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
   5154 	 */
   5155 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5156 	    || (sc->sc_type == WM_T_I210))
   5157 		sc->sc_rctl |= RCTL_SECRC;
   5158 
   5159 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5160 	    && (ifp->if_mtu > ETHERMTU)) {
   5161 		sc->sc_rctl |= RCTL_LPE;
   5162 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5163 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5164 	}
   5165 
   5166 	if (MCLBYTES == 2048) {
   5167 		sc->sc_rctl |= RCTL_2k;
   5168 	} else {
   5169 		if (sc->sc_type >= WM_T_82543) {
   5170 			switch (MCLBYTES) {
   5171 			case 4096:
   5172 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5173 				break;
   5174 			case 8192:
   5175 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5176 				break;
   5177 			case 16384:
   5178 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5179 				break;
   5180 			default:
   5181 				panic("wm_init: MCLBYTES %d unsupported",
   5182 				    MCLBYTES);
   5183 				break;
   5184 			}
   5185 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5186 	}
   5187 
   5188 	/* Set the receive filter. */
   5189 	wm_set_filter(sc);
   5190 
   5191 	/* Enable ECC */
   5192 	switch (sc->sc_type) {
   5193 	case WM_T_82571:
   5194 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5195 		reg |= PBA_ECC_CORR_EN;
   5196 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5197 		break;
   5198 	case WM_T_PCH_LPT:
   5199 	case WM_T_PCH_SPT:
   5200 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5201 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5202 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5203 
   5204 		sc->sc_ctrl |= CTRL_MEHE;
   5205 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5206 		break;
   5207 	default:
   5208 		break;
   5209 	}
   5210 
   5211 	/* On 575 and later set RDT only if RX enabled */
   5212 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5213 		int qidx;
   5214 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5215 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5216 			for (i = 0; i < WM_NRXDESC; i++) {
   5217 				mutex_enter(rxq->rxq_lock);
   5218 				wm_init_rxdesc(rxq, i);
   5219 				mutex_exit(rxq->rxq_lock);
   5220 
   5221 			}
   5222 		}
   5223 	}
   5224 
   5225 	wm_turnon(sc);
   5226 
   5227 	/* Start the one second link check clock. */
   5228 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5229 
   5230 	/* ...all done! */
   5231 	ifp->if_flags |= IFF_RUNNING;
   5232 	ifp->if_flags &= ~IFF_OACTIVE;
   5233 
   5234  out:
   5235 	sc->sc_if_flags = ifp->if_flags;
   5236 	if (error)
   5237 		log(LOG_ERR, "%s: interface not running\n",
   5238 		    device_xname(sc->sc_dev));
   5239 	return error;
   5240 }
   5241 
   5242 /*
   5243  * wm_stop:		[ifnet interface function]
   5244  *
   5245  *	Stop transmission on the interface.
   5246  */
   5247 static void
   5248 wm_stop(struct ifnet *ifp, int disable)
   5249 {
   5250 	struct wm_softc *sc = ifp->if_softc;
   5251 
   5252 	WM_CORE_LOCK(sc);
   5253 	wm_stop_locked(ifp, disable);
   5254 	WM_CORE_UNLOCK(sc);
   5255 }
   5256 
   5257 static void
   5258 wm_stop_locked(struct ifnet *ifp, int disable)
   5259 {
   5260 	struct wm_softc *sc = ifp->if_softc;
   5261 	struct wm_txsoft *txs;
   5262 	int i, qidx;
   5263 
   5264 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5265 		device_xname(sc->sc_dev), __func__));
   5266 	KASSERT(WM_CORE_LOCKED(sc));
   5267 
   5268 	wm_turnoff(sc);
   5269 
   5270 	/* Stop the one second clock. */
   5271 	callout_stop(&sc->sc_tick_ch);
   5272 
   5273 	/* Stop the 82547 Tx FIFO stall check timer. */
   5274 	if (sc->sc_type == WM_T_82547)
   5275 		callout_stop(&sc->sc_txfifo_ch);
   5276 
   5277 	if (sc->sc_flags & WM_F_HAS_MII) {
   5278 		/* Down the MII. */
   5279 		mii_down(&sc->sc_mii);
   5280 	} else {
   5281 #if 0
   5282 		/* Should we clear PHY's status properly? */
   5283 		wm_reset(sc);
   5284 #endif
   5285 	}
   5286 
   5287 	/* Stop the transmit and receive processes. */
   5288 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5289 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5290 	sc->sc_rctl &= ~RCTL_EN;
   5291 
   5292 	/*
   5293 	 * Clear the interrupt mask to ensure the device cannot assert its
   5294 	 * interrupt line.
   5295 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5296 	 * service any currently pending or shared interrupt.
   5297 	 */
   5298 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5299 	sc->sc_icr = 0;
   5300 	if (sc->sc_nintrs > 1) {
   5301 		if (sc->sc_type != WM_T_82574) {
   5302 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5303 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5304 		} else
   5305 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5306 	}
   5307 
   5308 	/* Release any queued transmit buffers. */
   5309 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5310 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5311 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5312 		mutex_enter(txq->txq_lock);
   5313 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5314 			txs = &txq->txq_soft[i];
   5315 			if (txs->txs_mbuf != NULL) {
    5316 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5317 				m_freem(txs->txs_mbuf);
   5318 				txs->txs_mbuf = NULL;
   5319 			}
   5320 		}
   5321 		mutex_exit(txq->txq_lock);
   5322 	}
   5323 
   5324 	/* Mark the interface as down and cancel the watchdog timer. */
   5325 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5326 	ifp->if_timer = 0;
   5327 
   5328 	if (disable) {
   5329 		for (i = 0; i < sc->sc_nqueues; i++) {
   5330 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5331 			mutex_enter(rxq->rxq_lock);
   5332 			wm_rxdrain(rxq);
   5333 			mutex_exit(rxq->rxq_lock);
   5334 		}
   5335 	}
   5336 
   5337 #if 0 /* notyet */
   5338 	if (sc->sc_type >= WM_T_82544)
   5339 		CSR_WRITE(sc, WMREG_WUC, 0);
   5340 #endif
   5341 }
   5342 
   5343 static void
   5344 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5345 {
   5346 	struct mbuf *m;
   5347 	int i;
   5348 
   5349 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5350 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5351 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5352 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5353 		    m->m_data, m->m_len, m->m_flags);
   5354 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5355 	    i, i == 1 ? "" : "s");
   5356 }
   5357 
   5358 /*
   5359  * wm_82547_txfifo_stall:
   5360  *
   5361  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5362  *	reset the FIFO pointers, and restart packet transmission.
   5363  */
   5364 static void
   5365 wm_82547_txfifo_stall(void *arg)
   5366 {
   5367 	struct wm_softc *sc = arg;
   5368 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5369 
   5370 	mutex_enter(txq->txq_lock);
   5371 
   5372 	if (txq->txq_stopping)
   5373 		goto out;
   5374 
   5375 	if (txq->txq_fifo_stall) {
   5376 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5377 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5378 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5379 			/*
   5380 			 * Packets have drained.  Stop transmitter, reset
   5381 			 * FIFO pointers, restart transmitter, and kick
   5382 			 * the packet queue.
   5383 			 */
   5384 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5385 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5386 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5387 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5388 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5389 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5390 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5391 			CSR_WRITE_FLUSH(sc);
   5392 
   5393 			txq->txq_fifo_head = 0;
   5394 			txq->txq_fifo_stall = 0;
   5395 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5396 		} else {
   5397 			/*
   5398 			 * Still waiting for packets to drain; try again in
   5399 			 * another tick.
   5400 			 */
   5401 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5402 		}
   5403 	}
   5404 
   5405 out:
   5406 	mutex_exit(txq->txq_lock);
   5407 }
   5408 
   5409 /*
   5410  * wm_82547_txfifo_bugchk:
   5411  *
   5412  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5413  *	prevent enqueueing a packet that would wrap around the end
    5414  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5415  *
   5416  *	We do this by checking the amount of space before the end
   5417  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5418  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5419  *	the internal FIFO pointers to the beginning, and restart
   5420  *	transmission on the interface.
   5421  */
   5422 #define	WM_FIFO_HDR		0x10
   5423 #define	WM_82547_PAD_LEN	0x3e0
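         /*
          * Editor's worked instance of the check below, assuming an empty
          * 8KB Tx FIFO (the real size is PBA-dependent) and a 1514-byte
          * frame:
          *
          *	len   = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600
          *	space = txq_fifo_size - txq_fifo_head = 0x2000
          *
          * len < WM_82547_PAD_LEN + space, so the packet is sent and
          * txq_fifo_head advances (modulo txq_fifo_size) to 0x600.
          */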
   5424 static int
   5425 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5426 {
   5427 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5428 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5429 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5430 
   5431 	/* Just return if already stalled. */
   5432 	if (txq->txq_fifo_stall)
   5433 		return 1;
   5434 
   5435 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5436 		/* Stall only occurs in half-duplex mode. */
   5437 		goto send_packet;
   5438 	}
   5439 
   5440 	if (len >= WM_82547_PAD_LEN + space) {
   5441 		txq->txq_fifo_stall = 1;
   5442 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5443 		return 1;
   5444 	}
   5445 
   5446  send_packet:
   5447 	txq->txq_fifo_head += len;
   5448 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5449 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5450 
   5451 	return 0;
   5452 }
   5453 
   5454 static int
   5455 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5456 {
   5457 	int error;
   5458 
   5459 	/*
   5460 	 * Allocate the control data structures, and create and load the
   5461 	 * DMA map for it.
   5462 	 *
   5463 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5464 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5465 	 * both sets within the same 4G segment.
   5466 	 */
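         	/*
         	 * Editor's note: the 4G constraint is enforced below by the
         	 * 0x100000000ULL "boundary" argument to bus_dmamem_alloc(),
         	 * which guarantees the allocation never crosses a 4GB
         	 * boundary.
         	 */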
   5467 	if (sc->sc_type < WM_T_82544)
   5468 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5469 	else
   5470 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5471 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5472 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5473 	else
   5474 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5475 
   5476 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5477 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5478 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5479 		aprint_error_dev(sc->sc_dev,
   5480 		    "unable to allocate TX control data, error = %d\n",
   5481 		    error);
   5482 		goto fail_0;
   5483 	}
   5484 
   5485 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5486 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5487 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5488 		aprint_error_dev(sc->sc_dev,
   5489 		    "unable to map TX control data, error = %d\n", error);
   5490 		goto fail_1;
   5491 	}
   5492 
   5493 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5494 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5495 		aprint_error_dev(sc->sc_dev,
   5496 		    "unable to create TX control data DMA map, error = %d\n",
   5497 		    error);
   5498 		goto fail_2;
   5499 	}
   5500 
   5501 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5502 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5503 		aprint_error_dev(sc->sc_dev,
   5504 		    "unable to load TX control data DMA map, error = %d\n",
   5505 		    error);
   5506 		goto fail_3;
   5507 	}
   5508 
   5509 	return 0;
   5510 
   5511  fail_3:
   5512 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5513  fail_2:
   5514 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5515 	    WM_TXDESCS_SIZE(txq));
   5516  fail_1:
   5517 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5518  fail_0:
   5519 	return error;
   5520 }
   5521 
   5522 static void
   5523 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5524 {
   5525 
   5526 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5527 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5528 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5529 	    WM_TXDESCS_SIZE(txq));
   5530 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5531 }
   5532 
   5533 static int
   5534 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5535 {
   5536 	int error;
   5537 
   5538 	/*
   5539 	 * Allocate the control data structures, and create and load the
   5540 	 * DMA map for it.
   5541 	 *
   5542 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5543 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5544 	 * both sets within the same 4G segment.
   5545 	 */
   5546 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5547 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5548 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5549 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5550 		aprint_error_dev(sc->sc_dev,
   5551 		    "unable to allocate RX control data, error = %d\n",
   5552 		    error);
   5553 		goto fail_0;
   5554 	}
   5555 
   5556 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5557 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5558 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5559 		aprint_error_dev(sc->sc_dev,
   5560 		    "unable to map RX control data, error = %d\n", error);
   5561 		goto fail_1;
   5562 	}
   5563 
   5564 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5565 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5566 		aprint_error_dev(sc->sc_dev,
   5567 		    "unable to create RX control data DMA map, error = %d\n",
   5568 		    error);
   5569 		goto fail_2;
   5570 	}
   5571 
   5572 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5573 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5574 		aprint_error_dev(sc->sc_dev,
   5575 		    "unable to load RX control data DMA map, error = %d\n",
   5576 		    error);
   5577 		goto fail_3;
   5578 	}
   5579 
   5580 	return 0;
   5581 
   5582  fail_3:
   5583 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5584  fail_2:
   5585 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5586 	    rxq->rxq_desc_size);
   5587  fail_1:
   5588 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5589  fail_0:
   5590 	return error;
   5591 }
   5592 
   5593 static void
   5594 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5595 {
   5596 
   5597 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5598 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5599 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5600 	    rxq->rxq_desc_size);
   5601 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5602 }
   5603 
   5604 
   5605 static int
   5606 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5607 {
   5608 	int i, error;
   5609 
   5610 	/* Create the transmit buffer DMA maps. */
   5611 	WM_TXQUEUELEN(txq) =
   5612 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5613 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5614 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5615 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5616 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5617 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5618 			aprint_error_dev(sc->sc_dev,
   5619 			    "unable to create Tx DMA map %d, error = %d\n",
   5620 			    i, error);
   5621 			goto fail;
   5622 		}
   5623 	}
   5624 
   5625 	return 0;
   5626 
   5627  fail:
   5628 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5629 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5630 			bus_dmamap_destroy(sc->sc_dmat,
   5631 			    txq->txq_soft[i].txs_dmamap);
   5632 	}
   5633 	return error;
   5634 }
   5635 
   5636 static void
   5637 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5638 {
   5639 	int i;
   5640 
   5641 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5642 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5643 			bus_dmamap_destroy(sc->sc_dmat,
   5644 			    txq->txq_soft[i].txs_dmamap);
   5645 	}
   5646 }
   5647 
   5648 static int
   5649 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5650 {
   5651 	int i, error;
   5652 
   5653 	/* Create the receive buffer DMA maps. */
   5654 	for (i = 0; i < WM_NRXDESC; i++) {
   5655 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5656 			    MCLBYTES, 0, 0,
   5657 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5658 			aprint_error_dev(sc->sc_dev,
    5659 			    "unable to create Rx DMA map %d, error = %d\n",
   5660 			    i, error);
   5661 			goto fail;
   5662 		}
   5663 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5664 	}
   5665 
   5666 	return 0;
   5667 
   5668  fail:
   5669 	for (i = 0; i < WM_NRXDESC; i++) {
   5670 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5671 			bus_dmamap_destroy(sc->sc_dmat,
   5672 			    rxq->rxq_soft[i].rxs_dmamap);
   5673 	}
   5674 	return error;
   5675 }
   5676 
   5677 static void
   5678 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5679 {
   5680 	int i;
   5681 
   5682 	for (i = 0; i < WM_NRXDESC; i++) {
   5683 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5684 			bus_dmamap_destroy(sc->sc_dmat,
   5685 			    rxq->rxq_soft[i].rxs_dmamap);
   5686 	}
   5687 }
   5688 
   5689 /*
    5690  * wm_alloc_txrx_queues:
    5691  *	Allocate {Tx,Rx} descriptors and {Tx,Rx} buffers.
   5692  */
   5693 static int
   5694 wm_alloc_txrx_queues(struct wm_softc *sc)
   5695 {
   5696 	int i, error, tx_done, rx_done;
   5697 
   5698 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5699 	    KM_SLEEP);
   5700 	if (sc->sc_queue == NULL) {
    5701 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5702 		error = ENOMEM;
   5703 		goto fail_0;
   5704 	}
   5705 
   5706 	/*
   5707 	 * For transmission
   5708 	 */
   5709 	error = 0;
   5710 	tx_done = 0;
   5711 	for (i = 0; i < sc->sc_nqueues; i++) {
   5712 #ifdef WM_EVENT_COUNTERS
   5713 		int j;
   5714 		const char *xname;
   5715 #endif
   5716 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5717 		txq->txq_sc = sc;
   5718 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5719 
   5720 		error = wm_alloc_tx_descs(sc, txq);
   5721 		if (error)
   5722 			break;
   5723 		error = wm_alloc_tx_buffer(sc, txq);
   5724 		if (error) {
   5725 			wm_free_tx_descs(sc, txq);
   5726 			break;
   5727 		}
   5728 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5729 		if (txq->txq_interq == NULL) {
   5730 			wm_free_tx_descs(sc, txq);
   5731 			wm_free_tx_buffer(sc, txq);
   5732 			error = ENOMEM;
   5733 			break;
   5734 		}
   5735 
   5736 #ifdef WM_EVENT_COUNTERS
   5737 		xname = device_xname(sc->sc_dev);
   5738 
   5739 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5740 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5741 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5742 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5743 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5744 
   5745 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5746 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5747 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5748 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5749 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5750 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5751 
   5752 		for (j = 0; j < WM_NTXSEGS; j++) {
   5753 			snprintf(txq->txq_txseg_evcnt_names[j],
   5754 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5755 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5756 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5757 		}
   5758 
   5759 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5760 
   5761 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5762 #endif /* WM_EVENT_COUNTERS */
   5763 
   5764 		tx_done++;
   5765 	}
   5766 	if (error)
   5767 		goto fail_1;
   5768 
   5769 	/*
    5770 	 * For receive
   5771 	 */
   5772 	error = 0;
   5773 	rx_done = 0;
   5774 	for (i = 0; i < sc->sc_nqueues; i++) {
   5775 #ifdef WM_EVENT_COUNTERS
   5776 		const char *xname;
   5777 #endif
   5778 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5779 		rxq->rxq_sc = sc;
   5780 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5781 
   5782 		error = wm_alloc_rx_descs(sc, rxq);
   5783 		if (error)
   5784 			break;
   5785 
   5786 		error = wm_alloc_rx_buffer(sc, rxq);
   5787 		if (error) {
   5788 			wm_free_rx_descs(sc, rxq);
   5789 			break;
   5790 		}
   5791 
   5792 #ifdef WM_EVENT_COUNTERS
   5793 		xname = device_xname(sc->sc_dev);
   5794 
   5795 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5796 
   5797 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5798 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5799 #endif /* WM_EVENT_COUNTERS */
   5800 
   5801 		rx_done++;
   5802 	}
   5803 	if (error)
   5804 		goto fail_2;
   5805 
   5806 	return 0;
   5807 
   5808  fail_2:
   5809 	for (i = 0; i < rx_done; i++) {
   5810 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5811 		wm_free_rx_buffer(sc, rxq);
   5812 		wm_free_rx_descs(sc, rxq);
   5813 		if (rxq->rxq_lock)
   5814 			mutex_obj_free(rxq->rxq_lock);
   5815 	}
   5816  fail_1:
   5817 	for (i = 0; i < tx_done; i++) {
   5818 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5819 		pcq_destroy(txq->txq_interq);
   5820 		wm_free_tx_buffer(sc, txq);
   5821 		wm_free_tx_descs(sc, txq);
   5822 		if (txq->txq_lock)
   5823 			mutex_obj_free(txq->txq_lock);
   5824 	}
   5825 
   5826 	kmem_free(sc->sc_queue,
   5827 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5828  fail_0:
   5829 	return error;
   5830 }
   5831 
   5832 /*
    5833  * wm_free_txrx_queues:
    5834  *	Free {Tx,Rx} descriptors and {Tx,Rx} buffers.
   5835  */
   5836 static void
   5837 wm_free_txrx_queues(struct wm_softc *sc)
   5838 {
   5839 	int i;
   5840 
   5841 	for (i = 0; i < sc->sc_nqueues; i++) {
   5842 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5843 		wm_free_rx_buffer(sc, rxq);
   5844 		wm_free_rx_descs(sc, rxq);
   5845 		if (rxq->rxq_lock)
   5846 			mutex_obj_free(rxq->rxq_lock);
   5847 	}
   5848 
   5849 	for (i = 0; i < sc->sc_nqueues; i++) {
   5850 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5851 		wm_free_tx_buffer(sc, txq);
   5852 		wm_free_tx_descs(sc, txq);
   5853 		if (txq->txq_lock)
   5854 			mutex_obj_free(txq->txq_lock);
   5855 	}
   5856 
   5857 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5858 }
   5859 
   5860 static void
   5861 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5862 {
   5863 
   5864 	KASSERT(mutex_owned(txq->txq_lock));
   5865 
   5866 	/* Initialize the transmit descriptor ring. */
   5867 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5868 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5869 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5870 	txq->txq_free = WM_NTXDESC(txq);
   5871 	txq->txq_next = 0;
   5872 }
   5873 
   5874 static void
   5875 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5876     struct wm_txqueue *txq)
   5877 {
   5878 
   5879 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5880 		device_xname(sc->sc_dev), __func__));
   5881 	KASSERT(mutex_owned(txq->txq_lock));
   5882 
   5883 	if (sc->sc_type < WM_T_82543) {
   5884 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5885 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5886 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5887 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5888 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5889 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5890 	} else {
   5891 		int qid = wmq->wmq_id;
   5892 
   5893 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5894 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5895 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5896 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5897 
    5898 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5899 			/*
   5900 			 * Don't write TDT before TCTL.EN is set.
   5901 			 * See the document.
   5902 			 */
   5903 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5904 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5905 			    | TXDCTL_WTHRESH(0));
    5906 		} else {
   5907 			/* ITR / 4 */
   5908 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5909 			if (sc->sc_type >= WM_T_82540) {
   5910 				/* should be same */
   5911 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5912 			}
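         			/*
         			 * Editor's note: TIDV/TADV tick in 1024ns
         			 * units, four times the ITR's 256ns unit, so
         			 * sc_itr / 4 expresses the same absolute time
         			 * (see the ITR comment in wm_init_locked()).
         			 */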
   5913 
   5914 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5915 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5916 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5917 		}
   5918 	}
   5919 }
   5920 
   5921 static void
   5922 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5923 {
   5924 	int i;
   5925 
   5926 	KASSERT(mutex_owned(txq->txq_lock));
   5927 
   5928 	/* Initialize the transmit job descriptors. */
   5929 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5930 		txq->txq_soft[i].txs_mbuf = NULL;
   5931 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5932 	txq->txq_snext = 0;
   5933 	txq->txq_sdirty = 0;
   5934 }
   5935 
   5936 static void
   5937 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5938     struct wm_txqueue *txq)
   5939 {
   5940 
   5941 	KASSERT(mutex_owned(txq->txq_lock));
   5942 
   5943 	/*
   5944 	 * Set up some register offsets that are different between
   5945 	 * the i82542 and the i82543 and later chips.
   5946 	 */
   5947 	if (sc->sc_type < WM_T_82543)
   5948 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5949 	else
   5950 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5951 
   5952 	wm_init_tx_descs(sc, txq);
   5953 	wm_init_tx_regs(sc, wmq, txq);
   5954 	wm_init_tx_buffer(sc, txq);
   5955 }
   5956 
   5957 static void
   5958 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5959     struct wm_rxqueue *rxq)
   5960 {
   5961 
   5962 	KASSERT(mutex_owned(rxq->rxq_lock));
   5963 
   5964 	/*
   5965 	 * Initialize the receive descriptor and receive job
   5966 	 * descriptor rings.
   5967 	 */
   5968 	if (sc->sc_type < WM_T_82543) {
   5969 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5970 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5971 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5972 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5973 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5974 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5975 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5976 
   5977 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5978 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5979 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5980 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5981 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5982 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5983 	} else {
   5984 		int qid = wmq->wmq_id;
   5985 
   5986 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5987 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5988 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5989 
   5990 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5991 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5992 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5993 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5994 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
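         			/*
         			 * Editor's note: SRRCTL_BSIZEPKT is encoded in
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT)-byte units, hence
         			 * the power-of-two check above; e.g. assuming
         			 * 1KB units (shift 10), MCLBYTES = 2048 encodes
         			 * as 2.
         			 */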
   5995 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5996 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5997 			    | RXDCTL_WTHRESH(1));
   5998 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5999 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6000 		} else {
   6001 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6002 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6003 			/* ITR / 4 */
   6004 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6005 			/* MUST be same */
   6006 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6007 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6008 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6009 		}
   6010 	}
   6011 }
   6012 
   6013 static int
   6014 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6015 {
   6016 	struct wm_rxsoft *rxs;
   6017 	int error, i;
   6018 
   6019 	KASSERT(mutex_owned(rxq->rxq_lock));
   6020 
   6021 	for (i = 0; i < WM_NRXDESC; i++) {
   6022 		rxs = &rxq->rxq_soft[i];
   6023 		if (rxs->rxs_mbuf == NULL) {
   6024 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6025 				log(LOG_ERR, "%s: unable to allocate or map "
   6026 				    "rx buffer %d, error = %d\n",
   6027 				    device_xname(sc->sc_dev), i, error);
   6028 				/*
   6029 				 * XXX Should attempt to run with fewer receive
   6030 				 * XXX buffers instead of just failing.
   6031 				 */
   6032 				wm_rxdrain(rxq);
   6033 				return ENOMEM;
   6034 			}
   6035 		} else {
   6036 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6037 				wm_init_rxdesc(rxq, i);
    6038 			/*
    6039 			 * For 82575 and newer devices, the Rx descriptors
    6040 			 * must be initialized after RCTL.EN is set in
    6041 			 * wm_set_filter().
    6042 			 */
   6043 		}
   6044 	}
   6045 	rxq->rxq_ptr = 0;
   6046 	rxq->rxq_discard = 0;
   6047 	WM_RXCHAIN_RESET(rxq);
   6048 
   6049 	return 0;
   6050 }
   6051 
   6052 static int
   6053 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6054     struct wm_rxqueue *rxq)
   6055 {
   6056 
   6057 	KASSERT(mutex_owned(rxq->rxq_lock));
   6058 
   6059 	/*
   6060 	 * Set up some register offsets that are different between
   6061 	 * the i82542 and the i82543 and later chips.
   6062 	 */
   6063 	if (sc->sc_type < WM_T_82543)
   6064 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6065 	else
   6066 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6067 
   6068 	wm_init_rx_regs(sc, wmq, rxq);
   6069 	return wm_init_rx_buffer(sc, rxq);
   6070 }
   6071 
   6072 /*
    6073  * wm_init_txrx_queues:
    6074  *	Initialize {Tx,Rx} descriptors and {Tx,Rx} buffers.
   6075  */
   6076 static int
   6077 wm_init_txrx_queues(struct wm_softc *sc)
   6078 {
   6079 	int i, error = 0;
   6080 
   6081 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6082 		device_xname(sc->sc_dev), __func__));
   6083 
   6084 	for (i = 0; i < sc->sc_nqueues; i++) {
   6085 		struct wm_queue *wmq = &sc->sc_queue[i];
   6086 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6087 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6088 
   6089 		mutex_enter(txq->txq_lock);
   6090 		wm_init_tx_queue(sc, wmq, txq);
   6091 		mutex_exit(txq->txq_lock);
   6092 
   6093 		mutex_enter(rxq->rxq_lock);
   6094 		error = wm_init_rx_queue(sc, wmq, rxq);
   6095 		mutex_exit(rxq->rxq_lock);
   6096 		if (error)
   6097 			break;
   6098 	}
   6099 
   6100 	return error;
   6101 }
   6102 
   6103 /*
   6104  * wm_tx_offload:
   6105  *
   6106  *	Set up TCP/IP checksumming parameters for the
   6107  *	specified packet.
   6108  */
   6109 static int
   6110 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6111     uint8_t *fieldsp)
   6112 {
   6113 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6114 	struct mbuf *m0 = txs->txs_mbuf;
   6115 	struct livengood_tcpip_ctxdesc *t;
   6116 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6117 	uint32_t ipcse;
   6118 	struct ether_header *eh;
   6119 	int offset, iphl;
   6120 	uint8_t fields;
   6121 
   6122 	/*
   6123 	 * XXX It would be nice if the mbuf pkthdr had offset
   6124 	 * fields for the protocol headers.
   6125 	 */
   6126 
   6127 	eh = mtod(m0, struct ether_header *);
   6128 	switch (htons(eh->ether_type)) {
   6129 	case ETHERTYPE_IP:
   6130 	case ETHERTYPE_IPV6:
   6131 		offset = ETHER_HDR_LEN;
   6132 		break;
   6133 
   6134 	case ETHERTYPE_VLAN:
   6135 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6136 		break;
   6137 
   6138 	default:
   6139 		/*
   6140 		 * Don't support this protocol or encapsulation.
   6141 		 */
   6142 		*fieldsp = 0;
   6143 		*cmdp = 0;
   6144 		return 0;
   6145 	}
   6146 
   6147 	if ((m0->m_pkthdr.csum_flags &
   6148 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6149 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6150 	} else {
   6151 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6152 	}
   6153 	ipcse = offset + iphl - 1;
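         	/*
         	 * Editor's note: e.g. for a plain IPv4 frame, offset =
         	 * ETHER_HDR_LEN = 14 and iphl = 20, so ipcse = 33, the
         	 * offset of the last byte covered by the IP checksum.
         	 */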
   6154 
   6155 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6156 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6157 	seg = 0;
   6158 	fields = 0;
   6159 
   6160 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6161 		int hlen = offset + iphl;
   6162 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6163 
   6164 		if (__predict_false(m0->m_len <
   6165 				    (hlen + sizeof(struct tcphdr)))) {
   6166 			/*
   6167 			 * TCP/IP headers are not in the first mbuf; we need
   6168 			 * to do this the slow and painful way.  Let's just
   6169 			 * hope this doesn't happen very often.
   6170 			 */
   6171 			struct tcphdr th;
   6172 
   6173 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6174 
   6175 			m_copydata(m0, hlen, sizeof(th), &th);
   6176 			if (v4) {
   6177 				struct ip ip;
   6178 
   6179 				m_copydata(m0, offset, sizeof(ip), &ip);
   6180 				ip.ip_len = 0;
   6181 				m_copyback(m0,
   6182 				    offset + offsetof(struct ip, ip_len),
   6183 				    sizeof(ip.ip_len), &ip.ip_len);
   6184 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6185 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6186 			} else {
   6187 				struct ip6_hdr ip6;
   6188 
   6189 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6190 				ip6.ip6_plen = 0;
   6191 				m_copyback(m0,
   6192 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6193 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6194 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6195 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6196 			}
   6197 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6198 			    sizeof(th.th_sum), &th.th_sum);
   6199 
   6200 			hlen += th.th_off << 2;
   6201 		} else {
   6202 			/*
   6203 			 * TCP/IP headers are in the first mbuf; we can do
   6204 			 * this the easy way.
   6205 			 */
   6206 			struct tcphdr *th;
   6207 
   6208 			if (v4) {
   6209 				struct ip *ip =
   6210 				    (void *)(mtod(m0, char *) + offset);
   6211 				th = (void *)(mtod(m0, char *) + hlen);
   6212 
   6213 				ip->ip_len = 0;
   6214 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6215 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6216 			} else {
   6217 				struct ip6_hdr *ip6 =
   6218 				    (void *)(mtod(m0, char *) + offset);
   6219 				th = (void *)(mtod(m0, char *) + hlen);
   6220 
   6221 				ip6->ip6_plen = 0;
   6222 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6223 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6224 			}
   6225 			hlen += th->th_off << 2;
   6226 		}
   6227 
   6228 		if (v4) {
   6229 			WM_Q_EVCNT_INCR(txq, txtso);
   6230 			cmdlen |= WTX_TCPIP_CMD_IP;
   6231 		} else {
   6232 			WM_Q_EVCNT_INCR(txq, txtso6);
   6233 			ipcse = 0;
   6234 		}
   6235 		cmd |= WTX_TCPIP_CMD_TSE;
   6236 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6237 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6238 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6239 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6240 	}
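         	/*
         	 * Editor's worked instance: for an IPv4 TSO packet with
         	 * 14 + 20 + 20 = 54 bytes of headers and an MSS of 1460,
         	 * hlen = 54, cmdlen carries the payload length
         	 * (m0->m_pkthdr.len - 54), and seg encodes HDRLEN = 54
         	 * together with MSS = 1460.
         	 */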
   6241 
   6242 	/*
   6243 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6244 	 * offload feature, if we load the context descriptor, we
   6245 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6246 	 */
   6247 
   6248 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6249 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6250 	    WTX_TCPIP_IPCSE(ipcse);
   6251 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6252 		WM_Q_EVCNT_INCR(txq, txipsum);
   6253 		fields |= WTX_IXSM;
   6254 	}
   6255 
   6256 	offset += iphl;
   6257 
   6258 	if (m0->m_pkthdr.csum_flags &
   6259 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6260 		WM_Q_EVCNT_INCR(txq, txtusum);
   6261 		fields |= WTX_TXSM;
   6262 		tucs = WTX_TCPIP_TUCSS(offset) |
   6263 		    WTX_TCPIP_TUCSO(offset +
   6264 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6265 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6266 	} else if ((m0->m_pkthdr.csum_flags &
   6267 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6268 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6269 		fields |= WTX_TXSM;
   6270 		tucs = WTX_TCPIP_TUCSS(offset) |
   6271 		    WTX_TCPIP_TUCSO(offset +
   6272 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6273 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6274 	} else {
   6275 		/* Just initialize it to a valid TCP context. */
   6276 		tucs = WTX_TCPIP_TUCSS(offset) |
   6277 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6278 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6279 	}
   6280 
   6281 	/* Fill in the context descriptor. */
   6282 	t = (struct livengood_tcpip_ctxdesc *)
   6283 	    &txq->txq_descs[txq->txq_next];
   6284 	t->tcpip_ipcs = htole32(ipcs);
   6285 	t->tcpip_tucs = htole32(tucs);
   6286 	t->tcpip_cmdlen = htole32(cmdlen);
   6287 	t->tcpip_seg = htole32(seg);
   6288 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6289 
   6290 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6291 	txs->txs_ndesc++;
   6292 
   6293 	*cmdp = cmd;
   6294 	*fieldsp = fields;
   6295 
   6296 	return 0;
   6297 }
   6298 
   6299 /*
   6300  * wm_start:		[ifnet interface function]
   6301  *
   6302  *	Start packet transmission on the interface.
   6303  */
   6304 static void
   6305 wm_start(struct ifnet *ifp)
   6306 {
   6307 	struct wm_softc *sc = ifp->if_softc;
   6308 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6309 
   6310 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6311 
   6312 	mutex_enter(txq->txq_lock);
   6313 	if (!txq->txq_stopping)
   6314 		wm_start_locked(ifp);
   6315 	mutex_exit(txq->txq_lock);
   6316 }
   6317 
   6318 static void
   6319 wm_start_locked(struct ifnet *ifp)
   6320 {
   6321 	struct wm_softc *sc = ifp->if_softc;
   6322 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6323 	struct mbuf *m0;
   6324 	struct m_tag *mtag;
   6325 	struct wm_txsoft *txs;
   6326 	bus_dmamap_t dmamap;
   6327 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6328 	bus_addr_t curaddr;
   6329 	bus_size_t seglen, curlen;
   6330 	uint32_t cksumcmd;
   6331 	uint8_t cksumfields;
   6332 
   6333 	KASSERT(mutex_owned(txq->txq_lock));
   6334 
   6335 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6336 		return;
   6337 
   6338 	/* Remember the previous number of free descriptors. */
   6339 	ofree = txq->txq_free;
   6340 
   6341 	/*
   6342 	 * Loop through the send queue, setting up transmit descriptors
   6343 	 * until we drain the queue, or use up all available transmit
   6344 	 * descriptors.
   6345 	 */
   6346 	for (;;) {
   6347 		m0 = NULL;
   6348 
   6349 		/* Get a work queue entry. */
   6350 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6351 			wm_txeof(sc, txq);
   6352 			if (txq->txq_sfree == 0) {
   6353 				DPRINTF(WM_DEBUG_TX,
   6354 				    ("%s: TX: no free job descriptors\n",
   6355 					device_xname(sc->sc_dev)));
   6356 				WM_Q_EVCNT_INCR(txq, txsstall);
   6357 				break;
   6358 			}
   6359 		}
   6360 
   6361 		/* Grab a packet off the queue. */
   6362 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6363 		if (m0 == NULL)
   6364 			break;
   6365 
   6366 		DPRINTF(WM_DEBUG_TX,
   6367 		    ("%s: TX: have packet to transmit: %p\n",
   6368 		    device_xname(sc->sc_dev), m0));
   6369 
   6370 		txs = &txq->txq_soft[txq->txq_snext];
   6371 		dmamap = txs->txs_dmamap;
   6372 
   6373 		use_tso = (m0->m_pkthdr.csum_flags &
   6374 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6375 
   6376 		/*
   6377 		 * So says the Linux driver:
   6378 		 * The controller does a simple calculation to make sure
   6379 		 * there is enough room in the FIFO before initiating the
   6380 		 * DMA for each buffer.  The calc is:
   6381 		 *	4 = ceil(buffer len / MSS)
   6382 		 * To make sure we don't overrun the FIFO, adjust the max
   6383 		 * buffer len if the MSS drops.
   6384 		 */
   6385 		dmamap->dm_maxsegsz =
   6386 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6387 		    ? m0->m_pkthdr.segsz << 2
   6388 		    : WTX_MAX_LEN;
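         		/*
         		 * For example, with a 1460 byte MSS each DMA segment
         		 * is capped at 5840 bytes (4 * MSS), as long as that
         		 * is below WTX_MAX_LEN.
         		 */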
   6389 
   6390 		/*
   6391 		 * Load the DMA map.  If this fails, the packet either
   6392 		 * didn't fit in the allotted number of segments, or we
   6393 		 * were short on resources.  For the too-many-segments
   6394 		 * case, we simply report an error and drop the packet,
   6395 		 * since we can't sanely copy a jumbo packet to a single
   6396 		 * buffer.
   6397 		 */
   6398 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6399 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6400 		if (error) {
   6401 			if (error == EFBIG) {
   6402 				WM_Q_EVCNT_INCR(txq, txdrop);
   6403 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6404 				    "DMA segments, dropping...\n",
   6405 				    device_xname(sc->sc_dev));
   6406 				wm_dump_mbuf_chain(sc, m0);
   6407 				m_freem(m0);
   6408 				continue;
   6409 			}
    6410 			/* Short on resources, just stop for now. */
   6411 			DPRINTF(WM_DEBUG_TX,
   6412 			    ("%s: TX: dmamap load failed: %d\n",
   6413 			    device_xname(sc->sc_dev), error));
   6414 			break;
   6415 		}
   6416 
   6417 		segs_needed = dmamap->dm_nsegs;
   6418 		if (use_tso) {
   6419 			/* For sentinel descriptor; see below. */
   6420 			segs_needed++;
   6421 		}
   6422 
   6423 		/*
   6424 		 * Ensure we have enough descriptors free to describe
   6425 		 * the packet.  Note, we always reserve one descriptor
   6426 		 * at the end of the ring due to the semantics of the
   6427 		 * TDT register, plus one more in the event we need
   6428 		 * to load offload context.
   6429 		 */
   6430 		if (segs_needed > txq->txq_free - 2) {
   6431 			/*
   6432 			 * Not enough free descriptors to transmit this
   6433 			 * packet.  We haven't committed anything yet,
    6434 			 * so just unload the DMA map and punt; the packet
    6435 			 * will be dropped below.  Notify the upper
   6436 			 * layer that there are no more slots left.
   6437 			 */
   6438 			DPRINTF(WM_DEBUG_TX,
   6439 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6440 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6441 			    segs_needed, txq->txq_free - 1));
   6442 			ifp->if_flags |= IFF_OACTIVE;
   6443 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6444 			WM_Q_EVCNT_INCR(txq, txdstall);
   6445 			break;
   6446 		}
   6447 
   6448 		/*
   6449 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6450 		 * once we know we can transmit the packet, since we
   6451 		 * do some internal FIFO space accounting here.
   6452 		 */
   6453 		if (sc->sc_type == WM_T_82547 &&
   6454 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6455 			DPRINTF(WM_DEBUG_TX,
   6456 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6457 			    device_xname(sc->sc_dev)));
   6458 			ifp->if_flags |= IFF_OACTIVE;
   6459 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6460 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6461 			break;
   6462 		}
   6463 
   6464 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6465 
   6466 		DPRINTF(WM_DEBUG_TX,
   6467 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6468 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6469 
   6470 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6471 
   6472 		/*
   6473 		 * Store a pointer to the packet so that we can free it
   6474 		 * later.
   6475 		 *
   6476 		 * Initially, we consider the number of descriptors the
    6477 		 * packet uses to be the number of DMA segments.  This may be
   6478 		 * incremented by 1 if we do checksum offload (a descriptor
   6479 		 * is used to set the checksum context).
   6480 		 */
   6481 		txs->txs_mbuf = m0;
   6482 		txs->txs_firstdesc = txq->txq_next;
   6483 		txs->txs_ndesc = segs_needed;
   6484 
   6485 		/* Set up offload parameters for this packet. */
   6486 		if (m0->m_pkthdr.csum_flags &
   6487 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6488 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6489 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6490 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6491 					  &cksumfields) != 0) {
   6492 				/* Error message already displayed. */
   6493 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6494 				continue;
   6495 			}
   6496 		} else {
   6497 			cksumcmd = 0;
   6498 			cksumfields = 0;
   6499 		}
   6500 
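         		/* Delay Tx interrupts and have the chip insert the FCS. */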
   6501 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6502 
   6503 		/* Sync the DMA map. */
   6504 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6505 		    BUS_DMASYNC_PREWRITE);
   6506 
   6507 		/* Initialize the transmit descriptor. */
   6508 		for (nexttx = txq->txq_next, seg = 0;
   6509 		     seg < dmamap->dm_nsegs; seg++) {
   6510 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6511 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6512 			     seglen != 0;
   6513 			     curaddr += curlen, seglen -= curlen,
   6514 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6515 				curlen = seglen;
   6516 
   6517 				/*
   6518 				 * So says the Linux driver:
   6519 				 * Work around for premature descriptor
   6520 				 * write-backs in TSO mode.  Append a
   6521 				 * 4-byte sentinel descriptor.
   6522 				 */
   6523 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6524 				    curlen > 8)
   6525 					curlen -= 4;
   6526 
   6527 				wm_set_dma_addr(
   6528 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6529 				txq->txq_descs[nexttx].wtx_cmdlen
   6530 				    = htole32(cksumcmd | curlen);
   6531 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6532 				    = 0;
   6533 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6534 				    = cksumfields;
   6535 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6536 				lasttx = nexttx;
   6537 
   6538 				DPRINTF(WM_DEBUG_TX,
   6539 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6540 				     "len %#04zx\n",
   6541 				    device_xname(sc->sc_dev), nexttx,
   6542 				    (uint64_t)curaddr, curlen));
   6543 			}
   6544 		}
   6545 
   6546 		KASSERT(lasttx != -1);
   6547 
   6548 		/*
   6549 		 * Set up the command byte on the last descriptor of
   6550 		 * the packet.  If we're in the interrupt delay window,
   6551 		 * delay the interrupt.
   6552 		 */
   6553 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6554 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6555 
   6556 		/*
   6557 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6558 		 * up the descriptor to encapsulate the packet for us.
   6559 		 *
   6560 		 * This is only valid on the last descriptor of the packet.
   6561 		 */
   6562 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6563 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6564 			    htole32(WTX_CMD_VLE);
   6565 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6566 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6567 		}
   6568 
   6569 		txs->txs_lastdesc = lasttx;
   6570 
   6571 		DPRINTF(WM_DEBUG_TX,
   6572 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6573 		    device_xname(sc->sc_dev),
   6574 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6575 
   6576 		/* Sync the descriptors we're using. */
   6577 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6578 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6579 
   6580 		/* Give the packet to the chip. */
   6581 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6582 
   6583 		DPRINTF(WM_DEBUG_TX,
   6584 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6585 
   6586 		DPRINTF(WM_DEBUG_TX,
   6587 		    ("%s: TX: finished transmitting packet, job %d\n",
   6588 		    device_xname(sc->sc_dev), txq->txq_snext));
   6589 
   6590 		/* Advance the tx pointer. */
   6591 		txq->txq_free -= txs->txs_ndesc;
   6592 		txq->txq_next = nexttx;
   6593 
   6594 		txq->txq_sfree--;
   6595 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6596 
   6597 		/* Pass the packet to any BPF listeners. */
   6598 		bpf_mtap(ifp, m0);
   6599 	}
   6600 
   6601 	if (m0 != NULL) {
   6602 		ifp->if_flags |= IFF_OACTIVE;
   6603 		WM_Q_EVCNT_INCR(txq, txdrop);
   6604 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6605 			__func__));
   6606 		m_freem(m0);
   6607 	}
   6608 
   6609 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6610 		/* No more slots; notify upper layer. */
   6611 		ifp->if_flags |= IFF_OACTIVE;
   6612 	}
   6613 
   6614 	if (txq->txq_free != ofree) {
   6615 		/* Set a watchdog timer in case the chip flakes out. */
   6616 		ifp->if_timer = 5;
   6617 	}
   6618 }
   6619 
   6620 /*
   6621  * wm_nq_tx_offload:
   6622  *
   6623  *	Set up TCP/IP checksumming parameters for the
   6624  *	specified packet, for NEWQUEUE devices
   6625  */
   6626 static int
   6627 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6628     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6629 {
   6630 	struct mbuf *m0 = txs->txs_mbuf;
   6631 	struct m_tag *mtag;
   6632 	uint32_t vl_len, mssidx, cmdc;
   6633 	struct ether_header *eh;
   6634 	int offset, iphl;
   6635 
   6636 	/*
   6637 	 * XXX It would be nice if the mbuf pkthdr had offset
   6638 	 * fields for the protocol headers.
   6639 	 */
   6640 	*cmdlenp = 0;
   6641 	*fieldsp = 0;
   6642 
   6643 	eh = mtod(m0, struct ether_header *);
   6644 	switch (htons(eh->ether_type)) {
   6645 	case ETHERTYPE_IP:
   6646 	case ETHERTYPE_IPV6:
   6647 		offset = ETHER_HDR_LEN;
   6648 		break;
   6649 
   6650 	case ETHERTYPE_VLAN:
   6651 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6652 		break;
   6653 
   6654 	default:
   6655 		/* Don't support this protocol or encapsulation. */
   6656 		*do_csum = false;
   6657 		return 0;
   6658 	}
   6659 	*do_csum = true;
   6660 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6661 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6662 
   6663 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6664 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6665 
   6666 	if ((m0->m_pkthdr.csum_flags &
   6667 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6668 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6669 	} else {
   6670 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6671 	}
   6672 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6673 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6674 
   6675 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6676 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6677 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6678 		*cmdlenp |= NQTX_CMD_VLE;
   6679 	}
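         	/*
         	 * At this point vl_len packs the MAC header length, the IP
         	 * header length and (if present) the VLAN tag into the
         	 * context descriptor's single 32-bit VL_LEN field.
         	 */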
   6680 
   6681 	mssidx = 0;
   6682 
   6683 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6684 		int hlen = offset + iphl;
   6685 		int tcp_hlen;
   6686 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6687 
   6688 		if (__predict_false(m0->m_len <
   6689 				    (hlen + sizeof(struct tcphdr)))) {
   6690 			/*
   6691 			 * TCP/IP headers are not in the first mbuf; we need
   6692 			 * to do this the slow and painful way.  Let's just
   6693 			 * hope this doesn't happen very often.
   6694 			 */
   6695 			struct tcphdr th;
   6696 
   6697 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6698 
   6699 			m_copydata(m0, hlen, sizeof(th), &th);
   6700 			if (v4) {
   6701 				struct ip ip;
   6702 
   6703 				m_copydata(m0, offset, sizeof(ip), &ip);
   6704 				ip.ip_len = 0;
   6705 				m_copyback(m0,
   6706 				    offset + offsetof(struct ip, ip_len),
   6707 				    sizeof(ip.ip_len), &ip.ip_len);
   6708 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6709 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6710 			} else {
   6711 				struct ip6_hdr ip6;
   6712 
   6713 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6714 				ip6.ip6_plen = 0;
   6715 				m_copyback(m0,
   6716 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6717 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6718 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6719 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6720 			}
   6721 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6722 			    sizeof(th.th_sum), &th.th_sum);
   6723 
   6724 			tcp_hlen = th.th_off << 2;
   6725 		} else {
   6726 			/*
   6727 			 * TCP/IP headers are in the first mbuf; we can do
   6728 			 * this the easy way.
   6729 			 */
   6730 			struct tcphdr *th;
   6731 
   6732 			if (v4) {
   6733 				struct ip *ip =
   6734 				    (void *)(mtod(m0, char *) + offset);
   6735 				th = (void *)(mtod(m0, char *) + hlen);
   6736 
   6737 				ip->ip_len = 0;
   6738 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6739 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6740 			} else {
   6741 				struct ip6_hdr *ip6 =
   6742 				    (void *)(mtod(m0, char *) + offset);
   6743 				th = (void *)(mtod(m0, char *) + hlen);
   6744 
   6745 				ip6->ip6_plen = 0;
   6746 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6747 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6748 			}
   6749 			tcp_hlen = th->th_off << 2;
   6750 		}
   6751 		hlen += tcp_hlen;
   6752 		*cmdlenp |= NQTX_CMD_TSE;
   6753 
   6754 		if (v4) {
   6755 			WM_Q_EVCNT_INCR(txq, txtso);
   6756 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6757 		} else {
   6758 			WM_Q_EVCNT_INCR(txq, txtso6);
   6759 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6760 		}
   6761 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6762 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6763 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6764 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6765 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6766 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6767 	} else {
   6768 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6769 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6770 	}
   6771 
   6772 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6773 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6774 		cmdc |= NQTXC_CMD_IP4;
   6775 	}
   6776 
   6777 	if (m0->m_pkthdr.csum_flags &
   6778 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6779 		WM_Q_EVCNT_INCR(txq, txtusum);
   6780 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6781 			cmdc |= NQTXC_CMD_TCP;
   6782 		} else {
   6783 			cmdc |= NQTXC_CMD_UDP;
   6784 		}
   6785 		cmdc |= NQTXC_CMD_IP4;
   6786 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6787 	}
   6788 	if (m0->m_pkthdr.csum_flags &
   6789 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6790 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6791 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6792 			cmdc |= NQTXC_CMD_TCP;
   6793 		} else {
   6794 			cmdc |= NQTXC_CMD_UDP;
   6795 		}
   6796 		cmdc |= NQTXC_CMD_IP6;
   6797 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6798 	}
   6799 
   6800 	/* Fill in the context descriptor. */
   6801 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6802 	    htole32(vl_len);
   6803 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6804 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6805 	    htole32(cmdc);
   6806 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6807 	    htole32(mssidx);
   6808 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6809 	DPRINTF(WM_DEBUG_TX,
   6810 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6811 	    txq->txq_next, 0, vl_len));
   6812 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6813 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6814 	txs->txs_ndesc++;
   6815 	return 0;
   6816 }
   6817 
   6818 /*
   6819  * wm_nq_start:		[ifnet interface function]
   6820  *
   6821  *	Start packet transmission on the interface for NEWQUEUE devices
   6822  */
   6823 static void
   6824 wm_nq_start(struct ifnet *ifp)
   6825 {
   6826 	struct wm_softc *sc = ifp->if_softc;
   6827 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6828 
   6829 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6830 
   6831 	mutex_enter(txq->txq_lock);
   6832 	if (!txq->txq_stopping)
   6833 		wm_nq_start_locked(ifp);
   6834 	mutex_exit(txq->txq_lock);
   6835 }
   6836 
   6837 static void
   6838 wm_nq_start_locked(struct ifnet *ifp)
   6839 {
   6840 	struct wm_softc *sc = ifp->if_softc;
   6841 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6842 
   6843 	wm_nq_send_common_locked(ifp, txq, false);
   6844 }
   6845 
   6846 static inline int
   6847 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6848 {
   6849 	struct wm_softc *sc = ifp->if_softc;
   6850 	u_int cpuid = cpu_index(curcpu());
   6851 
   6852 	/*
    6853 	 * Currently, a simple distribution strategy based on CPU index.
    6854 	 * TODO:
    6855 	 * distribute by flowid (RSS hash value).
   6856 	 */
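         	/*
         	 * For example, with sc_nqueues == 4, CPU indices 0,1,2,3,4,...
         	 * map to queues 0,1,2,3,0,... (rotated by sc_affinity_offset).
         	 */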
   6857 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6858 }
   6859 
   6860 static int
   6861 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6862 {
   6863 	int qid;
   6864 	struct wm_softc *sc = ifp->if_softc;
   6865 	struct wm_txqueue *txq;
   6866 
   6867 	qid = wm_nq_select_txqueue(ifp, m);
   6868 	txq = &sc->sc_queue[qid].wmq_txq;
   6869 
   6870 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6871 		m_freem(m);
   6872 		WM_Q_EVCNT_INCR(txq, txdrop);
   6873 		return ENOBUFS;
   6874 	}
   6875 
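         	/*
         	 * If the Tx lock is contended, the packet just queued on the
         	 * pcq will be picked up by the current lock holder or by the
         	 * next Tx interrupt (see wm_txrxintr_msix), so it is safe to
         	 * return without taking the lock here.
         	 */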
   6876 	if (mutex_tryenter(txq->txq_lock)) {
   6877 		/* XXXX should be per TX queue */
   6878 		ifp->if_obytes += m->m_pkthdr.len;
   6879 		if (m->m_flags & M_MCAST)
   6880 			ifp->if_omcasts++;
   6881 
   6882 		if (!txq->txq_stopping)
   6883 			wm_nq_transmit_locked(ifp, txq);
   6884 		mutex_exit(txq->txq_lock);
   6885 	}
   6886 
   6887 	return 0;
   6888 }
   6889 
   6890 static void
   6891 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6892 {
   6893 
   6894 	wm_nq_send_common_locked(ifp, txq, true);
   6895 }
   6896 
   6897 static void
   6898 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6899     bool is_transmit)
   6900 {
   6901 	struct wm_softc *sc = ifp->if_softc;
   6902 	struct mbuf *m0;
   6903 	struct m_tag *mtag;
   6904 	struct wm_txsoft *txs;
   6905 	bus_dmamap_t dmamap;
   6906 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6907 	bool do_csum, sent;
   6908 
   6909 	KASSERT(mutex_owned(txq->txq_lock));
   6910 
   6911 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6912 		return;
   6913 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6914 		return;
   6915 
   6916 	sent = false;
   6917 
   6918 	/*
   6919 	 * Loop through the send queue, setting up transmit descriptors
   6920 	 * until we drain the queue, or use up all available transmit
   6921 	 * descriptors.
   6922 	 */
   6923 	for (;;) {
   6924 		m0 = NULL;
   6925 
   6926 		/* Get a work queue entry. */
   6927 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6928 			wm_txeof(sc, txq);
   6929 			if (txq->txq_sfree == 0) {
   6930 				DPRINTF(WM_DEBUG_TX,
   6931 				    ("%s: TX: no free job descriptors\n",
   6932 					device_xname(sc->sc_dev)));
   6933 				WM_Q_EVCNT_INCR(txq, txsstall);
   6934 				break;
   6935 			}
   6936 		}
   6937 
   6938 		/* Grab a packet off the queue. */
   6939 		if (is_transmit)
   6940 			m0 = pcq_get(txq->txq_interq);
   6941 		else
   6942 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6943 		if (m0 == NULL)
   6944 			break;
   6945 
   6946 		DPRINTF(WM_DEBUG_TX,
   6947 		    ("%s: TX: have packet to transmit: %p\n",
   6948 		    device_xname(sc->sc_dev), m0));
   6949 
   6950 		txs = &txq->txq_soft[txq->txq_snext];
   6951 		dmamap = txs->txs_dmamap;
   6952 
   6953 		/*
   6954 		 * Load the DMA map.  If this fails, the packet either
   6955 		 * didn't fit in the allotted number of segments, or we
   6956 		 * were short on resources.  For the too-many-segments
   6957 		 * case, we simply report an error and drop the packet,
   6958 		 * since we can't sanely copy a jumbo packet to a single
   6959 		 * buffer.
   6960 		 */
   6961 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6962 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6963 		if (error) {
   6964 			if (error == EFBIG) {
   6965 				WM_Q_EVCNT_INCR(txq, txdrop);
   6966 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6967 				    "DMA segments, dropping...\n",
   6968 				    device_xname(sc->sc_dev));
   6969 				wm_dump_mbuf_chain(sc, m0);
   6970 				m_freem(m0);
   6971 				continue;
   6972 			}
   6973 			/* Short on resources, just stop for now. */
   6974 			DPRINTF(WM_DEBUG_TX,
   6975 			    ("%s: TX: dmamap load failed: %d\n",
   6976 			    device_xname(sc->sc_dev), error));
   6977 			break;
   6978 		}
   6979 
   6980 		segs_needed = dmamap->dm_nsegs;
   6981 
   6982 		/*
   6983 		 * Ensure we have enough descriptors free to describe
   6984 		 * the packet.  Note, we always reserve one descriptor
   6985 		 * at the end of the ring due to the semantics of the
   6986 		 * TDT register, plus one more in the event we need
   6987 		 * to load offload context.
   6988 		 */
   6989 		if (segs_needed > txq->txq_free - 2) {
   6990 			/*
   6991 			 * Not enough free descriptors to transmit this
   6992 			 * packet.  We haven't committed anything yet,
    6993 			 * so just unload the DMA map and punt; the packet
    6994 			 * will be dropped below.  Notify the upper
   6995 			 * layer that there are no more slots left.
   6996 			 */
   6997 			DPRINTF(WM_DEBUG_TX,
   6998 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6999 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7000 			    segs_needed, txq->txq_free - 1));
   7001 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7002 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7003 			WM_Q_EVCNT_INCR(txq, txdstall);
   7004 			break;
   7005 		}
   7006 
   7007 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7008 
   7009 		DPRINTF(WM_DEBUG_TX,
   7010 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7011 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7012 
   7013 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7014 
   7015 		/*
   7016 		 * Store a pointer to the packet so that we can free it
   7017 		 * later.
   7018 		 *
   7019 		 * Initially, we consider the number of descriptors the
    7020 		 * packet uses to be the number of DMA segments.  This may be
   7021 		 * incremented by 1 if we do checksum offload (a descriptor
   7022 		 * is used to set the checksum context).
   7023 		 */
   7024 		txs->txs_mbuf = m0;
   7025 		txs->txs_firstdesc = txq->txq_next;
   7026 		txs->txs_ndesc = segs_needed;
   7027 
   7028 		/* Set up offload parameters for this packet. */
   7029 		uint32_t cmdlen, fields, dcmdlen;
   7030 		if (m0->m_pkthdr.csum_flags &
   7031 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7032 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7033 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7034 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7035 			    &do_csum) != 0) {
   7036 				/* Error message already displayed. */
   7037 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7038 				continue;
   7039 			}
   7040 		} else {
   7041 			do_csum = false;
   7042 			cmdlen = 0;
   7043 			fields = 0;
   7044 		}
   7045 
   7046 		/* Sync the DMA map. */
   7047 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7048 		    BUS_DMASYNC_PREWRITE);
   7049 
   7050 		/* Initialize the first transmit descriptor. */
   7051 		nexttx = txq->txq_next;
   7052 		if (!do_csum) {
    7053 			/* Set up a legacy descriptor. */
   7054 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7055 			    dmamap->dm_segs[0].ds_addr);
   7056 			txq->txq_descs[nexttx].wtx_cmdlen =
   7057 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7058 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7059 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7060 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7061 			    NULL) {
   7062 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7063 				    htole32(WTX_CMD_VLE);
   7064 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7065 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7066 			} else {
   7067 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7068 			}
   7069 			dcmdlen = 0;
   7070 		} else {
    7071 			/* Set up an advanced data descriptor. */
   7072 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7073 			    htole64(dmamap->dm_segs[0].ds_addr);
   7074 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7075 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7076 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7077 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7078 			    htole32(fields);
   7079 			DPRINTF(WM_DEBUG_TX,
   7080 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7081 			    device_xname(sc->sc_dev), nexttx,
   7082 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7083 			DPRINTF(WM_DEBUG_TX,
   7084 			    ("\t 0x%08x%08x\n", fields,
   7085 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7086 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7087 		}
   7088 
   7089 		lasttx = nexttx;
   7090 		nexttx = WM_NEXTTX(txq, nexttx);
   7091 		/*
    7092 		 * Fill in the next descriptors.  The legacy and advanced
    7093 		 * formats are the same here.
   7094 		 */
   7095 		for (seg = 1; seg < dmamap->dm_nsegs;
   7096 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7097 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7098 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7099 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7100 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7101 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7102 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7103 			lasttx = nexttx;
   7104 
   7105 			DPRINTF(WM_DEBUG_TX,
   7106 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7107 			     "len %#04zx\n",
   7108 			    device_xname(sc->sc_dev), nexttx,
   7109 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7110 			    dmamap->dm_segs[seg].ds_len));
   7111 		}
   7112 
   7113 		KASSERT(lasttx != -1);
   7114 
   7115 		/*
   7116 		 * Set up the command byte on the last descriptor of
   7117 		 * the packet.  If we're in the interrupt delay window,
   7118 		 * delay the interrupt.
   7119 		 */
   7120 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7121 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7122 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7123 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7124 
   7125 		txs->txs_lastdesc = lasttx;
   7126 
   7127 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7128 		    device_xname(sc->sc_dev),
   7129 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7130 
   7131 		/* Sync the descriptors we're using. */
   7132 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7133 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7134 
   7135 		/* Give the packet to the chip. */
   7136 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7137 		sent = true;
   7138 
   7139 		DPRINTF(WM_DEBUG_TX,
   7140 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7141 
   7142 		DPRINTF(WM_DEBUG_TX,
   7143 		    ("%s: TX: finished transmitting packet, job %d\n",
   7144 		    device_xname(sc->sc_dev), txq->txq_snext));
   7145 
   7146 		/* Advance the tx pointer. */
   7147 		txq->txq_free -= txs->txs_ndesc;
   7148 		txq->txq_next = nexttx;
   7149 
   7150 		txq->txq_sfree--;
   7151 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7152 
   7153 		/* Pass the packet to any BPF listeners. */
   7154 		bpf_mtap(ifp, m0);
   7155 	}
   7156 
   7157 	if (m0 != NULL) {
   7158 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7159 		WM_Q_EVCNT_INCR(txq, txdrop);
   7160 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7161 			__func__));
   7162 		m_freem(m0);
   7163 	}
   7164 
   7165 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7166 		/* No more slots; notify upper layer. */
   7167 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7168 	}
   7169 
   7170 	if (sent) {
   7171 		/* Set a watchdog timer in case the chip flakes out. */
   7172 		ifp->if_timer = 5;
   7173 	}
   7174 }
   7175 
   7176 /* Interrupt */
   7177 
   7178 /*
   7179  * wm_txeof:
   7180  *
   7181  *	Helper; handle transmit interrupts.
   7182  */
   7183 static int
   7184 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7185 {
   7186 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7187 	struct wm_txsoft *txs;
   7188 	bool processed = false;
   7189 	int count = 0;
   7190 	int i;
   7191 	uint8_t status;
   7192 
   7193 	KASSERT(mutex_owned(txq->txq_lock));
   7194 
   7195 	if (txq->txq_stopping)
   7196 		return 0;
   7197 
   7198 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7199 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7200 	else
   7201 		ifp->if_flags &= ~IFF_OACTIVE;
   7202 
   7203 	/*
   7204 	 * Go through the Tx list and free mbufs for those
   7205 	 * frames which have been transmitted.
   7206 	 */
   7207 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7208 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7209 		txs = &txq->txq_soft[i];
   7210 
   7211 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7212 			device_xname(sc->sc_dev), i));
   7213 
   7214 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7215 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7216 
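         		/*
         		 * The descriptor-done (DD) bit is set in the job's last
         		 * descriptor once the whole packet has been sent.  If it
         		 * is not set yet, resync the descriptor for a later read
         		 * and stop scanning.
         		 */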
   7217 		status =
   7218 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7219 		if ((status & WTX_ST_DD) == 0) {
   7220 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7221 			    BUS_DMASYNC_PREREAD);
   7222 			break;
   7223 		}
   7224 
   7225 		processed = true;
   7226 		count++;
   7227 		DPRINTF(WM_DEBUG_TX,
   7228 		    ("%s: TX: job %d done: descs %d..%d\n",
   7229 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7230 		    txs->txs_lastdesc));
   7231 
   7232 		/*
   7233 		 * XXX We should probably be using the statistics
   7234 		 * XXX registers, but I don't know if they exist
   7235 		 * XXX on chips before the i82544.
   7236 		 */
   7237 
   7238 #ifdef WM_EVENT_COUNTERS
   7239 		if (status & WTX_ST_TU)
   7240 			WM_Q_EVCNT_INCR(txq, tu);
   7241 #endif /* WM_EVENT_COUNTERS */
   7242 
   7243 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7244 			ifp->if_oerrors++;
   7245 			if (status & WTX_ST_LC)
   7246 				log(LOG_WARNING, "%s: late collision\n",
   7247 				    device_xname(sc->sc_dev));
   7248 			else if (status & WTX_ST_EC) {
   7249 				ifp->if_collisions += 16;
   7250 				log(LOG_WARNING, "%s: excessive collisions\n",
   7251 				    device_xname(sc->sc_dev));
   7252 			}
   7253 		} else
   7254 			ifp->if_opackets++;
   7255 
   7256 		txq->txq_free += txs->txs_ndesc;
   7257 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7258 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7259 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7260 		m_freem(txs->txs_mbuf);
   7261 		txs->txs_mbuf = NULL;
   7262 	}
   7263 
   7264 	/* Update the dirty transmit buffer pointer. */
   7265 	txq->txq_sdirty = i;
   7266 	DPRINTF(WM_DEBUG_TX,
   7267 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7268 
   7269 	if (count != 0)
   7270 		rnd_add_uint32(&sc->rnd_source, count);
   7271 
   7272 	/*
   7273 	 * If there are no more pending transmissions, cancel the watchdog
   7274 	 * timer.
   7275 	 */
   7276 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7277 		ifp->if_timer = 0;
   7278 
   7279 	return processed;
   7280 }
   7281 
   7282 /*
   7283  * wm_rxeof:
   7284  *
   7285  *	Helper; handle receive interrupts.
   7286  */
   7287 static void
   7288 wm_rxeof(struct wm_rxqueue *rxq)
   7289 {
   7290 	struct wm_softc *sc = rxq->rxq_sc;
   7291 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7292 	struct wm_rxsoft *rxs;
   7293 	struct mbuf *m;
   7294 	int i, len;
   7295 	int count = 0;
   7296 	uint8_t status, errors;
   7297 	uint16_t vlantag;
   7298 
   7299 	KASSERT(mutex_owned(rxq->rxq_lock));
   7300 
   7301 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7302 		rxs = &rxq->rxq_soft[i];
   7303 
   7304 		DPRINTF(WM_DEBUG_RX,
   7305 		    ("%s: RX: checking descriptor %d\n",
   7306 		    device_xname(sc->sc_dev), i));
   7307 
    7308 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7309 
   7310 		status = rxq->rxq_descs[i].wrx_status;
   7311 		errors = rxq->rxq_descs[i].wrx_errors;
   7312 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7313 		vlantag = rxq->rxq_descs[i].wrx_special;
   7314 
   7315 		if ((status & WRX_ST_DD) == 0) {
   7316 			/* We have processed all of the receive descriptors. */
   7317 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7318 			break;
   7319 		}
   7320 
   7321 		count++;
   7322 		if (__predict_false(rxq->rxq_discard)) {
   7323 			DPRINTF(WM_DEBUG_RX,
   7324 			    ("%s: RX: discarding contents of descriptor %d\n",
   7325 			    device_xname(sc->sc_dev), i));
   7326 			wm_init_rxdesc(rxq, i);
   7327 			if (status & WRX_ST_EOP) {
   7328 				/* Reset our state. */
   7329 				DPRINTF(WM_DEBUG_RX,
   7330 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7331 				    device_xname(sc->sc_dev)));
   7332 				rxq->rxq_discard = 0;
   7333 			}
   7334 			continue;
   7335 		}
   7336 
   7337 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7338 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7339 
   7340 		m = rxs->rxs_mbuf;
   7341 
   7342 		/*
   7343 		 * Add a new receive buffer to the ring, unless of
   7344 		 * course the length is zero. Treat the latter as a
   7345 		 * failed mapping.
   7346 		 */
   7347 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7348 			/*
   7349 			 * Failed, throw away what we've done so
   7350 			 * far, and discard the rest of the packet.
   7351 			 */
   7352 			ifp->if_ierrors++;
   7353 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7354 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7355 			wm_init_rxdesc(rxq, i);
   7356 			if ((status & WRX_ST_EOP) == 0)
   7357 				rxq->rxq_discard = 1;
   7358 			if (rxq->rxq_head != NULL)
   7359 				m_freem(rxq->rxq_head);
   7360 			WM_RXCHAIN_RESET(rxq);
   7361 			DPRINTF(WM_DEBUG_RX,
   7362 			    ("%s: RX: Rx buffer allocation failed, "
   7363 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7364 			    rxq->rxq_discard ? " (discard)" : ""));
   7365 			continue;
   7366 		}
   7367 
   7368 		m->m_len = len;
   7369 		rxq->rxq_len += len;
   7370 		DPRINTF(WM_DEBUG_RX,
   7371 		    ("%s: RX: buffer at %p len %d\n",
   7372 		    device_xname(sc->sc_dev), m->m_data, len));
   7373 
   7374 		/* If this is not the end of the packet, keep looking. */
   7375 		if ((status & WRX_ST_EOP) == 0) {
   7376 			WM_RXCHAIN_LINK(rxq, m);
   7377 			DPRINTF(WM_DEBUG_RX,
   7378 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7379 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7380 			continue;
   7381 		}
   7382 
   7383 		/*
   7384 		 * Okay, we have the entire packet now.  The chip is
    7385 		 * configured to include the FCS except on the I350 and
    7386 		 * I21[01] (not all chips can be configured to strip it),
    7387 		 * so we need to trim it.
    7388 		 * We may need to adjust the length of the previous mbuf
    7389 		 * in the chain if the current mbuf is too short.
    7390 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7391 		 * register is always set on the I350, so we don't trim it.
   7392 		 */
   7393 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7394 		    && (sc->sc_type != WM_T_I210)
   7395 		    && (sc->sc_type != WM_T_I211)) {
   7396 			if (m->m_len < ETHER_CRC_LEN) {
   7397 				rxq->rxq_tail->m_len
   7398 				    -= (ETHER_CRC_LEN - m->m_len);
   7399 				m->m_len = 0;
   7400 			} else
   7401 				m->m_len -= ETHER_CRC_LEN;
   7402 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7403 		} else
   7404 			len = rxq->rxq_len;
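         		/*
         		 * For example, if the final mbuf holds only 2 bytes,
         		 * those 2 bytes and the last 2 bytes of the previous
         		 * mbuf are the FCS (ETHER_CRC_LEN is 4) and are trimmed.
         		 */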
   7405 
   7406 		WM_RXCHAIN_LINK(rxq, m);
   7407 
   7408 		*rxq->rxq_tailp = NULL;
   7409 		m = rxq->rxq_head;
   7410 
   7411 		WM_RXCHAIN_RESET(rxq);
   7412 
   7413 		DPRINTF(WM_DEBUG_RX,
   7414 		    ("%s: RX: have entire packet, len -> %d\n",
   7415 		    device_xname(sc->sc_dev), len));
   7416 
   7417 		/* If an error occurred, update stats and drop the packet. */
   7418 		if (errors &
   7419 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7420 			if (errors & WRX_ER_SE)
   7421 				log(LOG_WARNING, "%s: symbol error\n",
   7422 				    device_xname(sc->sc_dev));
   7423 			else if (errors & WRX_ER_SEQ)
   7424 				log(LOG_WARNING, "%s: receive sequence error\n",
   7425 				    device_xname(sc->sc_dev));
   7426 			else if (errors & WRX_ER_CE)
   7427 				log(LOG_WARNING, "%s: CRC error\n",
   7428 				    device_xname(sc->sc_dev));
   7429 			m_freem(m);
   7430 			continue;
   7431 		}
   7432 
   7433 		/* No errors.  Receive the packet. */
   7434 		m_set_rcvif(m, ifp);
   7435 		m->m_pkthdr.len = len;
   7436 
   7437 		/*
   7438 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7439 		 * for us.  Associate the tag with the packet.
   7440 		 */
   7441 		/* XXXX should check for i350 and i354 */
   7442 		if ((status & WRX_ST_VP) != 0) {
   7443 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7444 		}
   7445 
   7446 		/* Set up checksum info for this packet. */
   7447 		if ((status & WRX_ST_IXSM) == 0) {
   7448 			if (status & WRX_ST_IPCS) {
   7449 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7450 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7451 				if (errors & WRX_ER_IPE)
   7452 					m->m_pkthdr.csum_flags |=
   7453 					    M_CSUM_IPv4_BAD;
   7454 			}
   7455 			if (status & WRX_ST_TCPCS) {
   7456 				/*
   7457 				 * Note: we don't know if this was TCP or UDP,
   7458 				 * so we just set both bits, and expect the
   7459 				 * upper layers to deal.
   7460 				 */
   7461 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7462 				m->m_pkthdr.csum_flags |=
   7463 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7464 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7465 				if (errors & WRX_ER_TCPE)
   7466 					m->m_pkthdr.csum_flags |=
   7467 					    M_CSUM_TCP_UDP_BAD;
   7468 			}
   7469 		}
   7470 
   7471 		ifp->if_ipackets++;
   7472 
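         		/*
         		 * Drop the Rx lock while the packet is handed to BPF
         		 * and the stack, so it is not held across upper layer
         		 * code; recheck rxq_stopping once it is reacquired.
         		 */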
   7473 		mutex_exit(rxq->rxq_lock);
   7474 
   7475 		/* Pass this up to any BPF listeners. */
   7476 		bpf_mtap(ifp, m);
   7477 
   7478 		/* Pass it on. */
   7479 		if_percpuq_enqueue(sc->sc_ipq, m);
   7480 
   7481 		mutex_enter(rxq->rxq_lock);
   7482 
   7483 		if (rxq->rxq_stopping)
   7484 			break;
   7485 	}
   7486 
   7487 	/* Update the receive pointer. */
   7488 	rxq->rxq_ptr = i;
   7489 	if (count != 0)
   7490 		rnd_add_uint32(&sc->rnd_source, count);
   7491 
   7492 	DPRINTF(WM_DEBUG_RX,
   7493 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7494 }
   7495 
   7496 /*
   7497  * wm_linkintr_gmii:
   7498  *
   7499  *	Helper; handle link interrupts for GMII.
   7500  */
   7501 static void
   7502 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7503 {
   7504 
   7505 	KASSERT(WM_CORE_LOCKED(sc));
   7506 
   7507 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7508 		__func__));
   7509 
   7510 	if (icr & ICR_LSC) {
   7511 		uint32_t reg;
   7512 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7513 
   7514 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7515 			wm_gig_downshift_workaround_ich8lan(sc);
   7516 
   7517 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7518 			device_xname(sc->sc_dev)));
   7519 		mii_pollstat(&sc->sc_mii);
   7520 		if (sc->sc_type == WM_T_82543) {
   7521 			int miistatus, active;
   7522 
   7523 			/*
    7524 			 * With 82543, we need to force the MAC's speed
    7525 			 * and duplex to match the PHY's speed and
    7526 			 * duplex configuration.
   7527 			 */
   7528 			miistatus = sc->sc_mii.mii_media_status;
   7529 
   7530 			if (miistatus & IFM_ACTIVE) {
   7531 				active = sc->sc_mii.mii_media_active;
   7532 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7533 				switch (IFM_SUBTYPE(active)) {
   7534 				case IFM_10_T:
   7535 					sc->sc_ctrl |= CTRL_SPEED_10;
   7536 					break;
   7537 				case IFM_100_TX:
   7538 					sc->sc_ctrl |= CTRL_SPEED_100;
   7539 					break;
   7540 				case IFM_1000_T:
   7541 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7542 					break;
   7543 				default:
   7544 					/*
   7545 					 * fiber?
    7546 					 * Should not enter here.
   7547 					 */
   7548 					printf("unknown media (%x)\n", active);
   7549 					break;
   7550 				}
   7551 				if (active & IFM_FDX)
   7552 					sc->sc_ctrl |= CTRL_FD;
   7553 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7554 			}
   7555 		} else if ((sc->sc_type == WM_T_ICH8)
   7556 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7557 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7558 		} else if (sc->sc_type == WM_T_PCH) {
   7559 			wm_k1_gig_workaround_hv(sc,
   7560 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7561 		}
   7562 
   7563 		if ((sc->sc_phytype == WMPHY_82578)
   7564 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7565 			== IFM_1000_T)) {
   7566 
   7567 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7568 				delay(200*1000); /* XXX too big */
   7569 
   7570 				/* Link stall fix for link up */
   7571 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7572 				    HV_MUX_DATA_CTRL,
   7573 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7574 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7575 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7576 				    HV_MUX_DATA_CTRL,
   7577 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7578 			}
   7579 		}
   7580 		/*
   7581 		 * I217 Packet Loss issue:
   7582 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   7583 		 * on power up.
   7584 		 * Set the Beacon Duration for I217 to 8 usec
   7585 		 */
   7586 		if ((sc->sc_type == WM_T_PCH_LPT)
   7587 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7588 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7589 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7590 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7591 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7592 		}
   7593 
   7594 		/* XXX Work-around I218 hang issue */
   7595 		/* e1000_k1_workaround_lpt_lp() */
   7596 
   7597 		if ((sc->sc_type == WM_T_PCH_LPT)
   7598 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7599 			/*
   7600 			 * Set platform power management values for Latency
   7601 			 * Tolerance Reporting (LTR)
   7602 			 */
   7603 			wm_platform_pm_pch_lpt(sc,
   7604 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7605 				    != 0));
   7606 		}
   7607 
   7608 		/* FEXTNVM6 K1-off workaround */
   7609 		if (sc->sc_type == WM_T_PCH_SPT) {
   7610 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7611 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7612 			    & FEXTNVM6_K1_OFF_ENABLE)
   7613 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7614 			else
   7615 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7616 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7617 		}
   7618 	} else if (icr & ICR_RXSEQ) {
   7619 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7620 			device_xname(sc->sc_dev)));
   7621 	}
   7622 }
   7623 
   7624 /*
   7625  * wm_linkintr_tbi:
   7626  *
   7627  *	Helper; handle link interrupts for TBI mode.
   7628  */
   7629 static void
   7630 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7631 {
   7632 	uint32_t status;
   7633 
   7634 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7635 		__func__));
   7636 
   7637 	status = CSR_READ(sc, WMREG_STATUS);
   7638 	if (icr & ICR_LSC) {
   7639 		if (status & STATUS_LU) {
   7640 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7641 			    device_xname(sc->sc_dev),
   7642 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7643 			/*
   7644 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7645 			 * so we should update sc->sc_ctrl
   7646 			 */
   7647 
   7648 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7649 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7650 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7651 			if (status & STATUS_FD)
   7652 				sc->sc_tctl |=
   7653 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7654 			else
   7655 				sc->sc_tctl |=
   7656 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7657 			if (sc->sc_ctrl & CTRL_TFCE)
   7658 				sc->sc_fcrtl |= FCRTL_XONE;
   7659 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7660 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7661 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7662 				      sc->sc_fcrtl);
   7663 			sc->sc_tbi_linkup = 1;
   7664 		} else {
   7665 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7666 			    device_xname(sc->sc_dev)));
   7667 			sc->sc_tbi_linkup = 0;
   7668 		}
   7669 		/* Update LED */
   7670 		wm_tbi_serdes_set_linkled(sc);
   7671 	} else if (icr & ICR_RXSEQ) {
   7672 		DPRINTF(WM_DEBUG_LINK,
   7673 		    ("%s: LINK: Receive sequence error\n",
   7674 		    device_xname(sc->sc_dev)));
   7675 	}
   7676 }
   7677 
   7678 /*
   7679  * wm_linkintr_serdes:
   7680  *
    7681  *	Helper; handle link interrupts for SERDES mode.
   7682  */
   7683 static void
   7684 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7685 {
   7686 	struct mii_data *mii = &sc->sc_mii;
   7687 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7688 	uint32_t pcs_adv, pcs_lpab, reg;
   7689 
   7690 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7691 		__func__));
   7692 
   7693 	if (icr & ICR_LSC) {
   7694 		/* Check PCS */
   7695 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7696 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7697 			mii->mii_media_status |= IFM_ACTIVE;
   7698 			sc->sc_tbi_linkup = 1;
   7699 		} else {
   7700 			mii->mii_media_status |= IFM_NONE;
   7701 			sc->sc_tbi_linkup = 0;
   7702 			wm_tbi_serdes_set_linkled(sc);
   7703 			return;
   7704 		}
   7705 		mii->mii_media_active |= IFM_1000_SX;
   7706 		if ((reg & PCS_LSTS_FDX) != 0)
   7707 			mii->mii_media_active |= IFM_FDX;
   7708 		else
   7709 			mii->mii_media_active |= IFM_HDX;
   7710 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7711 			/* Check flow */
   7712 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7713 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7714 				DPRINTF(WM_DEBUG_LINK,
   7715 				    ("XXX LINKOK but not ACOMP\n"));
   7716 				return;
   7717 			}
   7718 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7719 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7720 			DPRINTF(WM_DEBUG_LINK,
   7721 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
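         			/*
         			 * Resolve pause per IEEE 802.3 Annex 28B:
         			 * symmetric flow control if both ends advertise
         			 * symmetric pause, otherwise one-way pause if
         			 * the asymmetric pause bits line up.
         			 */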
   7722 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7723 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7724 				mii->mii_media_active |= IFM_FLOW
   7725 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7726 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7727 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7728 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7729 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7730 				mii->mii_media_active |= IFM_FLOW
   7731 				    | IFM_ETH_TXPAUSE;
   7732 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7733 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7734 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7735 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7736 				mii->mii_media_active |= IFM_FLOW
   7737 				    | IFM_ETH_RXPAUSE;
   7738 		}
   7739 		/* Update LED */
   7740 		wm_tbi_serdes_set_linkled(sc);
   7741 	} else {
   7742 		DPRINTF(WM_DEBUG_LINK,
   7743 		    ("%s: LINK: Receive sequence error\n",
   7744 		    device_xname(sc->sc_dev)));
   7745 	}
   7746 }
   7747 
   7748 /*
   7749  * wm_linkintr:
   7750  *
   7751  *	Helper; handle link interrupts.
   7752  */
   7753 static void
   7754 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7755 {
   7756 
   7757 	KASSERT(WM_CORE_LOCKED(sc));
   7758 
   7759 	if (sc->sc_flags & WM_F_HAS_MII)
   7760 		wm_linkintr_gmii(sc, icr);
   7761 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7762 	    && (sc->sc_type >= WM_T_82575))
   7763 		wm_linkintr_serdes(sc, icr);
   7764 	else
   7765 		wm_linkintr_tbi(sc, icr);
   7766 }
   7767 
   7768 /*
   7769  * wm_intr_legacy:
   7770  *
   7771  *	Interrupt service routine for INTx and MSI.
   7772  */
   7773 static int
   7774 wm_intr_legacy(void *arg)
   7775 {
   7776 	struct wm_softc *sc = arg;
   7777 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7778 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7779 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7780 	uint32_t icr, rndval = 0;
   7781 	int handled = 0;
   7782 
   7783 	DPRINTF(WM_DEBUG_TX,
   7784 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7785 	while (1 /* CONSTCOND */) {
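         		/*
         		 * In INTx/MSI mode, reading ICR acknowledges (clears)
         		 * the asserted causes, so keep looping until none of
         		 * the causes we care about remain set.
         		 */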
   7786 		icr = CSR_READ(sc, WMREG_ICR);
   7787 		if ((icr & sc->sc_icr) == 0)
   7788 			break;
   7789 		if (rndval == 0)
   7790 			rndval = icr;
   7791 
   7792 		mutex_enter(rxq->rxq_lock);
   7793 
   7794 		if (rxq->rxq_stopping) {
   7795 			mutex_exit(rxq->rxq_lock);
   7796 			break;
   7797 		}
   7798 
   7799 		handled = 1;
   7800 
   7801 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7802 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7803 			DPRINTF(WM_DEBUG_RX,
   7804 			    ("%s: RX: got Rx intr 0x%08x\n",
   7805 			    device_xname(sc->sc_dev),
   7806 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7807 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7808 		}
   7809 #endif
   7810 		wm_rxeof(rxq);
   7811 
   7812 		mutex_exit(rxq->rxq_lock);
   7813 		mutex_enter(txq->txq_lock);
   7814 
   7815 		if (txq->txq_stopping) {
   7816 			mutex_exit(txq->txq_lock);
   7817 			break;
   7818 		}
   7819 
   7820 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7821 		if (icr & ICR_TXDW) {
   7822 			DPRINTF(WM_DEBUG_TX,
   7823 			    ("%s: TX: got TXDW interrupt\n",
   7824 			    device_xname(sc->sc_dev)));
   7825 			WM_Q_EVCNT_INCR(txq, txdw);
   7826 		}
   7827 #endif
   7828 		wm_txeof(sc, txq);
   7829 
   7830 		mutex_exit(txq->txq_lock);
   7831 		WM_CORE_LOCK(sc);
   7832 
   7833 		if (sc->sc_core_stopping) {
   7834 			WM_CORE_UNLOCK(sc);
   7835 			break;
   7836 		}
   7837 
   7838 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7839 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7840 			wm_linkintr(sc, icr);
   7841 		}
   7842 
   7843 		WM_CORE_UNLOCK(sc);
   7844 
   7845 		if (icr & ICR_RXO) {
   7846 #if defined(WM_DEBUG)
   7847 			log(LOG_WARNING, "%s: Receive overrun\n",
   7848 			    device_xname(sc->sc_dev));
   7849 #endif /* defined(WM_DEBUG) */
   7850 		}
   7851 	}
   7852 
   7853 	rnd_add_uint32(&sc->rnd_source, rndval);
   7854 
   7855 	if (handled) {
   7856 		/* Try to get more packets going. */
   7857 		ifp->if_start(ifp);
   7858 	}
   7859 
   7860 	return handled;
   7861 }
   7862 
   7863 static int
   7864 wm_txrxintr_msix(void *arg)
   7865 {
   7866 	struct wm_queue *wmq = arg;
   7867 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7868 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7869 	struct wm_softc *sc = txq->txq_sc;
   7870 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7871 
   7872 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7873 
   7874 	DPRINTF(WM_DEBUG_TX,
   7875 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7876 
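         	/*
         	 * Mask this queue's interrupt while we service it; it is
         	 * re-enabled via IMS/EIMS at the end of this handler.
         	 */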
   7877 	if (sc->sc_type == WM_T_82574)
   7878 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7879 	else if (sc->sc_type == WM_T_82575)
   7880 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7881 	else
   7882 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7883 
   7884 	mutex_enter(txq->txq_lock);
   7885 
   7886 	if (txq->txq_stopping) {
   7887 		mutex_exit(txq->txq_lock);
   7888 		return 0;
   7889 	}
   7890 
   7891 	WM_Q_EVCNT_INCR(txq, txdw);
   7892 	wm_txeof(sc, txq);
   7893 
   7894 	/* Try to get more packets going. */
   7895 	if (pcq_peek(txq->txq_interq) != NULL)
   7896 		wm_nq_transmit_locked(ifp, txq);
   7897 	/*
    7898 	 * There is still some upper layer processing which calls
    7899 	 * ifp->if_start(), e.g. ALTQ.
   7900 	 */
   7901 	if (wmq->wmq_id == 0) {
   7902 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7903 			wm_nq_start_locked(ifp);
   7904 	}
   7905 
   7906 	mutex_exit(txq->txq_lock);
   7907 
   7908 	DPRINTF(WM_DEBUG_RX,
   7909 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7910 	mutex_enter(rxq->rxq_lock);
   7911 
   7912 	if (rxq->rxq_stopping) {
   7913 		mutex_exit(rxq->rxq_lock);
   7914 		return 0;
   7915 	}
   7916 
   7917 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7918 	wm_rxeof(rxq);
   7919 	mutex_exit(rxq->rxq_lock);
   7920 
   7921 	if (sc->sc_type == WM_T_82574)
   7922 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7923 	else if (sc->sc_type == WM_T_82575)
   7924 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7925 	else
   7926 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7927 
   7928 	return 1;
   7929 }
   7930 
   7931 /*
   7932  * wm_linkintr_msix:
   7933  *
   7934  *	Interrupt service routine for link status change for MSI-X.
   7935  */
   7936 static int
   7937 wm_linkintr_msix(void *arg)
   7938 {
   7939 	struct wm_softc *sc = arg;
   7940 	uint32_t reg;
   7941 
   7942 	DPRINTF(WM_DEBUG_LINK,
   7943 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7944 
   7945 	reg = CSR_READ(sc, WMREG_ICR);
   7946 	WM_CORE_LOCK(sc);
   7947 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   7948 		goto out;
   7949 
   7950 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7951 	wm_linkintr(sc, ICR_LSC);
   7952 
   7953 out:
   7954 	WM_CORE_UNLOCK(sc);
   7955 
   7956 	if (sc->sc_type == WM_T_82574)
   7957 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7958 	else if (sc->sc_type == WM_T_82575)
   7959 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7960 	else
   7961 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7962 
   7963 	return 1;
   7964 }
   7965 
   7966 /*
   7967  * Media related.
   7968  * GMII, SGMII, TBI (and SERDES)
   7969  */
   7970 
   7971 /* Common */
   7972 
   7973 /*
   7974  * wm_tbi_serdes_set_linkled:
   7975  *
   7976  *	Update the link LED on TBI and SERDES devices.
   7977  */
   7978 static void
   7979 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7980 {
   7981 
   7982 	if (sc->sc_tbi_linkup)
   7983 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7984 	else
   7985 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7986 
   7987 	/* 82540 or newer devices are active low */
   7988 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7989 
   7990 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7991 }
   7992 
   7993 /* GMII related */
   7994 
   7995 /*
   7996  * wm_gmii_reset:
   7997  *
   7998  *	Reset the PHY.
   7999  */
   8000 static void
   8001 wm_gmii_reset(struct wm_softc *sc)
   8002 {
   8003 	uint32_t reg;
   8004 	int rv;
   8005 
   8006 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8007 		device_xname(sc->sc_dev), __func__));
   8008 
   8009 	rv = sc->phy.acquire(sc);
   8010 	if (rv != 0) {
   8011 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8012 		    __func__);
   8013 		return;
   8014 	}
   8015 
   8016 	switch (sc->sc_type) {
   8017 	case WM_T_82542_2_0:
   8018 	case WM_T_82542_2_1:
   8019 		/* null */
   8020 		break;
   8021 	case WM_T_82543:
    8022 		/*
    8023 		 * With the 82543, we need to force the MAC's speed and
    8024 		 * duplex to match the PHY's speed and duplex configuration.
    8025 		 * In addition, we need to toggle the PHY's hardware reset
    8026 		 * pin to take it out of reset.
    8027 		 */
   8028 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8029 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8030 
   8031 		/* The PHY reset pin is active-low. */
   8032 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8033 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8034 		    CTRL_EXT_SWDPIN(4));
   8035 		reg |= CTRL_EXT_SWDPIO(4);
   8036 
   8037 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8038 		CSR_WRITE_FLUSH(sc);
   8039 		delay(10*1000);
   8040 
   8041 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8042 		CSR_WRITE_FLUSH(sc);
   8043 		delay(150);
   8044 #if 0
   8045 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8046 #endif
   8047 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8048 		break;
   8049 	case WM_T_82544:	/* reset 10000us */
   8050 	case WM_T_82540:
   8051 	case WM_T_82545:
   8052 	case WM_T_82545_3:
   8053 	case WM_T_82546:
   8054 	case WM_T_82546_3:
   8055 	case WM_T_82541:
   8056 	case WM_T_82541_2:
   8057 	case WM_T_82547:
   8058 	case WM_T_82547_2:
   8059 	case WM_T_82571:	/* reset 100us */
   8060 	case WM_T_82572:
   8061 	case WM_T_82573:
   8062 	case WM_T_82574:
   8063 	case WM_T_82575:
   8064 	case WM_T_82576:
   8065 	case WM_T_82580:
   8066 	case WM_T_I350:
   8067 	case WM_T_I354:
   8068 	case WM_T_I210:
   8069 	case WM_T_I211:
   8070 	case WM_T_82583:
   8071 	case WM_T_80003:
   8072 		/* generic reset */
   8073 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8074 		CSR_WRITE_FLUSH(sc);
   8075 		delay(20000);
   8076 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8077 		CSR_WRITE_FLUSH(sc);
   8078 		delay(20000);
   8079 
   8080 		if ((sc->sc_type == WM_T_82541)
   8081 		    || (sc->sc_type == WM_T_82541_2)
   8082 		    || (sc->sc_type == WM_T_82547)
   8083 		    || (sc->sc_type == WM_T_82547_2)) {
    8084 			/* Workarounds for IGP PHYs are done in igp_reset() */
   8085 			/* XXX add code to set LED after phy reset */
   8086 		}
   8087 		break;
   8088 	case WM_T_ICH8:
   8089 	case WM_T_ICH9:
   8090 	case WM_T_ICH10:
   8091 	case WM_T_PCH:
   8092 	case WM_T_PCH2:
   8093 	case WM_T_PCH_LPT:
   8094 	case WM_T_PCH_SPT:
   8095 		/* generic reset */
   8096 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8097 		CSR_WRITE_FLUSH(sc);
   8098 		delay(100);
   8099 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8100 		CSR_WRITE_FLUSH(sc);
   8101 		delay(150);
   8102 		break;
   8103 	default:
   8104 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8105 		    __func__);
   8106 		break;
   8107 	}
   8108 
   8109 	sc->phy.release(sc);
   8110 
   8111 	/* get_cfg_done */
   8112 	wm_get_cfg_done(sc);
   8113 
   8114 	/* extra setup */
   8115 	switch (sc->sc_type) {
   8116 	case WM_T_82542_2_0:
   8117 	case WM_T_82542_2_1:
   8118 	case WM_T_82543:
   8119 	case WM_T_82544:
   8120 	case WM_T_82540:
   8121 	case WM_T_82545:
   8122 	case WM_T_82545_3:
   8123 	case WM_T_82546:
   8124 	case WM_T_82546_3:
   8125 	case WM_T_82541_2:
   8126 	case WM_T_82547_2:
   8127 	case WM_T_82571:
   8128 	case WM_T_82572:
   8129 	case WM_T_82573:
   8130 	case WM_T_82575:
   8131 	case WM_T_82576:
   8132 	case WM_T_82580:
   8133 	case WM_T_I350:
   8134 	case WM_T_I354:
   8135 	case WM_T_I210:
   8136 	case WM_T_I211:
   8137 	case WM_T_80003:
   8138 		/* null */
   8139 		break;
   8140 	case WM_T_82574:
   8141 	case WM_T_82583:
   8142 		wm_lplu_d0_disable(sc);
   8143 		break;
   8144 	case WM_T_82541:
   8145 	case WM_T_82547:
    8146 		/* XXX Configure the activity LED after PHY reset */
   8147 		break;
   8148 	case WM_T_ICH8:
   8149 	case WM_T_ICH9:
   8150 	case WM_T_ICH10:
   8151 	case WM_T_PCH:
   8152 	case WM_T_PCH2:
   8153 	case WM_T_PCH_LPT:
   8154 	case WM_T_PCH_SPT:
    8155 		/* Allow time for h/w to get to a quiescent state after reset */
   8156 		delay(10*1000);
   8157 
   8158 		if (sc->sc_type == WM_T_PCH)
   8159 			wm_hv_phy_workaround_ich8lan(sc);
   8160 
   8161 		if (sc->sc_type == WM_T_PCH2)
   8162 			wm_lv_phy_workaround_ich8lan(sc);
   8163 
   8164 		/* Clear the host wakeup bit after lcd reset */
   8165 		if (sc->sc_type >= WM_T_PCH) {
   8166 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8167 			    BM_PORT_GEN_CFG);
   8168 			reg &= ~BM_WUC_HOST_WU_BIT;
   8169 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8170 			    BM_PORT_GEN_CFG, reg);
   8171 		}
   8172 
    8173 		/*
    8174 		 * XXX Configure the LCD with the extended configuration
    8175 		 * region in NVM
    8176 		 */
   8177 
   8178 		/* Disable D0 LPLU. */
   8179 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8180 			wm_lplu_d0_disable_pch(sc);
   8181 		else
   8182 			wm_lplu_d0_disable(sc);	/* ICH* */
   8183 		break;
   8184 	default:
   8185 		panic("%s: unknown type\n", __func__);
   8186 		break;
   8187 	}
   8188 }
   8189 
   8190 /*
   8191  * wm_get_phy_id_82575:
   8192  *
    8193  * Return the PHY ID, or -1 on failure.
   8194  */
   8195 static int
   8196 wm_get_phy_id_82575(struct wm_softc *sc)
   8197 {
   8198 	uint32_t reg;
   8199 	int phyid = -1;
   8200 
   8201 	/* XXX */
   8202 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8203 		return -1;
   8204 
   8205 	if (wm_sgmii_uses_mdio(sc)) {
   8206 		switch (sc->sc_type) {
   8207 		case WM_T_82575:
   8208 		case WM_T_82576:
   8209 			reg = CSR_READ(sc, WMREG_MDIC);
   8210 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8211 			break;
   8212 		case WM_T_82580:
   8213 		case WM_T_I350:
   8214 		case WM_T_I354:
   8215 		case WM_T_I210:
   8216 		case WM_T_I211:
   8217 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8218 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8219 			break;
   8220 		default:
   8221 			return -1;
   8222 		}
   8223 	}
   8224 
   8225 	return phyid;
   8226 }
   8227 
   8228 
   8229 /*
   8230  * wm_gmii_mediainit:
   8231  *
   8232  *	Initialize media for use on 1000BASE-T devices.
   8233  */
   8234 static void
   8235 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8236 {
   8237 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8238 	struct mii_data *mii = &sc->sc_mii;
   8239 	uint32_t reg;
   8240 
   8241 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8242 		device_xname(sc->sc_dev), __func__));
   8243 
   8244 	/* We have GMII. */
   8245 	sc->sc_flags |= WM_F_HAS_MII;
   8246 
   8247 	if (sc->sc_type == WM_T_80003)
   8248 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8249 	else
   8250 		sc->sc_tipg = TIPG_1000T_DFLT;
   8251 
   8252 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8253 	if ((sc->sc_type == WM_T_82580)
   8254 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8255 	    || (sc->sc_type == WM_T_I211)) {
   8256 		reg = CSR_READ(sc, WMREG_PHPM);
   8257 		reg &= ~PHPM_GO_LINK_D;
   8258 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8259 	}
   8260 
   8261 	/*
   8262 	 * Let the chip set speed/duplex on its own based on
   8263 	 * signals from the PHY.
   8264 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8265 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8266 	 */
   8267 	sc->sc_ctrl |= CTRL_SLU;
   8268 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8269 
   8270 	/* Initialize our media structures and probe the GMII. */
   8271 	mii->mii_ifp = ifp;
   8272 
    8273 	/*
    8274 	 * Determine the PHY access method.
    8275 	 *
    8276 	 *  For SGMII, use the SGMII-specific method.
    8277 	 *
    8278 	 *  For some devices, we can determine the PHY access method
    8279 	 * from sc_type.
    8280 	 *
    8281 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8282 	 * access method from sc_type, so use the PCI product ID for some
    8283 	 * devices.
    8284 	 *  For other ICH8 variants, try the IGP method first; if the PHY
    8285 	 * can't be detected, fall back to the BM method.
    8286 	 */
   8287 	switch (prodid) {
   8288 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8289 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8290 		/* 82577 */
   8291 		sc->sc_phytype = WMPHY_82577;
   8292 		break;
   8293 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8294 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8295 		/* 82578 */
   8296 		sc->sc_phytype = WMPHY_82578;
   8297 		break;
   8298 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8299 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8300 		/* 82579 */
   8301 		sc->sc_phytype = WMPHY_82579;
   8302 		break;
   8303 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8304 	case PCI_PRODUCT_INTEL_82801I_BM:
   8305 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8306 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8307 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8308 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8309 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8310 		/* ICH8, 9, 10 with 82567 */
   8311 		sc->sc_phytype = WMPHY_BM;
   8312 		mii->mii_readreg = wm_gmii_bm_readreg;
   8313 		mii->mii_writereg = wm_gmii_bm_writereg;
   8314 		break;
   8315 	default:
   8316 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8317 		    && !wm_sgmii_uses_mdio(sc)){
   8318 			/* SGMII */
   8319 			mii->mii_readreg = wm_sgmii_readreg;
   8320 			mii->mii_writereg = wm_sgmii_writereg;
   8321 		} else if ((sc->sc_type == WM_T_82574)
   8322 		    || (sc->sc_type == WM_T_82583)) {
   8323 			/* BM2 (phyaddr == 1) */
   8324 			sc->sc_phytype = WMPHY_BM;
   8325 			mii->mii_readreg = wm_gmii_bm_readreg;
   8326 			mii->mii_writereg = wm_gmii_bm_writereg;
   8327 		} else if (sc->sc_type >= WM_T_ICH8) {
   8328 			/* non-82567 ICH8, 9 and 10 */
   8329 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8330 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8331 		} else if (sc->sc_type >= WM_T_80003) {
   8332 			/* 80003 */
   8333 			sc->sc_phytype = WMPHY_GG82563;
   8334 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8335 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8336 		} else if (sc->sc_type >= WM_T_I210) {
   8337 			/* I210 and I211 */
   8338 			sc->sc_phytype = WMPHY_210;
   8339 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8340 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8341 		} else if (sc->sc_type >= WM_T_82580) {
   8342 			/* 82580, I350 and I354 */
   8343 			sc->sc_phytype = WMPHY_82580;
   8344 			mii->mii_readreg = wm_gmii_82580_readreg;
   8345 			mii->mii_writereg = wm_gmii_82580_writereg;
   8346 		} else if (sc->sc_type >= WM_T_82544) {
    8347 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8348 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8349 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8350 		} else {
   8351 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8352 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8353 		}
   8354 		break;
   8355 	}
   8356 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8357 		/* All PCH* use _hv_ */
   8358 		mii->mii_readreg = wm_gmii_hv_readreg;
   8359 		mii->mii_writereg = wm_gmii_hv_writereg;
   8360 	}
   8361 	mii->mii_statchg = wm_gmii_statchg;
   8362 
    8363 	/* Move PHY control from SMBus to PCIe */
   8364 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8365 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8366 		wm_smbustopci(sc);
   8367 
   8368 	wm_gmii_reset(sc);
   8369 
   8370 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8371 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8372 	    wm_gmii_mediastatus);
   8373 
   8374 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8375 	    || (sc->sc_type == WM_T_82580)
   8376 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8377 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8378 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8379 			/* Attach only one port */
   8380 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8381 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8382 		} else {
   8383 			int i, id;
   8384 			uint32_t ctrl_ext;
   8385 
   8386 			id = wm_get_phy_id_82575(sc);
   8387 			if (id != -1) {
   8388 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8389 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8390 			}
   8391 			if ((id == -1)
   8392 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8393 				/* Power on sgmii phy if it is disabled */
   8394 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8395 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8396 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8397 				CSR_WRITE_FLUSH(sc);
   8398 				delay(300*1000); /* XXX too long */
   8399 
    8400 				/* Try PHY addresses 1 through 7 */
   8401 				for (i = 1; i < 8; i++)
   8402 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8403 					    0xffffffff, i, MII_OFFSET_ANY,
   8404 					    MIIF_DOPAUSE);
   8405 
   8406 				/* restore previous sfp cage power state */
   8407 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8408 			}
   8409 		}
   8410 	} else {
   8411 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8412 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8413 	}
   8414 
    8415 	/*
    8416 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
    8417 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
    8418 	 */
   8419 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8420 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8421 		wm_set_mdio_slow_mode_hv(sc);
   8422 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8423 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8424 	}
   8425 
   8426 	/*
   8427 	 * (For ICH8 variants)
    8428 	 * If PHY detection failed, use the BM read/write functions and retry.
   8429 	 */
   8430 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8431 		/* if failed, retry with *_bm_* */
   8432 		mii->mii_readreg = wm_gmii_bm_readreg;
   8433 		mii->mii_writereg = wm_gmii_bm_writereg;
   8434 
   8435 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8436 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8437 	}
   8438 
   8439 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8440 		/* No PHY was found */
   8441 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8442 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8443 		sc->sc_phytype = WMPHY_NONE;
   8444 	} else {
   8445 		/*
   8446 		 * PHY Found!
   8447 		 * Check PHY type.
   8448 		 */
   8449 		uint32_t model;
   8450 		struct mii_softc *child;
   8451 
   8452 		child = LIST_FIRST(&mii->mii_phys);
   8453 		model = child->mii_mpd_model;
   8454 		if (model == MII_MODEL_yyINTEL_I82566)
   8455 			sc->sc_phytype = WMPHY_IGP_3;
   8456 
   8457 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8458 	}
   8459 }
   8460 
   8461 /*
   8462  * wm_gmii_mediachange:	[ifmedia interface function]
   8463  *
   8464  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8465  */
   8466 static int
   8467 wm_gmii_mediachange(struct ifnet *ifp)
   8468 {
   8469 	struct wm_softc *sc = ifp->if_softc;
   8470 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8471 	int rc;
   8472 
   8473 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8474 		device_xname(sc->sc_dev), __func__));
   8475 	if ((ifp->if_flags & IFF_UP) == 0)
   8476 		return 0;
   8477 
   8478 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8479 	sc->sc_ctrl |= CTRL_SLU;
   8480 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8481 	    || (sc->sc_type > WM_T_82543)) {
   8482 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8483 	} else {
   8484 		sc->sc_ctrl &= ~CTRL_ASDE;
   8485 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8486 		if (ife->ifm_media & IFM_FDX)
   8487 			sc->sc_ctrl |= CTRL_FD;
   8488 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8489 		case IFM_10_T:
   8490 			sc->sc_ctrl |= CTRL_SPEED_10;
   8491 			break;
   8492 		case IFM_100_TX:
   8493 			sc->sc_ctrl |= CTRL_SPEED_100;
   8494 			break;
   8495 		case IFM_1000_T:
   8496 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8497 			break;
   8498 		default:
   8499 			panic("wm_gmii_mediachange: bad media 0x%x",
   8500 			    ife->ifm_media);
   8501 		}
   8502 	}
   8503 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8504 	if (sc->sc_type <= WM_T_82543)
   8505 		wm_gmii_reset(sc);
   8506 
   8507 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8508 		return 0;
   8509 	return rc;
   8510 }
   8511 
   8512 /*
   8513  * wm_gmii_mediastatus:	[ifmedia interface function]
   8514  *
   8515  *	Get the current interface media status on a 1000BASE-T device.
   8516  */
   8517 static void
   8518 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8519 {
   8520 	struct wm_softc *sc = ifp->if_softc;
   8521 
   8522 	ether_mediastatus(ifp, ifmr);
   8523 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8524 	    | sc->sc_flowflags;
   8525 }
   8526 
   8527 #define	MDI_IO		CTRL_SWDPIN(2)
   8528 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8529 #define	MDI_CLK		CTRL_SWDPIN(3)
   8530 
   8531 static void
   8532 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8533 {
   8534 	uint32_t i, v;
   8535 
   8536 	v = CSR_READ(sc, WMREG_CTRL);
   8537 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8538 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8539 
   8540 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8541 		if (data & i)
   8542 			v |= MDI_IO;
   8543 		else
   8544 			v &= ~MDI_IO;
   8545 		CSR_WRITE(sc, WMREG_CTRL, v);
   8546 		CSR_WRITE_FLUSH(sc);
   8547 		delay(10);
   8548 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8549 		CSR_WRITE_FLUSH(sc);
   8550 		delay(10);
   8551 		CSR_WRITE(sc, WMREG_CTRL, v);
   8552 		CSR_WRITE_FLUSH(sc);
   8553 		delay(10);
   8554 	}
   8555 }
   8556 
   8557 static uint32_t
   8558 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8559 {
   8560 	uint32_t v, i, data = 0;
   8561 
   8562 	v = CSR_READ(sc, WMREG_CTRL);
   8563 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8564 	v |= CTRL_SWDPIO(3);
   8565 
   8566 	CSR_WRITE(sc, WMREG_CTRL, v);
   8567 	CSR_WRITE_FLUSH(sc);
   8568 	delay(10);
   8569 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8570 	CSR_WRITE_FLUSH(sc);
   8571 	delay(10);
   8572 	CSR_WRITE(sc, WMREG_CTRL, v);
   8573 	CSR_WRITE_FLUSH(sc);
   8574 	delay(10);
   8575 
   8576 	for (i = 0; i < 16; i++) {
   8577 		data <<= 1;
   8578 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8579 		CSR_WRITE_FLUSH(sc);
   8580 		delay(10);
   8581 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8582 			data |= 1;
   8583 		CSR_WRITE(sc, WMREG_CTRL, v);
   8584 		CSR_WRITE_FLUSH(sc);
   8585 		delay(10);
   8586 	}
   8587 
   8588 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8589 	CSR_WRITE_FLUSH(sc);
   8590 	delay(10);
   8591 	CSR_WRITE(sc, WMREG_CTRL, v);
   8592 	CSR_WRITE_FLUSH(sc);
   8593 	delay(10);
   8594 
   8595 	return data;
   8596 }
   8597 
   8598 #undef MDI_IO
   8599 #undef MDI_DIR
   8600 #undef MDI_CLK
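
/*
 * The bit-bang helpers above clock an IEEE 802.3 clause 22 management
 * frame out on the software-definable pins: a read sends a 32-bit
 * preamble of ones followed by the 14-bit header ST(01), OP(10 = read),
 * PHYAD (5 bits) and REGAD (5 bits), then turns the bus around and
 * clocks in 16 data bits; a write sends the whole 32-bit frame,
 * including the turnaround pattern and the 16 data bits.
 */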
   8601 
   8602 /*
   8603  * wm_gmii_i82543_readreg:	[mii interface function]
   8604  *
   8605  *	Read a PHY register on the GMII (i82543 version).
   8606  */
   8607 static int
   8608 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8609 {
   8610 	struct wm_softc *sc = device_private(self);
   8611 	int rv;
   8612 
   8613 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8614 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8615 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8616 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8617 
   8618 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8619 	    device_xname(sc->sc_dev), phy, reg, rv));
   8620 
   8621 	return rv;
   8622 }
   8623 
   8624 /*
   8625  * wm_gmii_i82543_writereg:	[mii interface function]
   8626  *
   8627  *	Write a PHY register on the GMII (i82543 version).
   8628  */
   8629 static void
   8630 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8631 {
   8632 	struct wm_softc *sc = device_private(self);
   8633 
   8634 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8635 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8636 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8637 	    (MII_COMMAND_START << 30), 32);
   8638 }
   8639 
   8640 /*
   8641  * wm_gmii_mdic_readreg:	[mii interface function]
   8642  *
   8643  *	Read a PHY register on the GMII.
   8644  */
   8645 static int
   8646 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8647 {
   8648 	struct wm_softc *sc = device_private(self);
   8649 	uint32_t mdic = 0;
   8650 	int i, rv;
   8651 
   8652 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8653 	    MDIC_REGADD(reg));
   8654 
   8655 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8656 		mdic = CSR_READ(sc, WMREG_MDIC);
   8657 		if (mdic & MDIC_READY)
   8658 			break;
   8659 		delay(50);
   8660 	}
   8661 
   8662 	if ((mdic & MDIC_READY) == 0) {
   8663 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8664 		    device_xname(sc->sc_dev), phy, reg);
   8665 		rv = 0;
   8666 	} else if (mdic & MDIC_E) {
   8667 #if 0 /* This is normal if no PHY is present. */
   8668 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8669 		    device_xname(sc->sc_dev), phy, reg);
   8670 #endif
   8671 		rv = 0;
   8672 	} else {
   8673 		rv = MDIC_DATA(mdic);
   8674 		if (rv == 0xffff)
   8675 			rv = 0;
   8676 	}
   8677 
   8678 	return rv;
   8679 }
   8680 
   8681 /*
   8682  * wm_gmii_mdic_writereg:	[mii interface function]
   8683  *
   8684  *	Write a PHY register on the GMII.
   8685  */
   8686 static void
   8687 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8688 {
   8689 	struct wm_softc *sc = device_private(self);
   8690 	uint32_t mdic = 0;
   8691 	int i;
   8692 
   8693 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8694 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8695 
   8696 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8697 		mdic = CSR_READ(sc, WMREG_MDIC);
   8698 		if (mdic & MDIC_READY)
   8699 			break;
   8700 		delay(50);
   8701 	}
   8702 
   8703 	if ((mdic & MDIC_READY) == 0)
   8704 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8705 		    device_xname(sc->sc_dev), phy, reg);
   8706 	else if (mdic & MDIC_E)
   8707 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8708 		    device_xname(sc->sc_dev), phy, reg);
   8709 }
   8710 
   8711 /*
   8712  * wm_gmii_i82544_readreg:	[mii interface function]
   8713  *
   8714  *	Read a PHY register on the GMII.
   8715  */
   8716 static int
   8717 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8718 {
   8719 	struct wm_softc *sc = device_private(self);
   8720 	int rv;
   8721 
   8722 	if (sc->phy.acquire(sc)) {
   8723 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8724 		    __func__);
   8725 		return 0;
   8726 	}
   8727 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8728 	sc->phy.release(sc);
   8729 
   8730 	return rv;
   8731 }
   8732 
   8733 /*
   8734  * wm_gmii_i82544_writereg:	[mii interface function]
   8735  *
   8736  *	Write a PHY register on the GMII.
   8737  */
   8738 static void
   8739 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8740 {
   8741 	struct wm_softc *sc = device_private(self);
   8742 
    8743 	if (sc->phy.acquire(sc)) {
    8744 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8745 		    __func__);
         		return;
    8746 	}
   8747 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8748 	sc->phy.release(sc);
   8749 }
   8750 
   8751 /*
   8752  * wm_gmii_i80003_readreg:	[mii interface function]
   8753  *
    8754  *	Read a PHY register on the Kumeran bus (80003).
    8755  * This could be handled by the PHY layer if we didn't have to lock the
    8756  * resource ...
   8757  */
   8758 static int
   8759 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8760 {
   8761 	struct wm_softc *sc = device_private(self);
   8762 	int rv;
   8763 
   8764 	if (phy != 1) /* only one PHY on kumeran bus */
   8765 		return 0;
   8766 
   8767 	if (sc->phy.acquire(sc)) {
   8768 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8769 		    __func__);
   8770 		return 0;
   8771 	}
   8772 
   8773 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8774 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8775 		    reg >> GG82563_PAGE_SHIFT);
   8776 	} else {
   8777 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8778 		    reg >> GG82563_PAGE_SHIFT);
   8779 	}
    8780 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8781 	delay(200);
   8782 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8783 	delay(200);
   8784 	sc->phy.release(sc);
   8785 
   8786 	return rv;
   8787 }
   8788 
   8789 /*
   8790  * wm_gmii_i80003_writereg:	[mii interface function]
   8791  *
    8792  *	Write a PHY register on the Kumeran bus (80003).
    8793  * This could be handled by the PHY layer if we didn't have to lock the
    8794  * resource ...
   8795  */
   8796 static void
   8797 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8798 {
   8799 	struct wm_softc *sc = device_private(self);
   8800 
   8801 	if (phy != 1) /* only one PHY on kumeran bus */
   8802 		return;
   8803 
   8804 	if (sc->phy.acquire(sc)) {
   8805 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8806 		    __func__);
   8807 		return;
   8808 	}
   8809 
   8810 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8811 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8812 		    reg >> GG82563_PAGE_SHIFT);
   8813 	} else {
   8814 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8815 		    reg >> GG82563_PAGE_SHIFT);
   8816 	}
    8817 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8818 	delay(200);
   8819 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8820 	delay(200);
   8821 
   8822 	sc->phy.release(sc);
   8823 }
   8824 
   8825 /*
   8826  * wm_gmii_bm_readreg:	[mii interface function]
   8827  *
    8828  *	Read a PHY register on the BM PHY (ICH8 variants, 82574, 82583).
    8829  * This could be handled by the PHY layer if we didn't have to lock the
    8830  * resource ...
   8831  */
   8832 static int
   8833 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8834 {
   8835 	struct wm_softc *sc = device_private(self);
   8836 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8837 	uint16_t val;
   8838 	int rv;
   8839 
   8840 	if (sc->phy.acquire(sc)) {
   8841 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8842 		    __func__);
   8843 		return 0;
   8844 	}
   8845 
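	/*
	 * On BM PHYs other than the 82574/82583, register 25 on page 0,
	 * register 31, and all pages at or above 768 are only reachable
	 * at PHY address 1.
	 */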
   8846 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8847 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8848 		    || (reg == 31)) ? 1 : phy;
   8849 	/* Page 800 works differently than the rest so it has its own func */
   8850 	if (page == BM_WUC_PAGE) {
   8851 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8852 		rv = val;
   8853 		goto release;
   8854 	}
   8855 
   8856 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8857 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8858 		    && (sc->sc_type != WM_T_82583))
   8859 			wm_gmii_mdic_writereg(self, phy,
   8860 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8861 		else
   8862 			wm_gmii_mdic_writereg(self, phy,
   8863 			    BME1000_PHY_PAGE_SELECT, page);
   8864 	}
   8865 
   8866 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8867 
   8868 release:
   8869 	sc->phy.release(sc);
   8870 	return rv;
   8871 }
   8872 
   8873 /*
   8874  * wm_gmii_bm_writereg:	[mii interface function]
   8875  *
    8876  *	Write a PHY register on the BM PHY (ICH8 variants, 82574, 82583).
    8877  * This could be handled by the PHY layer if we didn't have to lock the
    8878  * resource ...
   8879  */
   8880 static void
   8881 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8882 {
   8883 	struct wm_softc *sc = device_private(self);
   8884 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8885 
   8886 	if (sc->phy.acquire(sc)) {
   8887 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8888 		    __func__);
   8889 		return;
   8890 	}
   8891 
   8892 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8893 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8894 		    || (reg == 31)) ? 1 : phy;
   8895 	/* Page 800 works differently than the rest so it has its own func */
   8896 	if (page == BM_WUC_PAGE) {
   8897 		uint16_t tmp;
   8898 
   8899 		tmp = val;
   8900 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8901 		goto release;
   8902 	}
   8903 
   8904 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8905 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8906 		    && (sc->sc_type != WM_T_82583))
   8907 			wm_gmii_mdic_writereg(self, phy,
   8908 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8909 		else
   8910 			wm_gmii_mdic_writereg(self, phy,
   8911 			    BME1000_PHY_PAGE_SELECT, page);
   8912 	}
   8913 
   8914 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8915 
   8916 release:
   8917 	sc->phy.release(sc);
   8918 }
   8919 
   8920 static void
   8921 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8922 {
   8923 	struct wm_softc *sc = device_private(self);
   8924 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8925 	uint16_t wuce, reg;
   8926 
   8927 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8928 		device_xname(sc->sc_dev), __func__));
   8929 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8930 	if (sc->sc_type == WM_T_PCH) {
    8931 		/* XXX The e1000 driver does nothing here... why? */
   8932 	}
   8933 
   8934 	/*
    8935 	 * 1) Enable access to the PHY wakeup registers first.
   8936 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   8937 	 */
   8938 
   8939 	/* Set page 769 */
   8940 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8941 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8942 
   8943 	/* Read WUCE and save it */
   8944 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   8945 
   8946 	reg = wuce | BM_WUC_ENABLE_BIT;
   8947 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   8948 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   8949 
   8950 	/* Select page 800 */
   8951 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8952 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8953 
   8954 	/*
   8955 	 * 2) Access PHY wakeup register.
   8956 	 * See e1000_access_phy_wakeup_reg_bm.
   8957 	 */
   8958 
   8959 	/* Write page 800 */
   8960 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8961 
   8962 	if (rd)
   8963 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8964 	else
   8965 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8966 
   8967 	/*
   8968 	 * 3) Disable PHY wakeup register.
   8969 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   8970 	 */
   8971 	/* Set page 769 */
   8972 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8973 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8974 
   8975 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8976 }
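
/*
 * Summary of the sequence above for a wakeup-register read:
 *   select page 769 -> save WUCE and set BM_WUC_ENABLE_BIT ->
 *   select page 800 -> write the register number to
 *   BM_WUC_ADDRESS_OPCODE -> read BM_WUC_DATA_OPCODE ->
 *   select page 769 -> restore the saved WUCE value.
 */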
   8977 
   8978 /*
   8979  * wm_gmii_hv_readreg:	[mii interface function]
   8980  *
    8981  *	Read a PHY register on the HV PHY (PCH variants).
    8982  * This could be handled by the PHY layer if we didn't have to lock the
    8983  * resource ...
   8984  */
   8985 static int
   8986 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8987 {
   8988 	struct wm_softc *sc = device_private(self);
   8989 	int rv;
   8990 
   8991 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8992 		device_xname(sc->sc_dev), __func__));
   8993 	if (sc->phy.acquire(sc)) {
   8994 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8995 		    __func__);
   8996 		return 0;
   8997 	}
   8998 
   8999 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9000 	sc->phy.release(sc);
   9001 	return rv;
   9002 }
   9003 
   9004 static int
   9005 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9006 {
   9007 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9008 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9009 	uint16_t val;
   9010 	int rv;
   9011 
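	/*
	 * Pages at or above HV_INTC_FC_PAGE_START are always accessed
	 * at PHY address 1.
	 */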
   9012 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9013 
   9014 	/* Page 800 works differently than the rest so it has its own func */
   9015 	if (page == BM_WUC_PAGE) {
   9016 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9017 		return val;
   9018 	}
   9019 
    9020 	/*
    9021 	 * Pages lower than 768 work differently than the rest and would
    9022 	 * need their own function; they are not handled here.
    9023 	 */
   9024 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9025 		printf("gmii_hv_readreg!!!\n");
   9026 		return 0;
   9027 	}
   9028 
   9029 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9030 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9031 		    page << BME1000_PAGE_SHIFT);
   9032 	}
   9033 
   9034 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9035 	return rv;
   9036 }
   9037 
   9038 /*
   9039  * wm_gmii_hv_writereg:	[mii interface function]
   9040  *
    9041  *	Write a PHY register on the HV PHY (PCH variants).
    9042  * This could be handled by the PHY layer if we didn't have to lock the
    9043  * resource ...
   9044  */
   9045 static void
   9046 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9047 {
   9048 	struct wm_softc *sc = device_private(self);
   9049 
   9050 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9051 		device_xname(sc->sc_dev), __func__));
   9052 
   9053 	if (sc->phy.acquire(sc)) {
   9054 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9055 		    __func__);
   9056 		return;
   9057 	}
   9058 
   9059 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9060 	sc->phy.release(sc);
   9061 }
   9062 
   9063 static void
   9064 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9065 {
   9066 	struct wm_softc *sc = device_private(self);
   9067 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9068 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9069 
   9070 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9071 
   9072 	/* Page 800 works differently than the rest so it has its own func */
   9073 	if (page == BM_WUC_PAGE) {
   9074 		uint16_t tmp;
   9075 
   9076 		tmp = val;
   9077 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9078 		return;
   9079 	}
   9080 
    9081 	/*
    9082 	 * Pages lower than 768 work differently than the rest and would
    9083 	 * need their own function; they are not handled here.
    9084 	 */
   9085 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9086 		printf("gmii_hv_writereg!!!\n");
   9087 		return;
   9088 	}
   9089 
   9090 	{
   9091 		/*
   9092 		 * XXX Workaround MDIO accesses being disabled after entering
   9093 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9094 		 * register is set)
   9095 		 */
   9096 		if (sc->sc_phytype == WMPHY_82578) {
   9097 			struct mii_softc *child;
   9098 
   9099 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9100 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9101 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9102 			    && ((val & (1 << 11)) != 0)) {
   9103 				printf("XXX need workaround\n");
   9104 			}
   9105 		}
   9106 
   9107 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9108 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9109 			    page << BME1000_PAGE_SHIFT);
   9110 		}
   9111 	}
   9112 
   9113 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9114 }
   9115 
   9116 /*
   9117  * wm_gmii_82580_readreg:	[mii interface function]
   9118  *
   9119  *	Read a PHY register on the 82580 and I350.
   9120  * This could be handled by the PHY layer if we didn't have to lock the
    9121  * resource ...
   9122  */
   9123 static int
   9124 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9125 {
   9126 	struct wm_softc *sc = device_private(self);
   9127 	int rv;
   9128 
   9129 	if (sc->phy.acquire(sc) != 0) {
   9130 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9131 		    __func__);
   9132 		return 0;
   9133 	}
   9134 
   9135 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9136 
   9137 	sc->phy.release(sc);
   9138 	return rv;
   9139 }
   9140 
   9141 /*
   9142  * wm_gmii_82580_writereg:	[mii interface function]
   9143  *
   9144  *	Write a PHY register on the 82580 and I350.
   9145  * This could be handled by the PHY layer if we didn't have to lock the
    9146  * resource ...
   9147  */
   9148 static void
   9149 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9150 {
   9151 	struct wm_softc *sc = device_private(self);
   9152 
   9153 	if (sc->phy.acquire(sc) != 0) {
   9154 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9155 		    __func__);
   9156 		return;
   9157 	}
   9158 
   9159 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9160 
   9161 	sc->phy.release(sc);
   9162 }
   9163 
   9164 /*
   9165  * wm_gmii_gs40g_readreg:	[mii interface function]
   9166  *
    9167  *	Read a PHY register on the I210 and I211.
    9168  * This could be handled by the PHY layer if we didn't have to lock the
    9169  * resource ...
   9170  */
   9171 static int
   9172 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9173 {
   9174 	struct wm_softc *sc = device_private(self);
   9175 	int page, offset;
   9176 	int rv;
   9177 
   9178 	/* Acquire semaphore */
   9179 	if (sc->phy.acquire(sc)) {
   9180 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9181 		    __func__);
   9182 		return 0;
   9183 	}
   9184 
   9185 	/* Page select */
   9186 	page = reg >> GS40G_PAGE_SHIFT;
   9187 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9188 
   9189 	/* Read reg */
   9190 	offset = reg & GS40G_OFFSET_MASK;
   9191 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9192 
   9193 	sc->phy.release(sc);
   9194 	return rv;
   9195 }
   9196 
   9197 /*
   9198  * wm_gmii_gs40g_writereg:	[mii interface function]
   9199  *
   9200  *	Write a PHY register on the I210 and I211.
   9201  * This could be handled by the PHY layer if we didn't have to lock the
    9202  * resource ...
   9203  */
   9204 static void
   9205 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9206 {
   9207 	struct wm_softc *sc = device_private(self);
   9208 	int page, offset;
   9209 
   9210 	/* Acquire semaphore */
   9211 	if (sc->phy.acquire(sc)) {
   9212 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9213 		    __func__);
   9214 		return;
   9215 	}
   9216 
   9217 	/* Page select */
   9218 	page = reg >> GS40G_PAGE_SHIFT;
   9219 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9220 
   9221 	/* Write reg */
   9222 	offset = reg & GS40G_OFFSET_MASK;
   9223 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9224 
   9225 	/* Release semaphore */
   9226 	sc->phy.release(sc);
   9227 }
   9228 
   9229 /*
   9230  * wm_gmii_statchg:	[mii interface function]
   9231  *
   9232  *	Callback from MII layer when media changes.
   9233  */
   9234 static void
   9235 wm_gmii_statchg(struct ifnet *ifp)
   9236 {
   9237 	struct wm_softc *sc = ifp->if_softc;
   9238 	struct mii_data *mii = &sc->sc_mii;
   9239 
   9240 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9241 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9242 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9243 
   9244 	/*
   9245 	 * Get flow control negotiation result.
   9246 	 */
   9247 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9248 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9249 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9250 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9251 	}
   9252 
   9253 	if (sc->sc_flowflags & IFM_FLOW) {
   9254 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9255 			sc->sc_ctrl |= CTRL_TFCE;
   9256 			sc->sc_fcrtl |= FCRTL_XONE;
   9257 		}
   9258 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9259 			sc->sc_ctrl |= CTRL_RFCE;
   9260 	}
   9261 
   9262 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9263 		DPRINTF(WM_DEBUG_LINK,
   9264 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9265 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9266 	} else {
   9267 		DPRINTF(WM_DEBUG_LINK,
   9268 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9269 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9270 	}
   9271 
   9272 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9273 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9274 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9275 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9276 	if (sc->sc_type == WM_T_80003) {
   9277 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9278 		case IFM_1000_T:
   9279 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9280 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9281 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9282 			break;
   9283 		default:
   9284 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9285 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9286 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9287 			break;
   9288 		}
   9289 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9290 	}
   9291 }
   9292 
   9293 /*
   9294  * wm_kmrn_readreg:
   9295  *
   9296  *	Read a kumeran register
   9297  */
   9298 static int
   9299 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9300 {
   9301 	int rv;
   9302 
   9303 	if (sc->sc_type == WM_T_80003)
   9304 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9305 	else
   9306 		rv = sc->phy.acquire(sc);
   9307 	if (rv != 0) {
   9308 		aprint_error_dev(sc->sc_dev,
   9309 		    "%s: failed to get semaphore\n", __func__);
   9310 		return 0;
   9311 	}
   9312 
   9313 	rv = wm_kmrn_readreg_locked(sc, reg);
   9314 
   9315 	if (sc->sc_type == WM_T_80003)
   9316 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9317 	else
   9318 		sc->phy.release(sc);
   9319 
   9320 	return rv;
   9321 }
   9322 
   9323 static int
   9324 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9325 {
   9326 	int rv;
   9327 
   9328 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9329 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9330 	    KUMCTRLSTA_REN);
   9331 	CSR_WRITE_FLUSH(sc);
   9332 	delay(2);
   9333 
   9334 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9335 
   9336 	return rv;
   9337 }
   9338 
   9339 /*
   9340  * wm_kmrn_writereg:
   9341  *
   9342  *	Write a kumeran register
   9343  */
   9344 static void
   9345 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9346 {
   9347 	int rv;
   9348 
   9349 	if (sc->sc_type == WM_T_80003)
   9350 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9351 	else
   9352 		rv = sc->phy.acquire(sc);
   9353 	if (rv != 0) {
   9354 		aprint_error_dev(sc->sc_dev,
   9355 		    "%s: failed to get semaphore\n", __func__);
   9356 		return;
   9357 	}
   9358 
   9359 	wm_kmrn_writereg_locked(sc, reg, val);
   9360 
   9361 	if (sc->sc_type == WM_T_80003)
   9362 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9363 	else
   9364 		sc->phy.release(sc);
   9365 }
   9366 
   9367 static void
   9368 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9369 {
   9370 
   9371 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9372 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9373 	    (val & KUMCTRLSTA_MASK));
   9374 }
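
/*
 * All Kumeran accesses above go through the single KUMCTRLSTA register:
 * the register offset goes in the KUMCTRLSTA_OFFSET field, the low 16
 * bits (KUMCTRLSTA_MASK) carry the data, and setting KUMCTRLSTA_REN
 * turns the access into a read whose result is fetched from the same
 * register after a short delay.
 */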
   9375 
   9376 /* SGMII related */
   9377 
   9378 /*
   9379  * wm_sgmii_uses_mdio
   9380  *
   9381  * Check whether the transaction is to the internal PHY or the external
   9382  * MDIO interface. Return true if it's MDIO.
   9383  */
   9384 static bool
   9385 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9386 {
   9387 	uint32_t reg;
   9388 	bool ismdio = false;
   9389 
   9390 	switch (sc->sc_type) {
   9391 	case WM_T_82575:
   9392 	case WM_T_82576:
   9393 		reg = CSR_READ(sc, WMREG_MDIC);
   9394 		ismdio = ((reg & MDIC_DEST) != 0);
   9395 		break;
   9396 	case WM_T_82580:
   9397 	case WM_T_I350:
   9398 	case WM_T_I354:
   9399 	case WM_T_I210:
   9400 	case WM_T_I211:
   9401 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9402 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9403 		break;
   9404 	default:
   9405 		break;
   9406 	}
   9407 
   9408 	return ismdio;
   9409 }
   9410 
   9411 /*
   9412  * wm_sgmii_readreg:	[mii interface function]
   9413  *
    9414  *	Read a PHY register on the SGMII.
    9415  * This could be handled by the PHY layer if we didn't have to lock the
    9416  * resource ...
   9417  */
   9418 static int
   9419 wm_sgmii_readreg(device_t self, int phy, int reg)
   9420 {
   9421 	struct wm_softc *sc = device_private(self);
   9422 	uint32_t i2ccmd;
   9423 	int i, rv;
   9424 
   9425 	if (sc->phy.acquire(sc)) {
   9426 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9427 		    __func__);
   9428 		return 0;
   9429 	}
   9430 
   9431 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9432 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9433 	    | I2CCMD_OPCODE_READ;
   9434 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9435 
   9436 	/* Poll the ready bit */
   9437 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9438 		delay(50);
   9439 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9440 		if (i2ccmd & I2CCMD_READY)
   9441 			break;
   9442 	}
   9443 	if ((i2ccmd & I2CCMD_READY) == 0)
   9444 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9445 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9446 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9447 
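	/* Swap the data bytes back from I2C order (cf. wm_sgmii_writereg) */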
   9448 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9449 
   9450 	sc->phy.release(sc);
   9451 	return rv;
   9452 }
   9453 
   9454 /*
   9455  * wm_sgmii_writereg:	[mii interface function]
   9456  *
   9457  *	Write a PHY register on the SGMII.
   9458  * This could be handled by the PHY layer if we didn't have to lock the
    9459  * resource ...
   9460  */
   9461 static void
   9462 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9463 {
   9464 	struct wm_softc *sc = device_private(self);
   9465 	uint32_t i2ccmd;
   9466 	int i;
   9467 	int val_swapped;
   9468 
   9469 	if (sc->phy.acquire(sc) != 0) {
   9470 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9471 		    __func__);
   9472 		return;
   9473 	}
   9474 	/* Swap the data bytes for the I2C interface */
   9475 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9476 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9477 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9478 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9479 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9480 
   9481 	/* Poll the ready bit */
   9482 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9483 		delay(50);
   9484 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9485 		if (i2ccmd & I2CCMD_READY)
   9486 			break;
   9487 	}
   9488 	if ((i2ccmd & I2CCMD_READY) == 0)
   9489 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9490 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9491 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9492 
   9493 	sc->phy.release(sc);
   9494 }
   9495 
   9496 /* TBI related */
   9497 
   9498 /*
   9499  * wm_tbi_mediainit:
   9500  *
   9501  *	Initialize media for use on 1000BASE-X devices.
   9502  */
   9503 static void
   9504 wm_tbi_mediainit(struct wm_softc *sc)
   9505 {
   9506 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9507 	const char *sep = "";
   9508 
   9509 	if (sc->sc_type < WM_T_82543)
   9510 		sc->sc_tipg = TIPG_WM_DFLT;
   9511 	else
   9512 		sc->sc_tipg = TIPG_LG_DFLT;
   9513 
   9514 	sc->sc_tbi_serdes_anegticks = 5;
   9515 
   9516 	/* Initialize our media structures */
   9517 	sc->sc_mii.mii_ifp = ifp;
   9518 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9519 
   9520 	if ((sc->sc_type >= WM_T_82575)
   9521 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9522 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9523 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9524 	else
   9525 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9526 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9527 
   9528 	/*
   9529 	 * SWD Pins:
   9530 	 *
   9531 	 *	0 = Link LED (output)
   9532 	 *	1 = Loss Of Signal (input)
   9533 	 */
   9534 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9535 
   9536 	/* XXX Perhaps this is only for TBI */
   9537 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9538 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9539 
   9540 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9541 		sc->sc_ctrl &= ~CTRL_LRST;
   9542 
   9543 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9544 
   9545 #define	ADD(ss, mm, dd)							\
   9546 do {									\
   9547 	aprint_normal("%s%s", sep, ss);					\
   9548 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9549 	sep = ", ";							\
   9550 } while (/*CONSTCOND*/0)
   9551 
   9552 	aprint_normal_dev(sc->sc_dev, "");
   9553 
   9554 	/* Only 82545 is LX */
   9555 	if (sc->sc_type == WM_T_82545) {
   9556 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9557 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9558 	} else {
   9559 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9560 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9561 	}
   9562 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9563 	aprint_normal("\n");
   9564 
   9565 #undef ADD
   9566 
   9567 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9568 }
   9569 
   9570 /*
   9571  * wm_tbi_mediachange:	[ifmedia interface function]
   9572  *
   9573  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9574  */
   9575 static int
   9576 wm_tbi_mediachange(struct ifnet *ifp)
   9577 {
   9578 	struct wm_softc *sc = ifp->if_softc;
   9579 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9580 	uint32_t status;
   9581 	int i;
   9582 
   9583 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9584 		/* XXX need some work for >= 82571 and < 82575 */
   9585 		if (sc->sc_type < WM_T_82575)
   9586 			return 0;
   9587 	}
   9588 
   9589 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9590 	    || (sc->sc_type >= WM_T_82575))
   9591 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9592 
   9593 	sc->sc_ctrl &= ~CTRL_LRST;
   9594 	sc->sc_txcw = TXCW_ANE;
   9595 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9596 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9597 	else if (ife->ifm_media & IFM_FDX)
   9598 		sc->sc_txcw |= TXCW_FD;
   9599 	else
   9600 		sc->sc_txcw |= TXCW_HD;
   9601 
   9602 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9603 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9604 
   9605 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9606 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9607 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9608 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9609 	CSR_WRITE_FLUSH(sc);
   9610 	delay(1000);
   9611 
   9612 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9613 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9614 
    9615 	/*
    9616 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set if
    9617 	 * the optics detect a signal, and clear if they don't.
    9618 	 */
   9619 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9620 		/* Have signal; wait for the link to come up. */
   9621 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9622 			delay(10000);
   9623 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9624 				break;
   9625 		}
   9626 
   9627 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9628 			    device_xname(sc->sc_dev),i));
   9629 
   9630 		status = CSR_READ(sc, WMREG_STATUS);
   9631 		DPRINTF(WM_DEBUG_LINK,
   9632 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9633 			device_xname(sc->sc_dev),status, STATUS_LU));
   9634 		if (status & STATUS_LU) {
   9635 			/* Link is up. */
   9636 			DPRINTF(WM_DEBUG_LINK,
   9637 			    ("%s: LINK: set media -> link up %s\n",
   9638 			    device_xname(sc->sc_dev),
   9639 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9640 
    9641 			/*
    9642 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9643 			 * automatically, so re-read CTRL into sc->sc_ctrl.
    9644 			 */
   9645 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9646 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9647 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9648 			if (status & STATUS_FD)
   9649 				sc->sc_tctl |=
   9650 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9651 			else
   9652 				sc->sc_tctl |=
   9653 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9654 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9655 				sc->sc_fcrtl |= FCRTL_XONE;
   9656 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9657 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9658 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9659 				      sc->sc_fcrtl);
   9660 			sc->sc_tbi_linkup = 1;
   9661 		} else {
   9662 			if (i == WM_LINKUP_TIMEOUT)
   9663 				wm_check_for_link(sc);
   9664 			/* Link is down. */
   9665 			DPRINTF(WM_DEBUG_LINK,
   9666 			    ("%s: LINK: set media -> link down\n",
   9667 			    device_xname(sc->sc_dev)));
   9668 			sc->sc_tbi_linkup = 0;
   9669 		}
   9670 	} else {
   9671 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9672 		    device_xname(sc->sc_dev)));
   9673 		sc->sc_tbi_linkup = 0;
   9674 	}
   9675 
   9676 	wm_tbi_serdes_set_linkled(sc);
   9677 
   9678 	return 0;
   9679 }
   9680 
   9681 /*
   9682  * wm_tbi_mediastatus:	[ifmedia interface function]
   9683  *
   9684  *	Get the current interface media status on a 1000BASE-X device.
   9685  */
   9686 static void
   9687 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9688 {
   9689 	struct wm_softc *sc = ifp->if_softc;
   9690 	uint32_t ctrl, status;
   9691 
   9692 	ifmr->ifm_status = IFM_AVALID;
   9693 	ifmr->ifm_active = IFM_ETHER;
   9694 
   9695 	status = CSR_READ(sc, WMREG_STATUS);
   9696 	if ((status & STATUS_LU) == 0) {
   9697 		ifmr->ifm_active |= IFM_NONE;
   9698 		return;
   9699 	}
   9700 
   9701 	ifmr->ifm_status |= IFM_ACTIVE;
   9702 	/* Only 82545 is LX */
   9703 	if (sc->sc_type == WM_T_82545)
   9704 		ifmr->ifm_active |= IFM_1000_LX;
   9705 	else
   9706 		ifmr->ifm_active |= IFM_1000_SX;
   9707 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9708 		ifmr->ifm_active |= IFM_FDX;
   9709 	else
   9710 		ifmr->ifm_active |= IFM_HDX;
   9711 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9712 	if (ctrl & CTRL_RFCE)
   9713 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9714 	if (ctrl & CTRL_TFCE)
   9715 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9716 }
   9717 
   9718 /* XXX TBI only */
   9719 static int
   9720 wm_check_for_link(struct wm_softc *sc)
   9721 {
   9722 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9723 	uint32_t rxcw;
   9724 	uint32_t ctrl;
   9725 	uint32_t status;
   9726 	uint32_t sig;
   9727 
   9728 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9729 		/* XXX need some work for >= 82571 */
   9730 		if (sc->sc_type >= WM_T_82571) {
   9731 			sc->sc_tbi_linkup = 1;
   9732 			return 0;
   9733 		}
   9734 	}
   9735 
   9736 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9737 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9738 	status = CSR_READ(sc, WMREG_STATUS);
   9739 
   9740 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9741 
   9742 	DPRINTF(WM_DEBUG_LINK,
   9743 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9744 		device_xname(sc->sc_dev), __func__,
   9745 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9746 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9747 
   9748 	/*
   9749 	 * SWDPIN   LU RXCW
   9750 	 *      0    0    0
   9751 	 *      0    0    1	(should not happen)
   9752 	 *      0    1    0	(should not happen)
   9753 	 *      0    1    1	(should not happen)
    9754 	 *      1    0    0	Disable autonegotiation and force link up
    9755 	 *      1    0    1	Got /C/ but no link yet
    9756 	 *      1    1    0	(link up)
    9757 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
   9758 	 *
   9759 	 */
   9760 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9761 	    && ((status & STATUS_LU) == 0)
   9762 	    && ((rxcw & RXCW_C) == 0)) {
   9763 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9764 			__func__));
   9765 		sc->sc_tbi_linkup = 0;
   9766 		/* Disable auto-negotiation in the TXCW register */
   9767 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9768 
   9769 		/*
   9770 		 * Force link-up and also force full-duplex.
   9771 		 *
    9772 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9773 		 * automatically, so keep sc->sc_ctrl in sync with it.
   9774 		 */
   9775 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9776 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9777 	} else if (((status & STATUS_LU) != 0)
   9778 	    && ((rxcw & RXCW_C) != 0)
   9779 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9780 		sc->sc_tbi_linkup = 1;
   9781 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9782 			__func__));
   9783 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9784 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9785 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9786 	    && ((rxcw & RXCW_C) != 0)) {
   9787 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9788 	} else {
   9789 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9790 			status));
   9791 	}
   9792 
   9793 	return 0;
   9794 }
   9795 
   9796 /*
   9797  * wm_tbi_tick:
   9798  *
   9799  *	Check the link on TBI devices.
   9800  *	This function acts as mii_tick().
   9801  */
   9802 static void
   9803 wm_tbi_tick(struct wm_softc *sc)
   9804 {
   9805 	struct mii_data *mii = &sc->sc_mii;
   9806 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9807 	uint32_t status;
   9808 
   9809 	KASSERT(WM_CORE_LOCKED(sc));
   9810 
   9811 	status = CSR_READ(sc, WMREG_STATUS);
   9812 
   9813 	/* XXX is this needed? */
   9814 	(void)CSR_READ(sc, WMREG_RXCW);
   9815 	(void)CSR_READ(sc, WMREG_CTRL);
   9816 
   9817 	/* set link status */
   9818 	if ((status & STATUS_LU) == 0) {
   9819 		DPRINTF(WM_DEBUG_LINK,
   9820 		    ("%s: LINK: checklink -> down\n",
   9821 			device_xname(sc->sc_dev)));
   9822 		sc->sc_tbi_linkup = 0;
   9823 	} else if (sc->sc_tbi_linkup == 0) {
   9824 		DPRINTF(WM_DEBUG_LINK,
   9825 		    ("%s: LINK: checklink -> up %s\n",
   9826 			device_xname(sc->sc_dev),
   9827 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9828 		sc->sc_tbi_linkup = 1;
   9829 		sc->sc_tbi_serdes_ticks = 0;
   9830 	}
   9831 
   9832 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9833 		goto setled;
   9834 
   9835 	if ((status & STATUS_LU) == 0) {
   9836 		sc->sc_tbi_linkup = 0;
   9837 		/* If the timer expired, retry autonegotiation */
   9838 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9839 		    && (++sc->sc_tbi_serdes_ticks
   9840 			>= sc->sc_tbi_serdes_anegticks)) {
   9841 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9842 			sc->sc_tbi_serdes_ticks = 0;
   9843 			/*
   9844 			 * Reset the link, and let autonegotiation do
   9845 			 * its thing
   9846 			 */
   9847 			sc->sc_ctrl |= CTRL_LRST;
   9848 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9849 			CSR_WRITE_FLUSH(sc);
   9850 			delay(1000);
   9851 			sc->sc_ctrl &= ~CTRL_LRST;
   9852 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9853 			CSR_WRITE_FLUSH(sc);
   9854 			delay(1000);
   9855 			CSR_WRITE(sc, WMREG_TXCW,
   9856 			    sc->sc_txcw & ~TXCW_ANE);
   9857 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9858 		}
   9859 	}
   9860 
   9861 setled:
   9862 	wm_tbi_serdes_set_linkled(sc);
   9863 }
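
/*
 * A note on the recovery path above: pulsing CTRL_LRST for 1 ms resets
 * the link, and rewriting TXCW (first with TXCW_ANE clear, then with
 * the full value, which has TXCW_ANE set when autonegotiation is
 * enabled) kicks the autonegotiation state machine back to the start.
 */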
   9864 
   9865 /* SERDES related */
   9866 static void
   9867 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9868 {
   9869 	uint32_t reg;
   9870 
   9871 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9872 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9873 		return;
   9874 
   9875 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9876 	reg |= PCS_CFG_PCS_EN;
   9877 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9878 
   9879 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9880 	reg &= ~CTRL_EXT_SWDPIN(3);
   9881 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9882 	CSR_WRITE_FLUSH(sc);
   9883 }
   9884 
   9885 static int
   9886 wm_serdes_mediachange(struct ifnet *ifp)
   9887 {
   9888 	struct wm_softc *sc = ifp->if_softc;
   9889 	bool pcs_autoneg = true; /* XXX */
   9890 	uint32_t ctrl_ext, pcs_lctl, reg;
   9891 
   9892 	/* XXX Currently, this function is not called on 8257[12] */
   9893 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9894 	    || (sc->sc_type >= WM_T_82575))
   9895 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9896 
   9897 	wm_serdes_power_up_link_82575(sc);
   9898 
   9899 	sc->sc_ctrl |= CTRL_SLU;
   9900 
   9901 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9902 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9903 
   9904 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9905 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9906 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9907 	case CTRL_EXT_LINK_MODE_SGMII:
   9908 		pcs_autoneg = true;
   9909 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9910 		break;
   9911 	case CTRL_EXT_LINK_MODE_1000KX:
   9912 		pcs_autoneg = false;
   9913 		/* FALLTHROUGH */
   9914 	default:
   9915 		if ((sc->sc_type == WM_T_82575)
   9916 		    || (sc->sc_type == WM_T_82576)) {
   9917 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9918 				pcs_autoneg = false;
   9919 		}
   9920 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9921 		    | CTRL_FRCFDX;
   9922 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9923 	}
   9924 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9925 
   9926 	if (pcs_autoneg) {
   9927 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9928 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9929 
   9930 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9931 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9932 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9933 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9934 	} else
   9935 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9936 
   9937 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9938 
   9939 
   9940 	return 0;
   9941 }
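
/*
 * To summarize the function above: on the 82575 class the SERDES/SGMII
 * link is configured through PCS_LCTL.  With PCS autonegotiation,
 * AN_ENABLE/AN_RESTART are set and pause capabilities are advertised
 * via PCS_ANADV; without it, speed, duplex and flow control are all
 * forced (PCS_LCTL_FSD, PCS_LCTL_FORCE_FC) to match the 1000 Mb/s
 * full-duplex setting forced in CTRL.
 */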
   9942 
   9943 static void
   9944 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9945 {
   9946 	struct wm_softc *sc = ifp->if_softc;
   9947 	struct mii_data *mii = &sc->sc_mii;
   9948 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9949 	uint32_t pcs_adv, pcs_lpab, reg;
   9950 
   9951 	ifmr->ifm_status = IFM_AVALID;
   9952 	ifmr->ifm_active = IFM_ETHER;
   9953 
   9954 	/* Check PCS */
   9955 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9956 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9957 		ifmr->ifm_active |= IFM_NONE;
   9958 		sc->sc_tbi_linkup = 0;
   9959 		goto setled;
   9960 	}
   9961 
   9962 	sc->sc_tbi_linkup = 1;
   9963 	ifmr->ifm_status |= IFM_ACTIVE;
   9964 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9965 	if ((reg & PCS_LSTS_FDX) != 0)
   9966 		ifmr->ifm_active |= IFM_FDX;
   9967 	else
   9968 		ifmr->ifm_active |= IFM_HDX;
   9969 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9970 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9971 		/* Check flow */
   9972 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9973 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9974 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9975 			goto setled;
   9976 		}
   9977 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9978 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9979 		DPRINTF(WM_DEBUG_LINK,
   9980 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9981 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9982 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9983 			mii->mii_media_active |= IFM_FLOW
   9984 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9985 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9986 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9987 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9988 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9989 			mii->mii_media_active |= IFM_FLOW
   9990 			    | IFM_ETH_TXPAUSE;
   9991 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9992 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9993 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9994 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9995 			mii->mii_media_active |= IFM_FLOW
   9996 			    | IFM_ETH_RXPAUSE;
   9997 		} else {
   9998 		}
   9999 	}
   10000 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10001 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10002 setled:
   10003 	wm_tbi_serdes_set_linkled(sc);
   10004 }
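
/*
 * The flow control decision above follows the usual 802.3 Annex 28B
 * pause resolution.  With Sym = TXCW_SYM_PAUSE, Asym = TXCW_ASYM_PAUSE,
 * "adv" our advertisement and "lp" the link partner's:
 *
 *	adv Sym	adv Asym	lp Sym	lp Asym		result
 *	  1	   x		  1	   x		TX and RX pause
 *	  0	   1		  1	   1		TX pause only
 *	  1	   1		  0	   1		RX pause only
 *	(any other combination)				no pause
 */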
   10005 
   10006 /*
   10007  * wm_serdes_tick:
   10008  *
   10009  *	Check the link on serdes devices.
   10010  */
   10011 static void
   10012 wm_serdes_tick(struct wm_softc *sc)
   10013 {
   10014 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10015 	struct mii_data *mii = &sc->sc_mii;
   10016 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10017 	uint32_t reg;
   10018 
   10019 	KASSERT(WM_CORE_LOCKED(sc));
   10020 
   10021 	mii->mii_media_status = IFM_AVALID;
   10022 	mii->mii_media_active = IFM_ETHER;
   10023 
   10024 	/* Check PCS */
   10025 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10026 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10027 		mii->mii_media_status |= IFM_ACTIVE;
   10028 		sc->sc_tbi_linkup = 1;
   10029 		sc->sc_tbi_serdes_ticks = 0;
   10030 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10031 		if ((reg & PCS_LSTS_FDX) != 0)
   10032 			mii->mii_media_active |= IFM_FDX;
   10033 		else
   10034 			mii->mii_media_active |= IFM_HDX;
   10035 	} else {
   10036 		mii->mii_media_status |= IFM_NONE;
   10037 		sc->sc_tbi_linkup = 0;
    10038 		/* If the timer expired, retry autonegotiation */
   10039 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10040 		    && (++sc->sc_tbi_serdes_ticks
   10041 			>= sc->sc_tbi_serdes_anegticks)) {
   10042 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10043 			sc->sc_tbi_serdes_ticks = 0;
   10044 			/* XXX */
   10045 			wm_serdes_mediachange(ifp);
   10046 		}
   10047 	}
   10048 
   10049 	wm_tbi_serdes_set_linkled(sc);
   10050 }
   10051 
   10052 /* SFP related */
   10053 
   10054 static int
   10055 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10056 {
   10057 	uint32_t i2ccmd;
   10058 	int i;
   10059 
   10060 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10061 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10062 
   10063 	/* Poll the ready bit */
   10064 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10065 		delay(50);
   10066 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10067 		if (i2ccmd & I2CCMD_READY)
   10068 			break;
   10069 	}
   10070 	if ((i2ccmd & I2CCMD_READY) == 0)
   10071 		return -1;
   10072 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10073 		return -1;
   10074 
   10075 	*data = i2ccmd & 0x00ff;
   10076 
   10077 	return 0;
   10078 }
   10079 
   10080 static uint32_t
   10081 wm_sfp_get_media_type(struct wm_softc *sc)
   10082 {
   10083 	uint32_t ctrl_ext;
   10084 	uint8_t val = 0;
   10085 	int timeout = 3;
   10086 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10087 	int rv = -1;
   10088 
   10089 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10090 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10091 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10092 	CSR_WRITE_FLUSH(sc);
   10093 
   10094 	/* Read SFP module data */
   10095 	while (timeout) {
   10096 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10097 		if (rv == 0)
   10098 			break;
   10099 		delay(100*1000); /* XXX too big */
   10100 		timeout--;
   10101 	}
   10102 	if (rv != 0)
   10103 		goto out;
   10104 	switch (val) {
   10105 	case SFF_SFP_ID_SFF:
   10106 		aprint_normal_dev(sc->sc_dev,
   10107 		    "Module/Connector soldered to board\n");
   10108 		break;
   10109 	case SFF_SFP_ID_SFP:
   10110 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10111 		break;
   10112 	case SFF_SFP_ID_UNKNOWN:
   10113 		goto out;
   10114 	default:
   10115 		break;
   10116 	}
   10117 
   10118 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10119 	if (rv != 0) {
   10120 		goto out;
   10121 	}
   10122 
   10123 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10124 		mediatype = WM_MEDIATYPE_SERDES;
    10125 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    10126 		sc->sc_flags |= WM_F_SGMII;
    10127 		mediatype = WM_MEDIATYPE_COPPER;
    10128 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10129 		sc->sc_flags |= WM_F_SGMII;
   10130 		mediatype = WM_MEDIATYPE_SERDES;
   10131 	}
   10132 
   10133 out:
   10134 	/* Restore I2C interface setting */
   10135 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10136 
   10137 	return mediatype;
   10138 }
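
/*
 * The media selection above is keyed off two bytes of the module's
 * SFF-8472 ID EEPROM: the identifier byte (SFF_SFP_ID_OFF) and the
 * Ethernet compliance codes (SFF_SFP_ETH_FLAGS_OFF).  The mapping is:
 *
 *	1000BASE-SX or 1000BASE-LX	-> WM_MEDIATYPE_SERDES
 *	1000BASE-T			-> SGMII, WM_MEDIATYPE_COPPER
 *	100BASE-FX			-> SGMII, WM_MEDIATYPE_SERDES
 *
 * Anything else leaves the media type WM_MEDIATYPE_UNKNOWN.
 */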
   10139 /*
   10140  * NVM related.
   10141  * Microwire, SPI (w/wo EERD) and Flash.
   10142  */
   10143 
   10144 /* Both spi and uwire */
   10145 
   10146 /*
   10147  * wm_eeprom_sendbits:
   10148  *
   10149  *	Send a series of bits to the EEPROM.
   10150  */
   10151 static void
   10152 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10153 {
   10154 	uint32_t reg;
   10155 	int x;
   10156 
   10157 	reg = CSR_READ(sc, WMREG_EECD);
   10158 
   10159 	for (x = nbits; x > 0; x--) {
   10160 		if (bits & (1U << (x - 1)))
   10161 			reg |= EECD_DI;
   10162 		else
   10163 			reg &= ~EECD_DI;
   10164 		CSR_WRITE(sc, WMREG_EECD, reg);
   10165 		CSR_WRITE_FLUSH(sc);
   10166 		delay(2);
   10167 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10168 		CSR_WRITE_FLUSH(sc);
   10169 		delay(2);
   10170 		CSR_WRITE(sc, WMREG_EECD, reg);
   10171 		CSR_WRITE_FLUSH(sc);
   10172 		delay(2);
   10173 	}
   10174 }
   10175 
   10176 /*
   10177  * wm_eeprom_recvbits:
   10178  *
   10179  *	Receive a series of bits from the EEPROM.
   10180  */
   10181 static void
   10182 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10183 {
   10184 	uint32_t reg, val;
   10185 	int x;
   10186 
   10187 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10188 
   10189 	val = 0;
   10190 	for (x = nbits; x > 0; x--) {
   10191 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10192 		CSR_WRITE_FLUSH(sc);
   10193 		delay(2);
   10194 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10195 			val |= (1U << (x - 1));
   10196 		CSR_WRITE(sc, WMREG_EECD, reg);
   10197 		CSR_WRITE_FLUSH(sc);
   10198 		delay(2);
   10199 	}
   10200 	*valp = val;
   10201 }
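
/*
 * Both helpers above bit-bang the EEPROM through EECD: DI carries data
 * to the part, DO carries data back, and every bit is strobed by
 * raising and then lowering SK around it.  A minimal sketch of one
 * output bit, with the register flushes and 2 usec delays elided:
 *
 *	reg = (bit != 0) ? (reg | EECD_DI) : (reg & ~EECD_DI);
 *	CSR_WRITE(sc, WMREG_EECD, reg);			(present the bit)
 *	CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);	(clock it in)
 *	CSR_WRITE(sc, WMREG_EECD, reg);			(lower the clock)
 */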
   10202 
   10203 /* Microwire */
   10204 
   10205 /*
   10206  * wm_nvm_read_uwire:
   10207  *
   10208  *	Read a word from the EEPROM using the MicroWire protocol.
   10209  */
   10210 static int
   10211 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10212 {
   10213 	uint32_t reg, val;
   10214 	int i;
   10215 
   10216 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10217 		device_xname(sc->sc_dev), __func__));
   10218 
   10219 	for (i = 0; i < wordcnt; i++) {
   10220 		/* Clear SK and DI. */
   10221 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10222 		CSR_WRITE(sc, WMREG_EECD, reg);
   10223 
   10224 		/*
   10225 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10226 		 * and Xen.
   10227 		 *
   10228 		 * We use this workaround only for 82540 because qemu's
   10229 		 * e1000 act as 82540.
   10230 		 */
   10231 		if (sc->sc_type == WM_T_82540) {
   10232 			reg |= EECD_SK;
   10233 			CSR_WRITE(sc, WMREG_EECD, reg);
   10234 			reg &= ~EECD_SK;
   10235 			CSR_WRITE(sc, WMREG_EECD, reg);
   10236 			CSR_WRITE_FLUSH(sc);
   10237 			delay(2);
   10238 		}
   10239 		/* XXX: end of workaround */
   10240 
   10241 		/* Set CHIP SELECT. */
   10242 		reg |= EECD_CS;
   10243 		CSR_WRITE(sc, WMREG_EECD, reg);
   10244 		CSR_WRITE_FLUSH(sc);
   10245 		delay(2);
   10246 
   10247 		/* Shift in the READ command. */
   10248 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10249 
   10250 		/* Shift in address. */
   10251 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10252 
   10253 		/* Shift out the data. */
   10254 		wm_eeprom_recvbits(sc, &val, 16);
   10255 		data[i] = val & 0xffff;
   10256 
   10257 		/* Clear CHIP SELECT. */
   10258 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10259 		CSR_WRITE(sc, WMREG_EECD, reg);
   10260 		CSR_WRITE_FLUSH(sc);
   10261 		delay(2);
   10262 	}
   10263 
   10264 	return 0;
   10265 }
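
/*
 * A complete MicroWire READ transaction as driven above looks like:
 * raise CS, shift out the 3-bit UWIRE_OPC_READ opcode, shift out the
 * word address in sc_nvm_addrbits bits (typically 6 for a 64-word part
 * such as a 93C46, 8 for a 256-word part), clock 16 data bits back in
 * on DO, then drop CS to end the transaction.
 */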
   10266 
   10267 /* SPI */
   10268 
   10269 /*
   10270  * Set SPI and FLASH related information from the EECD register.
   10271  * For 82541 and 82547, the word size is taken from EEPROM.
   10272  */
   10273 static int
   10274 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10275 {
   10276 	int size;
   10277 	uint32_t reg;
   10278 	uint16_t data;
   10279 
   10280 	reg = CSR_READ(sc, WMREG_EECD);
   10281 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10282 
   10283 	/* Read the size of NVM from EECD by default */
   10284 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10285 	switch (sc->sc_type) {
   10286 	case WM_T_82541:
   10287 	case WM_T_82541_2:
   10288 	case WM_T_82547:
   10289 	case WM_T_82547_2:
   10290 		/* Set dummy value to access EEPROM */
   10291 		sc->sc_nvm_wordsize = 64;
   10292 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10293 		reg = data;
   10294 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10295 		if (size == 0)
   10296 			size = 6; /* 64 word size */
   10297 		else
   10298 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10299 		break;
   10300 	case WM_T_80003:
   10301 	case WM_T_82571:
   10302 	case WM_T_82572:
   10303 	case WM_T_82573: /* SPI case */
   10304 	case WM_T_82574: /* SPI case */
   10305 	case WM_T_82583: /* SPI case */
   10306 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10307 		if (size > 14)
   10308 			size = 14;
   10309 		break;
   10310 	case WM_T_82575:
   10311 	case WM_T_82576:
   10312 	case WM_T_82580:
   10313 	case WM_T_I350:
   10314 	case WM_T_I354:
   10315 	case WM_T_I210:
   10316 	case WM_T_I211:
   10317 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10318 		if (size > 15)
   10319 			size = 15;
   10320 		break;
   10321 	default:
   10322 		aprint_error_dev(sc->sc_dev,
   10323 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10324 		return -1;
   10325 		break;
   10326 	}
   10327 
   10328 	sc->sc_nvm_wordsize = 1 << size;
   10329 
   10330 	return 0;
   10331 }
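
/*
 * A worked example of the size computation above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 as in other e1000-derived drivers: an
 * EECD size field of 2 on an 82571 gives size = 2 + 6 = 8, hence
 * sc_nvm_wordsize = 1 << 8 = 256 words.  The 82541/82547 path instead
 * takes the field from NVM word NVM_OFF_EEPROM_SIZE, reading it with a
 * provisional 64-word size first.
 */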
   10332 
   10333 /*
   10334  * wm_nvm_ready_spi:
   10335  *
   10336  *	Wait for a SPI EEPROM to be ready for commands.
   10337  */
   10338 static int
   10339 wm_nvm_ready_spi(struct wm_softc *sc)
   10340 {
   10341 	uint32_t val;
   10342 	int usec;
   10343 
   10344 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10345 		device_xname(sc->sc_dev), __func__));
   10346 
   10347 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10348 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10349 		wm_eeprom_recvbits(sc, &val, 8);
   10350 		if ((val & SPI_SR_RDY) == 0)
   10351 			break;
   10352 	}
   10353 	if (usec >= SPI_MAX_RETRIES) {
   10354 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10355 		return 1;
   10356 	}
   10357 	return 0;
   10358 }
   10359 
   10360 /*
   10361  * wm_nvm_read_spi:
   10362  *
    10363  *	Read a word from the EEPROM using the SPI protocol.
   10364  */
   10365 static int
   10366 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10367 {
   10368 	uint32_t reg, val;
   10369 	int i;
   10370 	uint8_t opc;
   10371 
   10372 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10373 		device_xname(sc->sc_dev), __func__));
   10374 
   10375 	/* Clear SK and CS. */
   10376 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10377 	CSR_WRITE(sc, WMREG_EECD, reg);
   10378 	CSR_WRITE_FLUSH(sc);
   10379 	delay(2);
   10380 
   10381 	if (wm_nvm_ready_spi(sc))
   10382 		return 1;
   10383 
   10384 	/* Toggle CS to flush commands. */
   10385 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10386 	CSR_WRITE_FLUSH(sc);
   10387 	delay(2);
   10388 	CSR_WRITE(sc, WMREG_EECD, reg);
   10389 	CSR_WRITE_FLUSH(sc);
   10390 	delay(2);
   10391 
   10392 	opc = SPI_OPC_READ;
   10393 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10394 		opc |= SPI_OPC_A8;
   10395 
   10396 	wm_eeprom_sendbits(sc, opc, 8);
   10397 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10398 
   10399 	for (i = 0; i < wordcnt; i++) {
   10400 		wm_eeprom_recvbits(sc, &val, 16);
   10401 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10402 	}
   10403 
   10404 	/* Raise CS and clear SK. */
   10405 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10406 	CSR_WRITE(sc, WMREG_EECD, reg);
   10407 	CSR_WRITE_FLUSH(sc);
   10408 	delay(2);
   10409 
   10410 	return 0;
   10411 }
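
/*
 * The transaction above follows the standard 25xx-series serial EEPROM
 * protocol: poll the status register (SPI_OPC_RDSR) until the busy bit
 * clears, then issue SPI_OPC_READ with the byte address (hence
 * word << 1).  On small parts with 8-bit addressing, the ninth address
 * bit travels in the opcode (SPI_OPC_A8), which is why it is set for
 * word >= 128.  The data comes back big-endian, so each 16-bit word is
 * byte-swapped before being stored.
 */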
   10412 
   10413 /* Using with EERD */
   10414 
   10415 static int
   10416 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10417 {
   10418 	uint32_t attempts = 100000;
   10419 	uint32_t i, reg = 0;
   10420 	int32_t done = -1;
   10421 
   10422 	for (i = 0; i < attempts; i++) {
   10423 		reg = CSR_READ(sc, rw);
   10424 
   10425 		if (reg & EERD_DONE) {
   10426 			done = 0;
   10427 			break;
   10428 		}
   10429 		delay(5);
   10430 	}
   10431 
   10432 	return done;
   10433 }
   10434 
   10435 static int
   10436 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10437     uint16_t *data)
   10438 {
   10439 	int i, eerd = 0;
   10440 	int error = 0;
   10441 
   10442 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10443 		device_xname(sc->sc_dev), __func__));
   10444 
   10445 	for (i = 0; i < wordcnt; i++) {
   10446 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10447 
   10448 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10449 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10450 		if (error != 0)
   10451 			break;
   10452 
   10453 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10454 	}
   10455 
   10456 	return error;
   10457 }
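
/*
 * The EERD interface hides the serial protocol behind one register:
 * write the word address plus a start bit, poll for completion, read
 * the data from the same register.  For a single word:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (addr << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	val = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */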
   10458 
   10459 /* Flash */
   10460 
   10461 static int
   10462 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10463 {
   10464 	uint32_t eecd;
   10465 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10466 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10467 	uint8_t sig_byte = 0;
   10468 
   10469 	switch (sc->sc_type) {
   10470 	case WM_T_PCH_SPT:
   10471 		/*
   10472 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10473 		 * sector valid bits from the NVM.
   10474 		 */
   10475 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10476 		if ((*bank == 0) || (*bank == 1)) {
   10477 			aprint_error_dev(sc->sc_dev,
   10478 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10479 				*bank);
   10480 			return -1;
   10481 		} else {
   10482 			*bank = *bank - 2;
   10483 			return 0;
   10484 		}
   10485 	case WM_T_ICH8:
   10486 	case WM_T_ICH9:
   10487 		eecd = CSR_READ(sc, WMREG_EECD);
   10488 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10489 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10490 			return 0;
   10491 		}
   10492 		/* FALLTHROUGH */
   10493 	default:
   10494 		/* Default to 0 */
   10495 		*bank = 0;
   10496 
   10497 		/* Check bank 0 */
   10498 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10499 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10500 			*bank = 0;
   10501 			return 0;
   10502 		}
   10503 
   10504 		/* Check bank 1 */
   10505 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10506 		    &sig_byte);
   10507 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10508 			*bank = 1;
   10509 			return 0;
   10510 		}
   10511 	}
   10512 
   10513 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10514 		device_xname(sc->sc_dev)));
   10515 	return -1;
   10516 }
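
/*
 * On the ICH/PCH flash parts, each NVM bank carries a signature byte;
 * the default path above probes bank 0 and then bank 1 (bank1_offset
 * bytes further on), accepting a bank when
 *
 *	(sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE
 *
 * On PCH_SPT the same information is published through CTRL_EXT
 * instead, with the valid field values biased by 2.
 */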
   10517 
   10518 /******************************************************************************
   10519  * This function does initial flash setup so that a new read/write/erase cycle
   10520  * can be started.
   10521  *
   10522  * sc - The pointer to the hw structure
   10523  ****************************************************************************/
   10524 static int32_t
   10525 wm_ich8_cycle_init(struct wm_softc *sc)
   10526 {
   10527 	uint16_t hsfsts;
   10528 	int32_t error = 1;
   10529 	int32_t i     = 0;
   10530 
   10531 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10532 
    10533 	/* Make sure the Flash Descriptor Valid bit is set in HW status */
   10534 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10535 		return error;
   10536 	}
   10537 
   10538 	/* Clear FCERR in Hw status by writing 1 */
   10539 	/* Clear DAEL in Hw status by writing a 1 */
   10540 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10541 
   10542 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10543 
   10544 	/*
    10545 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10546 	 * check against before starting a new cycle, or the FDONE bit
    10547 	 * should be set by the hardware after reset so that it can be
    10548 	 * used to tell whether a cycle is in progress or has completed.
    10549 	 * We should also have a software semaphore mechanism guarding
    10550 	 * FDONE or the cycle-in-progress bit, so that accesses by two
    10551 	 * threads are serialized and two threads cannot start a cycle
    10552 	 * at the same time.
   10553 	 */
   10554 
   10555 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10556 		/*
   10557 		 * There is no cycle running at present, so we can start a
   10558 		 * cycle
   10559 		 */
   10560 
   10561 		/* Begin by setting Flash Cycle Done. */
   10562 		hsfsts |= HSFSTS_DONE;
   10563 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10564 		error = 0;
   10565 	} else {
   10566 		/*
    10567 		 * Otherwise poll for some time so the current cycle has a
    10568 		 * chance to end before giving up.
   10569 		 */
   10570 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10571 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10572 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10573 				error = 0;
   10574 				break;
   10575 			}
   10576 			delay(1);
   10577 		}
   10578 		if (error == 0) {
   10579 			/*
    10580 			 * The previous cycle ended while we were polling;
    10581 			 * now set the Flash Cycle Done.
   10582 			 */
   10583 			hsfsts |= HSFSTS_DONE;
   10584 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10585 		}
   10586 	}
   10587 	return error;
   10588 }
   10589 
   10590 /******************************************************************************
   10591  * This function starts a flash cycle and waits for its completion
   10592  *
   10593  * sc - The pointer to the hw structure
   10594  ****************************************************************************/
   10595 static int32_t
   10596 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10597 {
   10598 	uint16_t hsflctl;
   10599 	uint16_t hsfsts;
   10600 	int32_t error = 1;
   10601 	uint32_t i = 0;
   10602 
   10603 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10604 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10605 	hsflctl |= HSFCTL_GO;
   10606 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10607 
   10608 	/* Wait till FDONE bit is set to 1 */
   10609 	do {
   10610 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10611 		if (hsfsts & HSFSTS_DONE)
   10612 			break;
   10613 		delay(1);
   10614 		i++;
   10615 	} while (i < timeout);
    10616 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   10617 		error = 0;
   10618 
   10619 	return error;
   10620 }
   10621 
   10622 /******************************************************************************
   10623  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10624  *
   10625  * sc - The pointer to the hw structure
   10626  * index - The index of the byte or word to read.
   10627  * size - Size of data to read, 1=byte 2=word, 4=dword
   10628  * data - Pointer to the word to store the value read.
   10629  *****************************************************************************/
   10630 static int32_t
   10631 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10632     uint32_t size, uint32_t *data)
   10633 {
   10634 	uint16_t hsfsts;
   10635 	uint16_t hsflctl;
   10636 	uint32_t flash_linear_address;
   10637 	uint32_t flash_data = 0;
   10638 	int32_t error = 1;
   10639 	int32_t count = 0;
   10640 
    10641 	if (size < 1 || size > 4 || data == NULL ||
   10642 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10643 		return error;
   10644 
   10645 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10646 	    sc->sc_ich8_flash_base;
   10647 
   10648 	do {
   10649 		delay(1);
   10650 		/* Steps */
   10651 		error = wm_ich8_cycle_init(sc);
   10652 		if (error)
   10653 			break;
   10654 
   10655 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10656 		/* BCOUNT is the transfer size minus one (0 = 1 byte, 3 = 4 bytes) */
   10657 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10658 		    & HSFCTL_BCOUNT_MASK;
   10659 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10660 		if (sc->sc_type == WM_T_PCH_SPT) {
   10661 			/*
    10662 			 * In SPT, this register is in LAN memory space, not
    10663 			 * flash.  Therefore, only 32 bit access is supported.
   10664 			 */
   10665 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10666 			    (uint32_t)hsflctl);
   10667 		} else
   10668 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10669 
   10670 		/*
   10671 		 * Write the last 24 bits of index into Flash Linear address
   10672 		 * field in Flash Address
   10673 		 */
   10674 		/* TODO: TBD maybe check the index against the size of flash */
   10675 
   10676 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10677 
   10678 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10679 
   10680 		/*
    10681 		 * If FCERR is set, clear it and retry the whole sequence
    10682 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
    10683 		 * the result out of Flash Data0, least significant byte
    10684 		 * first.
   10685 		 */
   10686 		if (error == 0) {
   10687 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10688 			if (size == 1)
   10689 				*data = (uint8_t)(flash_data & 0x000000FF);
   10690 			else if (size == 2)
   10691 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10692 			else if (size == 4)
   10693 				*data = (uint32_t)flash_data;
   10694 			break;
   10695 		} else {
   10696 			/*
   10697 			 * If we've gotten here, then things are probably
   10698 			 * completely hosed, but if the error condition is
   10699 			 * detected, it won't hurt to give it another try...
   10700 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10701 			 */
   10702 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10703 			if (hsfsts & HSFSTS_ERR) {
   10704 				/* Repeat for some time before giving up. */
   10705 				continue;
   10706 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10707 				break;
   10708 		}
   10709 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10710 
   10711 	return error;
   10712 }
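
/*
 * Putting the pieces together, a single flash read above is:
 *
 *	wm_ich8_cycle_init()	clear stale error bits, wait out any
 *				cycle already in flight, set FDONE
 *	program HSFCTL		byte count and ICH_CYCLE_READ opcode
 *	program FADDR		linear address within the flash region
 *	wm_ich8_flash_cycle()	set HSFCTL_GO and poll HSFSTS_DONE
 *	read FDATA0		extract 1, 2 or 4 bytes
 *
 * and on FCERR the whole sequence is retried, up to
 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
 */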
   10713 
   10714 /******************************************************************************
   10715  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10716  *
   10717  * sc - pointer to wm_hw structure
   10718  * index - The index of the byte to read.
   10719  * data - Pointer to a byte to store the value read.
   10720  *****************************************************************************/
   10721 static int32_t
   10722 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10723 {
   10724 	int32_t status;
   10725 	uint32_t word = 0;
   10726 
   10727 	status = wm_read_ich8_data(sc, index, 1, &word);
   10728 	if (status == 0)
   10729 		*data = (uint8_t)word;
   10730 	else
   10731 		*data = 0;
   10732 
   10733 	return status;
   10734 }
   10735 
   10736 /******************************************************************************
   10737  * Reads a word from the NVM using the ICH8 flash access registers.
   10738  *
   10739  * sc - pointer to wm_hw structure
   10740  * index - The starting byte index of the word to read.
   10741  * data - Pointer to a word to store the value read.
   10742  *****************************************************************************/
   10743 static int32_t
   10744 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10745 {
   10746 	int32_t status;
   10747 	uint32_t word = 0;
   10748 
   10749 	status = wm_read_ich8_data(sc, index, 2, &word);
   10750 	if (status == 0)
   10751 		*data = (uint16_t)word;
   10752 	else
   10753 		*data = 0;
   10754 
   10755 	return status;
   10756 }
   10757 
   10758 /******************************************************************************
   10759  * Reads a dword from the NVM using the ICH8 flash access registers.
   10760  *
   10761  * sc - pointer to wm_hw structure
   10762  * index - The starting byte index of the word to read.
   10763  * data - Pointer to a word to store the value read.
   10764  *****************************************************************************/
   10765 static int32_t
   10766 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10767 {
   10768 	int32_t status;
   10769 
   10770 	status = wm_read_ich8_data(sc, index, 4, data);
   10771 	return status;
   10772 }
   10773 
   10774 /******************************************************************************
   10775  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10776  * register.
   10777  *
   10778  * sc - Struct containing variables accessed by shared code
   10779  * offset - offset of word in the EEPROM to read
   10780  * data - word read from the EEPROM
   10781  * words - number of words to read
   10782  *****************************************************************************/
   10783 static int
   10784 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10785 {
   10786 	int32_t  error = 0;
   10787 	uint32_t flash_bank = 0;
   10788 	uint32_t act_offset = 0;
   10789 	uint32_t bank_offset = 0;
   10790 	uint16_t word = 0;
   10791 	uint16_t i = 0;
   10792 
   10793 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10794 		device_xname(sc->sc_dev), __func__));
   10795 
   10796 	/*
   10797 	 * We need to know which is the valid flash bank.  In the event
   10798 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10799 	 * managing flash_bank.  So it cannot be trusted and needs
   10800 	 * to be updated with each read.
   10801 	 */
   10802 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10803 	if (error) {
   10804 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10805 			device_xname(sc->sc_dev)));
   10806 		flash_bank = 0;
   10807 	}
   10808 
   10809 	/*
   10810 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10811 	 * size
   10812 	 */
   10813 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10814 
   10815 	error = wm_get_swfwhw_semaphore(sc);
   10816 	if (error) {
   10817 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10818 		    __func__);
   10819 		return error;
   10820 	}
   10821 
   10822 	for (i = 0; i < words; i++) {
   10823 		/* The NVM part needs a byte offset, hence * 2 */
   10824 		act_offset = bank_offset + ((offset + i) * 2);
   10825 		error = wm_read_ich8_word(sc, act_offset, &word);
   10826 		if (error) {
   10827 			aprint_error_dev(sc->sc_dev,
   10828 			    "%s: failed to read NVM\n", __func__);
   10829 			break;
   10830 		}
   10831 		data[i] = word;
   10832 	}
   10833 
   10834 	wm_put_swfwhw_semaphore(sc);
   10835 	return error;
   10836 }
   10837 
   10838 /******************************************************************************
   10839  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10840  * register.
   10841  *
   10842  * sc - Struct containing variables accessed by shared code
   10843  * offset - offset of word in the EEPROM to read
   10844  * data - word read from the EEPROM
   10845  * words - number of words to read
   10846  *****************************************************************************/
   10847 static int
   10848 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10849 {
   10850 	int32_t  error = 0;
   10851 	uint32_t flash_bank = 0;
   10852 	uint32_t act_offset = 0;
   10853 	uint32_t bank_offset = 0;
   10854 	uint32_t dword = 0;
   10855 	uint16_t i = 0;
   10856 
   10857 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10858 		device_xname(sc->sc_dev), __func__));
   10859 
   10860 	/*
   10861 	 * We need to know which is the valid flash bank.  In the event
   10862 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10863 	 * managing flash_bank.  So it cannot be trusted and needs
   10864 	 * to be updated with each read.
   10865 	 */
   10866 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10867 	if (error) {
   10868 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10869 			device_xname(sc->sc_dev)));
   10870 		flash_bank = 0;
   10871 	}
   10872 
   10873 	/*
   10874 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10875 	 * size
   10876 	 */
   10877 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10878 
   10879 	error = wm_get_swfwhw_semaphore(sc);
   10880 	if (error) {
   10881 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10882 		    __func__);
   10883 		return error;
   10884 	}
   10885 
   10886 	for (i = 0; i < words; i++) {
   10887 		/* The NVM part needs a byte offset, hence * 2 */
   10888 		act_offset = bank_offset + ((offset + i) * 2);
   10889 		/* but we must read dword aligned, so mask ... */
   10890 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10891 		if (error) {
   10892 			aprint_error_dev(sc->sc_dev,
   10893 			    "%s: failed to read NVM\n", __func__);
   10894 			break;
   10895 		}
   10896 		/* ... and pick out low or high word */
   10897 		if ((act_offset & 0x2) == 0)
   10898 			data[i] = (uint16_t)(dword & 0xFFFF);
   10899 		else
   10900 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10901 	}
   10902 
   10903 	wm_put_swfwhw_semaphore(sc);
   10904 	return error;
   10905 }
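
/*
 * A worked example of the alignment handling above: reading NVM word 5
 * from bank 0 gives act_offset = 10.  The flash is read at the
 * dword-aligned byte offset 8 (act_offset & ~0x3), and since
 * act_offset & 0x2 is set, the wanted word is the high half of the
 * dword.  Word 4 would read the same dword and take the low half.
 */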
   10906 
   10907 /* iNVM */
   10908 
   10909 static int
   10910 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10911 {
    10912 	int32_t  rv = -1;	/* assume not found */
   10913 	uint32_t invm_dword;
   10914 	uint16_t i;
   10915 	uint8_t record_type, word_address;
   10916 
   10917 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10918 		device_xname(sc->sc_dev), __func__));
   10919 
   10920 	for (i = 0; i < INVM_SIZE; i++) {
   10921 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10922 		/* Get record type */
   10923 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10924 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10925 			break;
   10926 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10927 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10928 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10929 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10930 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10931 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10932 			if (word_address == address) {
   10933 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10934 				rv = 0;
   10935 				break;
   10936 			}
   10937 		}
   10938 	}
   10939 
   10940 	return rv;
   10941 }
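
/*
 * The iNVM (the I210/I211's integrated OTP array, used when no external
 * flash is present) is a sequence of 32-bit records.  The loop above
 * walks it record by record: CSR- and RSA-autoload structures are
 * skipped over by their fixed sizes, a word-autoload record whose
 * embedded address matches the request supplies the data, and an
 * uninitialized record terminates the walk.
 */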
   10942 
   10943 static int
   10944 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10945 {
   10946 	int rv = 0;
   10947 	int i;
   10948 
   10949 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10950 		device_xname(sc->sc_dev), __func__));
   10951 
   10952 	for (i = 0; i < words; i++) {
   10953 		switch (offset + i) {
   10954 		case NVM_OFF_MACADDR:
   10955 		case NVM_OFF_MACADDR1:
   10956 		case NVM_OFF_MACADDR2:
   10957 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10958 			if (rv != 0) {
   10959 				data[i] = 0xffff;
   10960 				rv = -1;
   10961 			}
   10962 			break;
   10963 		case NVM_OFF_CFG2:
   10964 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10965 			if (rv != 0) {
   10966 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10967 				rv = 0;
   10968 			}
   10969 			break;
   10970 		case NVM_OFF_CFG4:
   10971 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10972 			if (rv != 0) {
   10973 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10974 				rv = 0;
   10975 			}
   10976 			break;
   10977 		case NVM_OFF_LED_1_CFG:
   10978 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10979 			if (rv != 0) {
   10980 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10981 				rv = 0;
   10982 			}
   10983 			break;
   10984 		case NVM_OFF_LED_0_2_CFG:
   10985 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10986 			if (rv != 0) {
   10987 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10988 				rv = 0;
   10989 			}
   10990 			break;
   10991 		case NVM_OFF_ID_LED_SETTINGS:
   10992 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10993 			if (rv != 0) {
   10994 				*data = ID_LED_RESERVED_FFFF;
   10995 				rv = 0;
   10996 			}
   10997 			break;
   10998 		default:
   10999 			DPRINTF(WM_DEBUG_NVM,
   11000 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11001 			*data = NVM_RESERVED_WORD;
   11002 			break;
   11003 		}
   11004 	}
   11005 
   11006 	return rv;
   11007 }
   11008 
   11009 /* Lock, detecting NVM type, validate checksum, version and read */
   11010 
   11011 /*
   11012  * wm_nvm_acquire:
   11013  *
   11014  *	Perform the EEPROM handshake required on some chips.
   11015  */
   11016 static int
   11017 wm_nvm_acquire(struct wm_softc *sc)
   11018 {
   11019 	uint32_t reg;
   11020 	int x;
   11021 	int ret = 0;
   11022 
   11023 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11024 		device_xname(sc->sc_dev), __func__));
   11025 
   11026 	if (sc->sc_type >= WM_T_ICH8) {
   11027 		ret = wm_get_nvm_ich8lan(sc);
   11028 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11029 		ret = wm_get_swfwhw_semaphore(sc);
   11030 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11031 		/* This will also do wm_get_swsm_semaphore() if needed */
   11032 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11033 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11034 		ret = wm_get_swsm_semaphore(sc);
   11035 	}
   11036 
   11037 	if (ret) {
   11038 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11039 			__func__);
   11040 		return 1;
   11041 	}
   11042 
   11043 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11044 		reg = CSR_READ(sc, WMREG_EECD);
   11045 
   11046 		/* Request EEPROM access. */
   11047 		reg |= EECD_EE_REQ;
   11048 		CSR_WRITE(sc, WMREG_EECD, reg);
   11049 
   11050 		/* ..and wait for it to be granted. */
   11051 		for (x = 0; x < 1000; x++) {
   11052 			reg = CSR_READ(sc, WMREG_EECD);
   11053 			if (reg & EECD_EE_GNT)
   11054 				break;
   11055 			delay(5);
   11056 		}
   11057 		if ((reg & EECD_EE_GNT) == 0) {
   11058 			aprint_error_dev(sc->sc_dev,
   11059 			    "could not acquire EEPROM GNT\n");
   11060 			reg &= ~EECD_EE_REQ;
   11061 			CSR_WRITE(sc, WMREG_EECD, reg);
   11062 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11063 				wm_put_swfwhw_semaphore(sc);
   11064 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11065 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11066 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11067 				wm_put_swsm_semaphore(sc);
   11068 			return 1;
   11069 		}
   11070 	}
   11071 
   11072 	return 0;
   11073 }
   11074 
   11075 /*
   11076  * wm_nvm_release:
   11077  *
   11078  *	Release the EEPROM mutex.
   11079  */
   11080 static void
   11081 wm_nvm_release(struct wm_softc *sc)
   11082 {
   11083 	uint32_t reg;
   11084 
   11085 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11086 		device_xname(sc->sc_dev), __func__));
   11087 
   11088 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11089 		reg = CSR_READ(sc, WMREG_EECD);
   11090 		reg &= ~EECD_EE_REQ;
   11091 		CSR_WRITE(sc, WMREG_EECD, reg);
   11092 	}
   11093 
   11094 	if (sc->sc_type >= WM_T_ICH8) {
   11095 		wm_put_nvm_ich8lan(sc);
   11096 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11097 		wm_put_swfwhw_semaphore(sc);
   11098 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11099 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11100 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11101 		wm_put_swsm_semaphore(sc);
   11102 }
   11103 
   11104 static int
   11105 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11106 {
   11107 	uint32_t eecd = 0;
   11108 
   11109 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11110 	    || sc->sc_type == WM_T_82583) {
   11111 		eecd = CSR_READ(sc, WMREG_EECD);
   11112 
   11113 		/* Isolate bits 15 & 16 */
   11114 		eecd = ((eecd >> 15) & 0x03);
   11115 
   11116 		/* If both bits are set, device is Flash type */
   11117 		if (eecd == 0x03)
   11118 			return 0;
   11119 	}
   11120 	return 1;
   11121 }
   11122 
   11123 static int
   11124 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11125 {
   11126 	uint32_t eec;
   11127 
   11128 	eec = CSR_READ(sc, WMREG_EEC);
   11129 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11130 		return 1;
   11131 
   11132 	return 0;
   11133 }
   11134 
   11135 /*
   11136  * wm_nvm_validate_checksum
   11137  *
   11138  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11139  */
   11140 static int
   11141 wm_nvm_validate_checksum(struct wm_softc *sc)
   11142 {
   11143 	uint16_t checksum;
   11144 	uint16_t eeprom_data;
   11145 #ifdef WM_DEBUG
   11146 	uint16_t csum_wordaddr, valid_checksum;
   11147 #endif
   11148 	int i;
   11149 
   11150 	checksum = 0;
   11151 
   11152 	/* Don't check for I211 */
   11153 	if (sc->sc_type == WM_T_I211)
   11154 		return 0;
   11155 
   11156 #ifdef WM_DEBUG
   11157 	if (sc->sc_type == WM_T_PCH_LPT) {
   11158 		csum_wordaddr = NVM_OFF_COMPAT;
   11159 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11160 	} else {
   11161 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11162 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11163 	}
   11164 
   11165 	/* Dump EEPROM image for debug */
   11166 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11167 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11168 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11169 		/* XXX PCH_SPT? */
   11170 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11171 		if ((eeprom_data & valid_checksum) == 0) {
   11172 			DPRINTF(WM_DEBUG_NVM,
   11173 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11174 				device_xname(sc->sc_dev), eeprom_data,
   11175 				    valid_checksum));
   11176 		}
   11177 	}
   11178 
   11179 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11180 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11181 		for (i = 0; i < NVM_SIZE; i++) {
   11182 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11183 				printf("XXXX ");
   11184 			else
   11185 				printf("%04hx ", eeprom_data);
   11186 			if (i % 8 == 7)
   11187 				printf("\n");
   11188 		}
   11189 	}
   11190 
   11191 #endif /* WM_DEBUG */
   11192 
   11193 	for (i = 0; i < NVM_SIZE; i++) {
   11194 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11195 			return 1;
   11196 		checksum += eeprom_data;
   11197 	}
   11198 
   11199 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11200 #ifdef WM_DEBUG
   11201 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11202 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11203 #endif
   11204 	}
   11205 
   11206 	return 0;
   11207 }
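
/*
 * Example of the rule above: the 16-bit sum of NVM words 0x00 through
 * 0x3f must equal NVM_CHECKSUM (0xbaba on e1000-class hardware); the
 * checksum word at the end of that range is programmed so the total
 * comes out right.  Note that on a mismatch this driver only complains
 * under WM_DEBUG and still returns success, since shipped NVM images
 * with bad checksums are not uncommon.
 */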
   11208 
   11209 static void
   11210 wm_nvm_version_invm(struct wm_softc *sc)
   11211 {
   11212 	uint32_t dword;
   11213 
   11214 	/*
    11215 	 * Linux's code to decode the version is very strange, so we don't
    11216 	 * follow that algorithm and simply decode word 61 as the document
    11217 	 * describes.  It may not be perfect, though...
   11218 	 *
   11219 	 * Example:
   11220 	 *
   11221 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11222 	 */
   11223 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11224 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11225 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11226 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11227 }
   11228 
   11229 static void
   11230 wm_nvm_version(struct wm_softc *sc)
   11231 {
   11232 	uint16_t major, minor, build, patch;
   11233 	uint16_t uid0, uid1;
   11234 	uint16_t nvm_data;
   11235 	uint16_t off;
   11236 	bool check_version = false;
   11237 	bool check_optionrom = false;
   11238 	bool have_build = false;
   11239 
   11240 	/*
   11241 	 * Version format:
   11242 	 *
   11243 	 * XYYZ
   11244 	 * X0YZ
   11245 	 * X0YY
   11246 	 *
   11247 	 * Example:
   11248 	 *
   11249 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11250 	 *	82571	0x50a6	5.10.6?
   11251 	 *	82572	0x506a	5.6.10?
   11252 	 *	82572EI	0x5069	5.6.9?
   11253 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11254 	 *		0x2013	2.1.3?
    11255 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11256 	 */
   11257 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11258 	switch (sc->sc_type) {
   11259 	case WM_T_82571:
   11260 	case WM_T_82572:
   11261 	case WM_T_82574:
   11262 	case WM_T_82583:
   11263 		check_version = true;
   11264 		check_optionrom = true;
   11265 		have_build = true;
   11266 		break;
   11267 	case WM_T_82575:
   11268 	case WM_T_82576:
   11269 	case WM_T_82580:
   11270 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11271 			check_version = true;
   11272 		break;
   11273 	case WM_T_I211:
   11274 		wm_nvm_version_invm(sc);
   11275 		goto printver;
   11276 	case WM_T_I210:
   11277 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11278 			wm_nvm_version_invm(sc);
   11279 			goto printver;
   11280 		}
   11281 		/* FALLTHROUGH */
   11282 	case WM_T_I350:
   11283 	case WM_T_I354:
   11284 		check_version = true;
   11285 		check_optionrom = true;
   11286 		break;
   11287 	default:
   11288 		return;
   11289 	}
   11290 	if (check_version) {
   11291 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11292 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11293 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11294 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11295 			build = nvm_data & NVM_BUILD_MASK;
   11296 			have_build = true;
   11297 		} else
   11298 			minor = nvm_data & 0x00ff;
   11299 
   11300 		/* Decimal */
   11301 		minor = (minor / 16) * 10 + (minor % 16);
   11302 		sc->sc_nvm_ver_major = major;
   11303 		sc->sc_nvm_ver_minor = minor;
   11304 
   11305 printver:
   11306 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11307 		    sc->sc_nvm_ver_minor);
   11308 		if (have_build) {
   11309 			sc->sc_nvm_ver_build = build;
   11310 			aprint_verbose(".%d", build);
   11311 		}
   11312 	}
   11313 	if (check_optionrom) {
   11314 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11315 		/* Option ROM Version */
   11316 		if ((off != 0x0000) && (off != 0xffff)) {
   11317 			off += NVM_COMBO_VER_OFF;
   11318 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11319 			wm_nvm_read(sc, off, 1, &uid0);
   11320 			if ((uid0 != 0) && (uid0 != 0xffff)
   11321 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11322 				/* 16bits */
   11323 				major = uid0 >> 8;
   11324 				build = (uid0 << 8) | (uid1 >> 8);
   11325 				patch = uid1 & 0x00ff;
   11326 				aprint_verbose(", option ROM Version %d.%d.%d",
   11327 				    major, build, patch);
   11328 			}
   11329 		}
   11330 	}
   11331 
   11332 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11333 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11334 }
   11335 
   11336 /*
   11337  * wm_nvm_read:
   11338  *
   11339  *	Read data from the serial EEPROM.
   11340  */
   11341 static int
   11342 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11343 {
   11344 	int rv;
   11345 
   11346 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11347 		device_xname(sc->sc_dev), __func__));
   11348 
   11349 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11350 		return 1;
   11351 
   11352 	if (wm_nvm_acquire(sc))
   11353 		return 1;
   11354 
   11355 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11356 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11357 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11358 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11359 	else if (sc->sc_type == WM_T_PCH_SPT)
   11360 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11361 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11362 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11363 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11364 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11365 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11366 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11367 	else
   11368 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11369 
   11370 	wm_nvm_release(sc);
   11371 	return rv;
   11372 }
   11373 
   11374 /*
   11375  * Hardware semaphores.
    11376  * Very complex...
   11377  */
   11378 
   11379 static int
   11380 wm_get_null(struct wm_softc *sc)
   11381 {
   11382 
   11383 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11384 		device_xname(sc->sc_dev), __func__));
   11385 	return 0;
   11386 }
   11387 
   11388 static void
   11389 wm_put_null(struct wm_softc *sc)
   11390 {
   11391 
   11392 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11393 		device_xname(sc->sc_dev), __func__));
   11394 	return;
   11395 }
   11396 
   11397 /*
   11398  * Get hardware semaphore.
   11399  * Same as e1000_get_hw_semaphore_generic()
   11400  */
   11401 static int
   11402 wm_get_swsm_semaphore(struct wm_softc *sc)
   11403 {
   11404 	int32_t timeout;
   11405 	uint32_t swsm;
   11406 
   11407 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11408 		device_xname(sc->sc_dev), __func__));
   11409 	KASSERT(sc->sc_nvm_wordsize > 0);
   11410 
   11411 	/* Get the SW semaphore. */
   11412 	timeout = sc->sc_nvm_wordsize + 1;
   11413 	while (timeout) {
   11414 		swsm = CSR_READ(sc, WMREG_SWSM);
   11415 
   11416 		if ((swsm & SWSM_SMBI) == 0)
   11417 			break;
   11418 
   11419 		delay(50);
   11420 		timeout--;
   11421 	}
   11422 
   11423 	if (timeout == 0) {
   11424 		aprint_error_dev(sc->sc_dev,
   11425 		    "could not acquire SWSM SMBI\n");
   11426 		return 1;
   11427 	}
   11428 
   11429 	/* Get the FW semaphore. */
   11430 	timeout = sc->sc_nvm_wordsize + 1;
   11431 	while (timeout) {
   11432 		swsm = CSR_READ(sc, WMREG_SWSM);
   11433 		swsm |= SWSM_SWESMBI;
   11434 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11435 		/* If we managed to set the bit we got the semaphore. */
   11436 		swsm = CSR_READ(sc, WMREG_SWSM);
   11437 		if (swsm & SWSM_SWESMBI)
   11438 			break;
   11439 
   11440 		delay(50);
   11441 		timeout--;
   11442 	}
   11443 
   11444 	if (timeout == 0) {
   11445 		aprint_error_dev(sc->sc_dev,
   11446 		    "could not acquire SWSM SWESMBI\n");
   11447 		/* Release semaphores */
   11448 		wm_put_swsm_semaphore(sc);
   11449 		return 1;
   11450 	}
   11451 	return 0;
   11452 }
   11453 
   11454 /*
   11455  * Put hardware semaphore.
   11456  * Same as e1000_put_hw_semaphore_generic()
   11457  */
   11458 static void
   11459 wm_put_swsm_semaphore(struct wm_softc *sc)
   11460 {
   11461 	uint32_t swsm;
   11462 
   11463 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11464 		device_xname(sc->sc_dev), __func__));
   11465 
   11466 	swsm = CSR_READ(sc, WMREG_SWSM);
   11467 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11468 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11469 }
   11470 
   11471 /*
   11472  * Get SW/FW semaphore.
   11473  * Same as e1000_acquire_swfw_sync_82575().
   11474  */
   11475 static int
   11476 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11477 {
   11478 	uint32_t swfw_sync;
   11479 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11480 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   11482 
   11483 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11484 		device_xname(sc->sc_dev), __func__));
   11485 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11486 
   11487 	for (timeout = 0; timeout < 200; timeout++) {
   11488 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11489 			if (wm_get_swsm_semaphore(sc)) {
   11490 				aprint_error_dev(sc->sc_dev,
   11491 				    "%s: failed to get semaphore\n",
   11492 				    __func__);
   11493 				return 1;
   11494 			}
   11495 		}
   11496 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11497 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11498 			swfw_sync |= swmask;
   11499 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11500 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11501 				wm_put_swsm_semaphore(sc);
   11502 			return 0;
   11503 		}
   11504 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11505 			wm_put_swsm_semaphore(sc);
   11506 		delay(5000);
   11507 	}
   11508 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11509 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11510 	return 1;
   11511 }
   11512 
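/*
 * Release the SW/FW semaphore: clear our software bit in SW_FW_SYNC,
 * taking the SWSM semaphore around the read-modify-write.
 */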
   11513 static void
   11514 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11515 {
   11516 	uint32_t swfw_sync;
   11517 
   11518 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11519 		device_xname(sc->sc_dev), __func__));
   11520 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11521 
   11522 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11523 		while (wm_get_swsm_semaphore(sc) != 0)
   11524 			continue;
   11525 	}
   11526 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11527 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11528 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11529 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11530 		wm_put_swsm_semaphore(sc);
   11531 }
   11532 
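/*
 * PHY semaphore for 82575 and newer: the SW_FW_SYNC bit to take is
 * selected per function via swfwphysem[].
 */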
   11533 static int
   11534 wm_get_phy_82575(struct wm_softc *sc)
   11535 {
   11536 
   11537 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11538 		device_xname(sc->sc_dev), __func__));
   11539 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11540 }
   11541 
   11542 static void
   11543 wm_put_phy_82575(struct wm_softc *sc)
   11544 {
   11545 
   11546 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11547 		device_xname(sc->sc_dev), __func__));
   11548 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11549 }
   11550 
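/*
 * SW/FW/HW semaphore for ICH8-class chips: take MDIO software
 * ownership in EXTCNFCTR, serialized by sc_ich_phymtx.
 */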
   11551 static int
   11552 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11553 {
   11554 	uint32_t ext_ctrl;
	int timeout;
   11556 
   11557 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11558 		device_xname(sc->sc_dev), __func__));
   11559 
   11560 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11561 	for (timeout = 0; timeout < 200; timeout++) {
   11562 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11563 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11564 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11565 
   11566 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11567 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11568 			return 0;
   11569 		delay(5000);
   11570 	}
   11571 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11572 	    device_xname(sc->sc_dev), ext_ctrl);
   11573 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11574 	return 1;
   11575 }
   11576 
   11577 static void
   11578 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11579 {
   11580 	uint32_t ext_ctrl;
   11581 
   11582 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11583 		device_xname(sc->sc_dev), __func__));
   11584 
   11585 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11586 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11587 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11588 
   11589 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11590 }
   11591 
   11592 static int
   11593 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11594 {
   11595 	uint32_t ext_ctrl;
   11596 	int timeout;
   11597 
   11598 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11599 		device_xname(sc->sc_dev), __func__));
   11600 	mutex_enter(sc->sc_ich_phymtx);
   11601 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11602 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11603 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11604 			break;
   11605 		delay(1000);
   11606 	}
   11607 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11608 		printf("%s: SW has already locked the resource\n",
   11609 		    device_xname(sc->sc_dev));
   11610 		goto out;
   11611 	}
   11612 
   11613 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11614 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11615 	for (timeout = 0; timeout < 1000; timeout++) {
   11616 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11617 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11618 			break;
   11619 		delay(1000);
   11620 	}
   11621 	if (timeout >= 1000) {
   11622 		printf("%s: failed to acquire semaphore\n",
   11623 		    device_xname(sc->sc_dev));
   11624 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11625 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11626 		goto out;
   11627 	}
   11628 	return 0;
   11629 
   11630 out:
   11631 	mutex_exit(sc->sc_ich_phymtx);
   11632 	return 1;
   11633 }
   11634 
   11635 static void
   11636 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11637 {
   11638 	uint32_t ext_ctrl;
   11639 
   11640 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11641 		device_xname(sc->sc_dev), __func__));
   11642 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11643 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11644 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11645 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11646 	} else {
   11647 		printf("%s: Semaphore unexpectedly released\n",
   11648 		    device_xname(sc->sc_dev));
   11649 	}
   11650 
   11651 	mutex_exit(sc->sc_ich_phymtx);
   11652 }
   11653 
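/* On ICH8 and newer, NVM access is serialized by a simple mutex. */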
   11654 static int
   11655 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11656 {
   11657 
   11658 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11659 		device_xname(sc->sc_dev), __func__));
   11660 	mutex_enter(sc->sc_ich_nvmmtx);
   11661 
   11662 	return 0;
   11663 }
   11664 
   11665 static void
   11666 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11667 {
   11668 
   11669 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11670 		device_xname(sc->sc_dev), __func__));
   11671 	mutex_exit(sc->sc_ich_nvmmtx);
   11672 }
   11673 
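/* 82573 hardware semaphore, also based on EXTCNFCTR MDIO ownership. */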
   11674 static int
   11675 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11676 {
   11677 	int i = 0;
   11678 	uint32_t reg;
   11679 
   11680 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11681 		device_xname(sc->sc_dev), __func__));
   11682 
   11683 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11684 	do {
   11685 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11686 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11687 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11688 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11689 			break;
   11690 		delay(2*1000);
   11691 		i++;
   11692 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11693 
   11694 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11695 		wm_put_hw_semaphore_82573(sc);
   11696 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11697 		    device_xname(sc->sc_dev));
   11698 		return -1;
   11699 	}
   11700 
   11701 	return 0;
   11702 }
   11703 
   11704 static void
   11705 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11706 {
   11707 	uint32_t reg;
   11708 
   11709 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11710 		device_xname(sc->sc_dev), __func__));
   11711 
   11712 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11713 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11714 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11715 }
   11716 
   11717 /*
   11718  * Management mode and power management related subroutines.
   11719  * BMC, AMT, suspend/resume and EEE.
   11720  */
   11721 
   11722 #ifdef WM_WOL
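/* Return 1 if the chip's management firmware mode is enabled, else 0. */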
   11723 static int
   11724 wm_check_mng_mode(struct wm_softc *sc)
   11725 {
   11726 	int rv;
   11727 
   11728 	switch (sc->sc_type) {
   11729 	case WM_T_ICH8:
   11730 	case WM_T_ICH9:
   11731 	case WM_T_ICH10:
   11732 	case WM_T_PCH:
   11733 	case WM_T_PCH2:
   11734 	case WM_T_PCH_LPT:
   11735 	case WM_T_PCH_SPT:
   11736 		rv = wm_check_mng_mode_ich8lan(sc);
   11737 		break;
   11738 	case WM_T_82574:
   11739 	case WM_T_82583:
   11740 		rv = wm_check_mng_mode_82574(sc);
   11741 		break;
   11742 	case WM_T_82571:
   11743 	case WM_T_82572:
   11744 	case WM_T_82573:
   11745 	case WM_T_80003:
   11746 		rv = wm_check_mng_mode_generic(sc);
   11747 		break;
   11748 	default:
		/* nothing to do */
   11750 		rv = 0;
   11751 		break;
   11752 	}
   11753 
   11754 	return rv;
   11755 }
   11756 
   11757 static int
   11758 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11759 {
   11760 	uint32_t fwsm;
   11761 
   11762 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11763 
   11764 	if (((fwsm & FWSM_FW_VALID) != 0)
   11765 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11766 		return 1;
   11767 
   11768 	return 0;
   11769 }
   11770 
   11771 static int
   11772 wm_check_mng_mode_82574(struct wm_softc *sc)
   11773 {
   11774 	uint16_t data;
   11775 
   11776 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11777 
   11778 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11779 		return 1;
   11780 
   11781 	return 0;
   11782 }
   11783 
   11784 static int
   11785 wm_check_mng_mode_generic(struct wm_softc *sc)
   11786 {
   11787 	uint32_t fwsm;
   11788 
   11789 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11790 
   11791 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11792 		return 1;
   11793 
   11794 	return 0;
   11795 }
   11796 #endif /* WM_WOL */
   11797 
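/*
 * Return 1 if management pass-through to the host is enabled: the
 * firmware must be present, TCO receive enabled and the firmware in a
 * pass-through management mode.
 */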
   11798 static int
   11799 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11800 {
   11801 	uint32_t manc, fwsm, factps;
   11802 
   11803 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11804 		return 0;
   11805 
   11806 	manc = CSR_READ(sc, WMREG_MANC);
   11807 
   11808 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11809 		device_xname(sc->sc_dev), manc));
   11810 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11811 		return 0;
   11812 
   11813 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11814 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11815 		factps = CSR_READ(sc, WMREG_FACTPS);
   11816 		if (((factps & FACTPS_MNGCG) == 0)
   11817 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11818 			return 1;
   11819 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11820 		uint16_t data;
   11821 
   11822 		factps = CSR_READ(sc, WMREG_FACTPS);
   11823 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11824 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11825 			device_xname(sc->sc_dev), factps, data));
   11826 		if (((factps & FACTPS_MNGCG) == 0)
   11827 		    && ((data & NVM_CFG2_MNGM_MASK)
   11828 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11829 			return 1;
   11830 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11831 	    && ((manc & MANC_ASF_EN) == 0))
   11832 		return 1;
   11833 
   11834 	return 0;
   11835 }
   11836 
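/*
 * Check whether the firmware currently blocks a PHY reset; ICH and
 * PCH chips poll FWSM_RSPCIPHY for up to 300ms before giving up.
 */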
   11837 static bool
   11838 wm_phy_resetisblocked(struct wm_softc *sc)
   11839 {
   11840 	bool blocked = false;
   11841 	uint32_t reg;
   11842 	int i = 0;
   11843 
   11844 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11845 		device_xname(sc->sc_dev), __func__));
   11846 
   11847 	switch (sc->sc_type) {
   11848 	case WM_T_ICH8:
   11849 	case WM_T_ICH9:
   11850 	case WM_T_ICH10:
   11851 	case WM_T_PCH:
   11852 	case WM_T_PCH2:
   11853 	case WM_T_PCH_LPT:
   11854 	case WM_T_PCH_SPT:
   11855 		do {
   11856 			reg = CSR_READ(sc, WMREG_FWSM);
   11857 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11858 				blocked = true;
   11859 				delay(10*1000);
   11860 				continue;
   11861 			}
   11862 			blocked = false;
   11863 		} while (blocked && (i++ < 30));
		return blocked;
   11866 	case WM_T_82571:
   11867 	case WM_T_82572:
   11868 	case WM_T_82573:
   11869 	case WM_T_82574:
   11870 	case WM_T_82583:
   11871 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   11878 	default:
   11879 		/* no problem */
   11880 		break;
   11881 	}
   11882 
   11883 	return false;
   11884 }
   11885 
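/*
 * Tell the firmware that the driver has loaded and taken control of
 * the hardware by setting the DRV_LOAD bit (in SWSM on 82573, in
 * CTRL_EXT on 82571 and newer); wm_release_hw_control() clears it.
 */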
   11886 static void
   11887 wm_get_hw_control(struct wm_softc *sc)
   11888 {
   11889 	uint32_t reg;
   11890 
   11891 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11892 		device_xname(sc->sc_dev), __func__));
   11893 
   11894 	if (sc->sc_type == WM_T_82573) {
   11895 		reg = CSR_READ(sc, WMREG_SWSM);
   11896 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11897 	} else if (sc->sc_type >= WM_T_82571) {
   11898 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11899 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11900 	}
   11901 }
   11902 
   11903 static void
   11904 wm_release_hw_control(struct wm_softc *sc)
   11905 {
   11906 	uint32_t reg;
   11907 
   11908 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11909 		device_xname(sc->sc_dev), __func__));
   11910 
   11911 	if (sc->sc_type == WM_T_82573) {
   11912 		reg = CSR_READ(sc, WMREG_SWSM);
   11913 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   11914 	} else if (sc->sc_type >= WM_T_82571) {
   11915 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11916 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11917 	}
   11918 }
   11919 
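/*
 * Gate or ungate automatic PHY configuration by hardware; only
 * relevant on PCH2 (82579) and newer.
 */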
   11920 static void
   11921 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11922 {
   11923 	uint32_t reg;
   11924 
   11925 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11926 		device_xname(sc->sc_dev), __func__));
   11927 
   11928 	if (sc->sc_type < WM_T_PCH2)
   11929 		return;
   11930 
   11931 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11932 
   11933 	if (gate)
   11934 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11935 	else
   11936 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11937 
   11938 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11939 }
   11940 
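/*
 * Switch the PHY interface from SMBus back to PCIe (MDIO) so the PHY
 * becomes accessible, toggling LANPHYPC and unforcing SMBus mode as
 * needed.
 */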
   11941 static void
   11942 wm_smbustopci(struct wm_softc *sc)
   11943 {
   11944 	uint32_t fwsm, reg;
   11945 	int rv = 0;
   11946 
   11947 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11948 		device_xname(sc->sc_dev), __func__));
   11949 
   11950 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11951 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11952 
   11953 	/* Disable ULP */
   11954 	wm_ulp_disable(sc);
   11955 
   11956 	/* Acquire PHY semaphore */
   11957 	sc->phy.acquire(sc);
   11958 
   11959 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11960 	switch (sc->sc_type) {
   11961 	case WM_T_PCH_LPT:
   11962 	case WM_T_PCH_SPT:
   11963 		if (wm_phy_is_accessible_pchlan(sc))
   11964 			break;
   11965 
   11966 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11967 		reg |= CTRL_EXT_FORCE_SMBUS;
   11968 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11969 #if 0
   11970 		/* XXX Isn't this required??? */
   11971 		CSR_WRITE_FLUSH(sc);
   11972 #endif
   11973 		delay(50 * 1000);
   11974 		/* FALLTHROUGH */
   11975 	case WM_T_PCH2:
   11976 		if (wm_phy_is_accessible_pchlan(sc) == true)
   11977 			break;
   11978 		/* FALLTHROUGH */
   11979 	case WM_T_PCH:
		if ((sc->sc_type == WM_T_PCH)
		    && ((fwsm & FWSM_FW_VALID) != 0))
			break;
   11983 
   11984 		if (wm_phy_resetisblocked(sc) == true) {
   11985 			printf("XXX reset is blocked(3)\n");
   11986 			break;
   11987 		}
   11988 
   11989 		wm_toggle_lanphypc_pch_lpt(sc);
   11990 
   11991 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11992 			if (wm_phy_is_accessible_pchlan(sc) == true)
   11993 				break;
   11994 
   11995 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11996 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11997 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11998 
   11999 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12000 				break;
   12001 			rv = -1;
   12002 		}
   12003 		break;
   12004 	default:
   12005 		break;
   12006 	}
   12007 
   12008 	/* Release semaphore */
   12009 	sc->phy.release(sc);
   12010 
   12011 	if (rv == 0) {
   12012 		if (wm_phy_resetisblocked(sc)) {
   12013 			printf("XXX reset is blocked(4)\n");
   12014 			goto out;
   12015 		}
   12016 		wm_reset_phy(sc);
   12017 		if (wm_phy_resetisblocked(sc))
   12018 			printf("XXX reset is blocked(4)\n");
   12019 	}
   12020 
   12021 out:
   12022 	/*
   12023 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12024 	 */
   12025 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12026 		delay(10*1000);
   12027 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12028 	}
   12029 }
   12030 
   12031 static void
   12032 wm_init_manageability(struct wm_softc *sc)
   12033 {
   12034 
   12035 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12036 		device_xname(sc->sc_dev), __func__));
   12037 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12038 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12039 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12040 
   12041 		/* Disable hardware interception of ARP */
   12042 		manc &= ~MANC_ARP_EN;
   12043 
   12044 		/* Enable receiving management packets to the host */
   12045 		if (sc->sc_type >= WM_T_82571) {
   12046 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12048 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12049 		}
   12050 
   12051 		CSR_WRITE(sc, WMREG_MANC, manc);
   12052 	}
   12053 }
   12054 
   12055 static void
   12056 wm_release_manageability(struct wm_softc *sc)
   12057 {
   12058 
   12059 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12060 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12061 
   12062 		manc |= MANC_ARP_EN;
   12063 		if (sc->sc_type >= WM_T_82571)
   12064 			manc &= ~MANC_EN_MNG2HOST;
   12065 
   12066 		CSR_WRITE(sc, WMREG_MANC, manc);
   12067 	}
   12068 }
   12069 
   12070 static void
   12071 wm_get_wakeup(struct wm_softc *sc)
   12072 {
   12073 
   12074 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12075 	switch (sc->sc_type) {
   12076 	case WM_T_82573:
   12077 	case WM_T_82583:
   12078 		sc->sc_flags |= WM_F_HAS_AMT;
   12079 		/* FALLTHROUGH */
   12080 	case WM_T_80003:
   12081 	case WM_T_82541:
   12082 	case WM_T_82547:
   12083 	case WM_T_82571:
   12084 	case WM_T_82572:
   12085 	case WM_T_82574:
   12086 	case WM_T_82575:
   12087 	case WM_T_82576:
   12088 	case WM_T_82580:
   12089 	case WM_T_I350:
   12090 	case WM_T_I354:
   12091 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12092 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12093 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12094 		break;
   12095 	case WM_T_ICH8:
   12096 	case WM_T_ICH9:
   12097 	case WM_T_ICH10:
   12098 	case WM_T_PCH:
   12099 	case WM_T_PCH2:
   12100 	case WM_T_PCH_LPT:
   12101 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   12102 		sc->sc_flags |= WM_F_HAS_AMT;
   12103 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12104 		break;
   12105 	default:
   12106 		break;
   12107 	}
   12108 
   12109 	/* 1: HAS_MANAGE */
   12110 	if (wm_enable_mng_pass_thru(sc) != 0)
   12111 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12112 
   12113 #ifdef WM_DEBUG
   12114 	printf("\n");
   12115 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12116 		printf("HAS_AMT,");
   12117 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12118 		printf("ARC_SUBSYS_VALID,");
   12119 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12120 		printf("ASF_FIRMWARE_PRES,");
   12121 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12122 		printf("HAS_MANAGE,");
   12123 	printf("\n");
   12124 #endif
   12125 	/*
	 * Note that the WOL flags are set only after the EEPROM handling
	 * above has completed.
   12128 	 */
   12129 }
   12130 
   12131 /*
   12132  * Unconfigure Ultra Low Power mode.
   12133  * Only for I217 and newer (see below).
   12134  */
   12135 static void
   12136 wm_ulp_disable(struct wm_softc *sc)
   12137 {
   12138 	uint32_t reg;
   12139 	int i = 0;
   12140 
   12141 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12142 		device_xname(sc->sc_dev), __func__));
   12143 	/* Exclude old devices */
   12144 	if ((sc->sc_type < WM_T_PCH_LPT)
   12145 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12146 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12147 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12148 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12149 		return;
   12150 
   12151 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12152 		/* Request ME un-configure ULP mode in the PHY */
   12153 		reg = CSR_READ(sc, WMREG_H2ME);
   12154 		reg &= ~H2ME_ULP;
   12155 		reg |= H2ME_ENFORCE_SETTINGS;
   12156 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12157 
   12158 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12159 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12160 			if (i++ == 30) {
   12161 				printf("%s timed out\n", __func__);
   12162 				return;
   12163 			}
   12164 			delay(10 * 1000);
   12165 		}
   12166 		reg = CSR_READ(sc, WMREG_H2ME);
   12167 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12168 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12169 
   12170 		return;
   12171 	}
   12172 
   12173 	/* Acquire semaphore */
   12174 	sc->phy.acquire(sc);
   12175 
   12176 	/* Toggle LANPHYPC */
   12177 	wm_toggle_lanphypc_pch_lpt(sc);
   12178 
   12179 	/* Unforce SMBus mode in PHY */
   12180 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12181 	if (reg == 0x0000 || reg == 0xffff) {
   12182 		uint32_t reg2;
   12183 
   12184 		printf("%s: Force SMBus first.\n", __func__);
   12185 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12186 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12187 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12188 		delay(50 * 1000);
   12189 
   12190 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12191 	}
   12192 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12193 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12194 
   12195 	/* Unforce SMBus mode in MAC */
   12196 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12197 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12198 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12199 
   12200 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12201 	reg |= HV_PM_CTRL_K1_ENA;
   12202 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12203 
   12204 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12205 	reg &= ~(I218_ULP_CONFIG1_IND
   12206 	    | I218_ULP_CONFIG1_STICKY_ULP
   12207 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12208 	    | I218_ULP_CONFIG1_WOL_HOST
   12209 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12210 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12211 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12212 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12213 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12214 	reg |= I218_ULP_CONFIG1_START;
   12215 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12216 
   12217 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12218 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12219 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12220 
   12221 	/* Release semaphore */
   12222 	sc->phy.release(sc);
   12223 	wm_gmii_reset(sc);
   12224 	delay(50 * 1000);
   12225 }
   12226 
   12227 /* WOL in the newer chipset interfaces (pchlan) */
   12228 static void
   12229 wm_enable_phy_wakeup(struct wm_softc *sc)
   12230 {
   12231 #if 0
   12232 	uint16_t preg;
   12233 
   12234 	/* Copy MAC RARs to PHY RARs */
   12235 
   12236 	/* Copy MAC MTA to PHY MTA */
   12237 
   12238 	/* Configure PHY Rx Control register */
   12239 
   12240 	/* Enable PHY wakeup in MAC register */
   12241 
   12242 	/* Configure and enable PHY wakeup in PHY registers */
   12243 
   12244 	/* Activate PHY wakeup */
   12245 
   12246 	/* XXX */
   12247 #endif
   12248 }
   12249 
   12250 /* Power down workaround on D3 */
   12251 static void
   12252 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12253 {
   12254 	uint32_t reg;
   12255 	int i;
   12256 
   12257 	for (i = 0; i < 2; i++) {
   12258 		/* Disable link */
   12259 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12260 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12261 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12262 
   12263 		/*
   12264 		 * Call gig speed drop workaround on Gig disable before
   12265 		 * accessing any PHY registers
   12266 		 */
   12267 		if (sc->sc_type == WM_T_ICH8)
   12268 			wm_gig_downshift_workaround_ich8lan(sc);
   12269 
   12270 		/* Write VR power-down enable */
   12271 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12272 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12273 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12274 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12275 
   12276 		/* Read it back and test */
   12277 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12278 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12279 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12280 			break;
   12281 
   12282 		/* Issue PHY reset and repeat at most one more time */
   12283 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12284 	}
   12285 }
   12286 
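/*
 * Arm the hardware for wake-on-LAN: advertise APME, apply the ICH/PCH
 * workarounds, enable the magic packet filter and request PME through
 * PCI power management.
 */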
   12287 static void
   12288 wm_enable_wakeup(struct wm_softc *sc)
   12289 {
   12290 	uint32_t reg, pmreg;
   12291 	pcireg_t pmode;
   12292 
   12293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12294 		device_xname(sc->sc_dev), __func__));
   12295 
   12296 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12297 		&pmreg, NULL) == 0)
   12298 		return;
   12299 
   12300 	/* Advertise the wakeup capability */
   12301 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12302 	    | CTRL_SWDPIN(3));
   12303 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12304 
   12305 	/* ICH workaround */
   12306 	switch (sc->sc_type) {
   12307 	case WM_T_ICH8:
   12308 	case WM_T_ICH9:
   12309 	case WM_T_ICH10:
   12310 	case WM_T_PCH:
   12311 	case WM_T_PCH2:
   12312 	case WM_T_PCH_LPT:
   12313 	case WM_T_PCH_SPT:
   12314 		/* Disable gig during WOL */
   12315 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12316 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12317 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12318 		if (sc->sc_type == WM_T_PCH)
   12319 			wm_gmii_reset(sc);
   12320 
   12321 		/* Power down workaround */
   12322 		if (sc->sc_phytype == WMPHY_82577) {
   12323 			struct mii_softc *child;
   12324 
   12325 			/* Assume that the PHY is copper */
   12326 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12327 			if (child->mii_mpd_rev <= 2)
   12328 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12329 				    (768 << 5) | 25, 0x0444); /* magic num */
   12330 		}
   12331 		break;
   12332 	default:
   12333 		break;
   12334 	}
   12335 
   12336 	/* Keep the laser running on fiber adapters */
   12337 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12338 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12339 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12340 		reg |= CTRL_EXT_SWDPIN(3);
   12341 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12342 	}
   12343 
   12344 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12345 #if 0	/* for the multicast packet */
   12346 	reg |= WUFC_MC;
   12347 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12348 #endif
   12349 
   12350 	if (sc->sc_type >= WM_T_PCH)
   12351 		wm_enable_phy_wakeup(sc);
   12352 	else {
   12353 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   12354 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12355 	}
   12356 
   12357 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12358 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12359 		|| (sc->sc_type == WM_T_PCH2))
   12360 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12361 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12362 
   12363 	/* Request PME */
   12364 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12365 #if 0
   12366 	/* Disable WOL */
   12367 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12368 #else
   12369 	/* For WOL */
   12370 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12371 #endif
   12372 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12373 }
   12374 
   12375 /* LPLU */
   12376 
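/*
 * Disable Low Power Link Up in D0 so the link can come up at full
 * speed; the PCH variant uses the PHY's HV_OEM_BITS register instead
 * of the MAC's PHY_CTRL register.
 */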
   12377 static void
   12378 wm_lplu_d0_disable(struct wm_softc *sc)
   12379 {
   12380 	uint32_t reg;
   12381 
   12382 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12383 		device_xname(sc->sc_dev), __func__));
   12384 
   12385 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12386 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12387 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12388 }
   12389 
   12390 static void
   12391 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12392 {
   12393 	uint32_t reg;
   12394 
   12395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12396 		device_xname(sc->sc_dev), __func__));
   12397 
   12398 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12399 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12400 	reg |= HV_OEM_BITS_ANEGNOW;
   12401 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12402 }
   12403 
   12404 /* EEE */
   12405 
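/*
 * Enable or disable Energy Efficient Ethernet (100Mbps and 1Gbps
 * autonegotiation) on I350-class chips according to WM_F_EEE.
 */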
   12406 static void
   12407 wm_set_eee_i350(struct wm_softc *sc)
   12408 {
   12409 	uint32_t ipcnfg, eeer;
   12410 
   12411 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12412 	eeer = CSR_READ(sc, WMREG_EEER);
   12413 
   12414 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12415 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12416 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12417 		    | EEER_LPI_FC);
   12418 	} else {
   12419 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12420 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12421 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12422 		    | EEER_LPI_FC);
   12423 	}
   12424 
   12425 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12426 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12427 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12428 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12429 }
   12430 
   12431 /*
   12432  * Workarounds (mainly PHY related).
   12433  * Basically, PHY's workarounds are in the PHY drivers.
   12434  */
   12435 
   12436 /* Work-around for 82566 Kumeran PCS lock loss */
   12437 static void
   12438 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12439 {
   12440 #if 0
   12441 	int miistatus, active, i;
   12442 	int reg;
   12443 
   12444 	miistatus = sc->sc_mii.mii_media_status;
   12445 
   12446 	/* If the link is not up, do nothing */
   12447 	if ((miistatus & IFM_ACTIVE) == 0)
   12448 		return;
   12449 
   12450 	active = sc->sc_mii.mii_media_active;
   12451 
   12452 	/* Nothing to do if the link is other than 1Gbps */
   12453 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12454 		return;
   12455 
   12456 	for (i = 0; i < 10; i++) {
   12457 		/* read twice */
   12458 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12459 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12460 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12461 			goto out;	/* GOOD! */
   12462 
   12463 		/* Reset the PHY */
   12464 		wm_gmii_reset(sc);
   12465 		delay(5*1000);
   12466 	}
   12467 
   12468 	/* Disable GigE link negotiation */
   12469 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12470 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12471 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12472 
   12473 	/*
   12474 	 * Call gig speed drop workaround on Gig disable before accessing
   12475 	 * any PHY registers.
   12476 	 */
   12477 	wm_gig_downshift_workaround_ich8lan(sc);
   12478 
   12479 out:
   12480 	return;
   12481 #endif
   12482 }
   12483 
   12484 /* WOL from S5 stops working */
   12485 static void
   12486 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12487 {
   12488 	uint16_t kmrn_reg;
   12489 
   12490 	/* Only for igp3 */
   12491 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12492 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12493 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12494 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12495 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12496 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12497 	}
   12498 }
   12499 
   12500 /*
   12501  * Workaround for pch's PHYs
   12502  * XXX should be moved to new PHY driver?
   12503  */
   12504 static void
   12505 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12506 {
   12507 
   12508 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12509 		device_xname(sc->sc_dev), __func__));
   12510 	KASSERT(sc->sc_type == WM_T_PCH);
   12511 
   12512 	if (sc->sc_phytype == WMPHY_82577)
   12513 		wm_set_mdio_slow_mode_hv(sc);
   12514 
   12515 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12516 
   12517 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12518 
   12519 	/* 82578 */
   12520 	if (sc->sc_phytype == WMPHY_82578) {
   12521 		struct mii_softc *child;
   12522 
   12523 		/*
   12524 		 * Return registers to default by doing a soft reset then
   12525 		 * writing 0x3140 to the control register
   12526 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12527 		 */
   12528 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12529 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12530 			PHY_RESET(child);
   12531 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12532 			    0x3140);
   12533 		}
   12534 	}
   12535 
   12536 	/* Select page 0 */
   12537 	sc->phy.acquire(sc);
   12538 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12539 	sc->phy.release(sc);
   12540 
   12541 	/*
   12542 	 * Configure the K1 Si workaround during phy reset assuming there is
   12543 	 * link so that it disables K1 if link is in 1Gbps.
   12544 	 */
   12545 	wm_k1_gig_workaround_hv(sc, 1);
   12546 }
   12547 
   12548 static void
   12549 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12550 {
   12551 
   12552 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12553 		device_xname(sc->sc_dev), __func__));
   12554 	KASSERT(sc->sc_type == WM_T_PCH2);
   12555 
   12556 	wm_set_mdio_slow_mode_hv(sc);
   12557 }
   12558 
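/*
 * K1 is a Kumeran power save state.  Disable it and apply the link
 * stall fix while link is up; otherwise restore the NVM default.
 */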
   12559 static int
   12560 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12561 {
   12562 	int k1_enable = sc->sc_nvm_k1_enabled;
   12563 
   12564 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12565 		device_xname(sc->sc_dev), __func__));
   12566 
   12567 	if (sc->phy.acquire(sc) != 0)
   12568 		return -1;
   12569 
   12570 	if (link) {
   12571 		k1_enable = 0;
   12572 
   12573 		/* Link stall fix for link up */
   12574 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12575 	} else {
   12576 		/* Link stall fix for link down */
   12577 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12578 	}
   12579 
   12580 	wm_configure_k1_ich8lan(sc, k1_enable);
   12581 	sc->phy.release(sc);
   12582 
   12583 	return 0;
   12584 }
   12585 
   12586 static void
   12587 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12588 {
   12589 	uint32_t reg;
   12590 
   12591 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12592 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12593 	    reg | HV_KMRN_MDIO_SLOW);
   12594 }
   12595 
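/*
 * Write the K1 enable bit over the Kumeran interface, then briefly
 * force the MAC speed settings (FRCSPD/SPD_BYPS) so the new value
 * takes effect.
 */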
   12596 static void
   12597 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12598 {
   12599 	uint32_t ctrl, ctrl_ext, tmp;
   12600 	uint16_t kmrn_reg;
   12601 
   12602 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12603 
   12604 	if (k1_enable)
   12605 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12606 	else
   12607 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12608 
   12609 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12610 
   12611 	delay(20);
   12612 
   12613 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12614 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12615 
   12616 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12617 	tmp |= CTRL_FRCSPD;
   12618 
   12619 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12620 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12621 	CSR_WRITE_FLUSH(sc);
   12622 	delay(20);
   12623 
   12624 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12625 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12626 	CSR_WRITE_FLUSH(sc);
   12627 	delay(20);
   12628 }
   12629 
/* Special case: the 82575 needs manual initialization ... */
   12631 static void
   12632 wm_reset_init_script_82575(struct wm_softc *sc)
   12633 {
   12634 	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  The setup is the same as in the FreeBSD driver for
	 * the i82575.
   12637 	 */
   12638 
   12639 	/* SerDes configuration via SERDESCTRL */
   12640 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12642 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12643 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12644 
   12645 	/* CCM configuration via CCMCTL register */
   12646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12648 
   12649 	/* PCIe lanes configuration */
   12650 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12653 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12654 
   12655 	/* PCIe PLL Configuration */
   12656 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12657 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12658 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12659 }
   12660 
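/*
 * After a reset, restore the external/shared MDIO configuration in
 * MDICNFG from the NVM; only needed in SGMII mode.
 */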
   12661 static void
   12662 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12663 {
   12664 	uint32_t reg;
   12665 	uint16_t nvmword;
   12666 	int rv;
   12667 
   12668 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12669 		return;
   12670 
   12671 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12672 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12673 	if (rv != 0) {
   12674 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12675 		    __func__);
   12676 		return;
   12677 	}
   12678 
   12679 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12680 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12681 		reg |= MDICNFG_DEST;
   12682 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12683 		reg |= MDICNFG_COM_MDIO;
   12684 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12685 }
   12686 
   12687 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   12688 
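/*
 * Probe the PHY by reading its ID registers, retrying in slow MDIO
 * mode on pre-LPT chips; on LPT/SPT, unforce SMBus mode if the ME is
 * not active.  Must be called with the PHY semaphore held.
 */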
   12689 static bool
   12690 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   12691 {
   12692 	int i;
   12693 	uint32_t reg;
   12694 	uint16_t id1, id2;
   12695 
   12696 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12697 		device_xname(sc->sc_dev), __func__));
   12698 	id1 = id2 = 0xffff;
   12699 	for (i = 0; i < 2; i++) {
   12700 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   12701 		if (MII_INVALIDID(id1))
   12702 			continue;
   12703 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   12704 		if (MII_INVALIDID(id2))
   12705 			continue;
   12706 		break;
   12707 	}
   12708 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   12709 		goto out;
   12710 	}
   12711 
   12712 	if (sc->sc_type < WM_T_PCH_LPT) {
   12713 		sc->phy.release(sc);
   12714 		wm_set_mdio_slow_mode_hv(sc);
   12715 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   12716 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   12717 		sc->phy.acquire(sc);
   12718 	}
   12719 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   12720 		printf("XXX return with false\n");
   12721 		return false;
   12722 	}
   12723 out:
   12724 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   12725 		/* Only unforce SMBus if ME is not active */
   12726 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   12727 			/* Unforce SMBus mode in PHY */
   12728 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   12729 			    CV_SMB_CTRL);
   12730 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12731 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   12732 			    CV_SMB_CTRL, reg);
   12733 
   12734 			/* Unforce SMBus mode in MAC */
   12735 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12736 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12737 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12738 		}
   12739 	}
   12740 	return true;
   12741 }
   12742 
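/*
 * Toggle the LANPHYPC pin to power cycle the PHY, then wait for the
 * cycle to complete (PCH_LPT and newer poll CTRL_EXT_LPCD).
 */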
   12743 static void
   12744 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   12745 {
   12746 	uint32_t reg;
   12747 	int i;
   12748 
   12749 	/* Set PHY Config Counter to 50msec */
   12750 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   12751 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   12752 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   12753 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   12754 
   12755 	/* Toggle LANPHYPC */
   12756 	reg = CSR_READ(sc, WMREG_CTRL);
   12757 	reg |= CTRL_LANPHYPC_OVERRIDE;
   12758 	reg &= ~CTRL_LANPHYPC_VALUE;
   12759 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12760 	CSR_WRITE_FLUSH(sc);
   12761 	delay(1000);
   12762 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   12763 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12764 	CSR_WRITE_FLUSH(sc);
   12765 
   12766 	if (sc->sc_type < WM_T_PCH_LPT)
   12767 		delay(50 * 1000);
   12768 	else {
   12769 		i = 20;
   12770 
   12771 		do {
   12772 			delay(5 * 1000);
   12773 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   12774 		    && i--);
   12775 
   12776 		delay(30 * 1000);
   12777 	}
   12778 }
   12779 
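/*
 * Program the PCIe LTR (Latency Tolerance Reporting) value for
 * PCH_LPT: derive it from the Rx buffer size and the link speed,
 * clamp it to the platform maximum from PCI config space, then send
 * it to the root complex.
 */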
   12780 static int
   12781 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   12782 {
   12783 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   12784 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   12785 	uint32_t rxa;
   12786 	uint16_t scale = 0, lat_enc = 0;
   12787 	int64_t lat_ns, value;
   12788 
   12789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12790 		device_xname(sc->sc_dev), __func__));
   12791 
   12792 	if (link) {
   12793 		pcireg_t preg;
   12794 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   12795 
   12796 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   12797 
   12798 		/*
   12799 		 * Determine the maximum latency tolerated by the device.
   12800 		 *
   12801 		 * Per the PCIe spec, the tolerated latencies are encoded as
   12802 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   12803 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   12804 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   12805 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   12806 		 */
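		/*
		 * Worked example (illustrative): lat_ns = 50000 needs
		 * scale 2, since howmany(howmany(50000, 32), 32) == 49
		 * fits in LTRV_VALUE; the encoded latency is then
		 * 49 * 2^10 ns, about 50us.
		 */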
   12807 		lat_ns = ((int64_t)rxa * 1024 -
   12808 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   12809 		if (lat_ns < 0)
   12810 			lat_ns = 0;
   12811 		else {
   12812 			uint32_t status;
   12813 			uint16_t speed;
   12814 
   12815 			status = CSR_READ(sc, WMREG_STATUS);
   12816 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   12817 			case STATUS_SPEED_10:
   12818 				speed = 10;
   12819 				break;
   12820 			case STATUS_SPEED_100:
   12821 				speed = 100;
   12822 				break;
   12823 			case STATUS_SPEED_1000:
   12824 				speed = 1000;
   12825 				break;
   12826 			default:
   12827 				printf("%s: Unknown speed (status = %08x)\n",
   12828 				    device_xname(sc->sc_dev), status);
   12829 				return -1;
   12830 			}
   12831 			lat_ns /= speed;
   12832 		}
   12833 		value = lat_ns;
   12834 
   12835 		while (value > LTRV_VALUE) {
			scale++;
   12837 			value = howmany(value, __BIT(5));
   12838 		}
   12839 		if (scale > LTRV_SCALE_MAX) {
   12840 			printf("%s: Invalid LTR latency scale %d\n",
   12841 			    device_xname(sc->sc_dev), scale);
   12842 			return -1;
   12843 		}
   12844 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   12845 
   12846 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12847 		    WM_PCI_LTR_CAP_LPT);
   12848 		max_snoop = preg & 0xffff;
   12849 		max_nosnoop = preg >> 16;
   12850 
   12851 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   12852 
   12853 		if (lat_enc > max_ltr_enc) {
   12854 			lat_enc = max_ltr_enc;
   12855 		}
   12856 	}
	/* Snoop and No-Snoop latencies are set to the same value */
   12858 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   12859 	CSR_WRITE(sc, WMREG_LTRV, reg);
   12860 
   12861 	return 0;
   12862 }
   12863 
   12864 /*
   12865  * I210 Errata 25 and I211 Errata 10
   12866  * Slow System Clock.
   12867  */
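/*
 * If the internal PHY's PLL fails to lock, reset the PHY and bounce
 * the chip through D3hot with a patched iNVM autoload word, retrying
 * up to WM_MAX_PLL_TRIES times.
 */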
   12868 static void
   12869 wm_pll_workaround_i210(struct wm_softc *sc)
   12870 {
   12871 	uint32_t mdicnfg, wuc;
   12872 	uint32_t reg;
   12873 	pcireg_t pcireg;
   12874 	uint32_t pmreg;
   12875 	uint16_t nvmword, tmp_nvmword;
   12876 	int phyval;
   12877 	bool wa_done = false;
   12878 	int i;
   12879 
   12880 	/* Save WUC and MDICNFG registers */
   12881 	wuc = CSR_READ(sc, WMREG_WUC);
   12882 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12883 
   12884 	reg = mdicnfg & ~MDICNFG_DEST;
   12885 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12886 
   12887 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12888 		nvmword = INVM_DEFAULT_AL;
   12889 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12890 
   12891 	/* Get Power Management cap offset */
   12892 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12893 		&pmreg, NULL) == 0)
   12894 		return;
   12895 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12896 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12897 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12898 
   12899 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12900 			break; /* OK */
   12901 		}
   12902 
   12903 		wa_done = true;
   12904 		/* Directly reset the internal PHY */
   12905 		reg = CSR_READ(sc, WMREG_CTRL);
   12906 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12907 
   12908 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12909 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12910 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12911 
   12912 		CSR_WRITE(sc, WMREG_WUC, 0);
   12913 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12914 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12915 
   12916 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12917 		    pmreg + PCI_PMCSR);
   12918 		pcireg |= PCI_PMCSR_STATE_D3;
   12919 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12920 		    pmreg + PCI_PMCSR, pcireg);
   12921 		delay(1000);
   12922 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12923 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12924 		    pmreg + PCI_PMCSR, pcireg);
   12925 
   12926 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12927 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12928 
   12929 		/* Restore WUC register */
   12930 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12931 	}
   12932 
   12933 	/* Restore MDICNFG setting */
   12934 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12935 	if (wa_done)
   12936 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12937 }
   12938