      1 /*	$NetBSD: if_wm.c,v 1.463 2017/01/10 08:22:43 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.463 2017/01/10 08:22:43 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
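
/*
 * Illustrative sketch (not part of the driver proper): with WM_DEBUG
 * defined, DPRINTF() takes a bitmask selecting one of the debug
 * classes above and a parenthesized printf-style argument list, e.g.:
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif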
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this driver uses: one per queue,
    167  * plus one for the link interrupt.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
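
/*
 * Worked example of the wrap-around above (illustrative): ring and
 * queue sizes are powers of two, so the "next index" computation masks
 * instead of using a modulo, e.g. with 4096 descriptors:
 *
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 */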
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
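
/*
 * Worked example of the sizing comment above (assuming a 9018-byte
 * jumbo frame, i.e. a 9000-byte MTU plus Ethernet header and CRC):
 * each such frame needs howmany(9018, MCLBYTES) = 5 buffers, so 256
 * descriptors leave room for 256 / 5 = 51, i.e. roughly 50 jumbos.
 */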
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
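
/*
 * A note on intended use (an assumption, based on the declaration of
 * wm_rxpbs_adjust_82580() below, where the real lookup lives): the
 * RXPBS size field appears to index this table to give the packet
 * buffer size in KB, e.g. index 2 -> 144 KB.
 */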
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
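
/*
 * Illustrative expansion (a sketch, not compiled code): for queue 0 of
 * device "wm0", WM_Q_EVCNT_DEFINE(txq, txdw) declares the per-queue
 * counter storage, and the matching WM_Q_EVCNT_ATTACH() registers it
 * under the generated name "txq00txdw":
 */
#if 0
	/* WM_Q_EVCNT_DEFINE(txq, txdw) expands to roughly: */
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif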
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* size of one Tx descriptor */
    291 	txdescs_t *txq_descs_u;
    292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segments */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs. This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    365 	int rxq_desc_rseg;		/* real number of control segments */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
    390 	int wmq_intr_idx;		/* index into the MSI-X vector table */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 	int reset_delay_us;
    400 };
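
/*
 * Intended call pattern (illustrative; the concrete acquire/release
 * implementations are the wm_get_*()/wm_put_*() semaphore functions
 * declared below): PHY accesses are bracketed by the chip-specific
 * hooks.
 */
#if 0
	if (sc->phy.acquire(sc) != 0)
		return;
	/* ... access PHY registers ... */
	sc->phy.release(sc);
#endif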
    401 
    402 /*
    403  * Software state per device.
    404  */
    405 struct wm_softc {
    406 	device_t sc_dev;		/* generic device information */
    407 	bus_space_tag_t sc_st;		/* bus space tag */
    408 	bus_space_handle_t sc_sh;	/* bus space handle */
    409 	bus_size_t sc_ss;		/* bus space size */
    410 	bus_space_tag_t sc_iot;		/* I/O space tag */
    411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    412 	bus_size_t sc_ios;		/* I/O space size */
    413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    415 	bus_size_t sc_flashs;		/* flash registers space size */
    416 	off_t sc_flashreg_offset;	/*
    417 					 * offset to flash registers from
    418 					 * start of BAR
    419 					 */
    420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    421 
    422 	struct ethercom sc_ethercom;	/* ethernet common data */
    423 	struct mii_data sc_mii;		/* MII/media information */
    424 
    425 	pci_chipset_tag_t sc_pc;
    426 	pcitag_t sc_pcitag;
    427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    429 
    430 	uint16_t sc_pcidevid;		/* PCI device ID */
    431 	wm_chip_type sc_type;		/* MAC type */
    432 	int sc_rev;			/* MAC revision */
    433 	wm_phy_type sc_phytype;		/* PHY type */
    434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    435 #define	WM_MEDIATYPE_UNKNOWN		0x00
    436 #define	WM_MEDIATYPE_FIBER		0x01
    437 #define	WM_MEDIATYPE_COPPER		0x02
    438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    440 	int sc_flags;			/* flags; see below */
    441 	int sc_if_flags;		/* last if_flags */
    442 	int sc_flowflags;		/* 802.3x flow control flags */
    443 	int sc_align_tweak;
    444 
    445 	void *sc_ihs[WM_MAX_NINTR];	/*
    446 					 * interrupt cookies.
    447 					 * Legacy and MSI use sc_ihs[0].
    448 					 */
    449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    450 	int sc_nintrs;			/* number of interrupts */
    451 
    452 	int sc_link_intr_idx;		/* index into the MSI-X vector table */
    453 
    454 	callout_t sc_tick_ch;		/* tick callout */
    455 	bool sc_core_stopping;
    456 
    457 	int sc_nvm_ver_major;
    458 	int sc_nvm_ver_minor;
    459 	int sc_nvm_ver_build;
    460 	int sc_nvm_addrbits;		/* NVM address bits */
    461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    462 	int sc_ich8_flash_base;
    463 	int sc_ich8_flash_bank_size;
    464 	int sc_nvm_k1_enabled;
    465 
    466 	int sc_nqueues;
    467 	struct wm_queue *sc_queue;
    468 
    469 	int sc_affinity_offset;
    470 
    471 #ifdef WM_EVENT_COUNTERS
    472 	/* Event counters. */
    473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    474 
    475 	/* WM_T_82542_2_1 only */
    476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    481 #endif /* WM_EVENT_COUNTERS */
    482 
    483 	/* This variable is used only on the 82547. */
    484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    485 
    486 	uint32_t sc_ctrl;		/* prototype CTRL register */
    487 #if 0
    488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    489 #endif
    490 	uint32_t sc_icr;		/* prototype interrupt bits */
    491 	uint32_t sc_itr;		/* prototype intr throttling reg */
    492 	uint32_t sc_tctl;		/* prototype TCTL register */
    493 	uint32_t sc_rctl;		/* prototype RCTL register */
    494 	uint32_t sc_txcw;		/* prototype TXCW register */
    495 	uint32_t sc_tipg;		/* prototype TIPG register */
    496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    497 	uint32_t sc_pba;		/* prototype PBA register */
    498 
    499 	int sc_tbi_linkup;		/* TBI link status */
    500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    502 
    503 	int sc_mchash_type;		/* multicast filter offset */
    504 
    505 	krndsource_t rnd_source;	/* random source */
    506 
    507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    508 
    509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    510 	kmutex_t *sc_ich_phymtx;	/*
    511 					 * 82574/82583/ICH/PCH specific PHY
    512 					 * mutex. For 82574/82583, the mutex
    513 					 * is used for both PHY and NVM.
    514 					 */
    515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    516 
    517 	struct wm_phyop phy;
    518 };
    519 
    520 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    521 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
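
/*
 * Typical usage (illustrative): core state is changed under the core
 * lock, and helpers that rely on it can assert WM_CORE_LOCKED():
 */
#if 0
	WM_CORE_LOCK(sc);
	/* ... update sc_flags, sc_if_flags, ... */
	WM_CORE_UNLOCK(sc);

	KASSERT(WM_CORE_LOCKED(sc));	/* in a caller-locked helper */
#endif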
    523 
    524 #ifdef WM_MPSAFE
    525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    526 #else
    527 #define CALLOUT_FLAGS	0
    528 #endif
    529 
    530 #define	WM_RXCHAIN_RESET(rxq)						\
    531 do {									\
    532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    533 	*(rxq)->rxq_tailp = NULL;					\
    534 	(rxq)->rxq_len = 0;						\
    535 } while (/*CONSTCOND*/0)
    536 
    537 #define	WM_RXCHAIN_LINK(rxq, m)						\
    538 do {									\
    539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    540 	(rxq)->rxq_tailp = &(m)->m_next;				\
    541 } while (/*CONSTCOND*/0)
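
/*
 * Walkthrough (illustrative): after WM_RXCHAIN_RESET(), rxq_tailp
 * points at rxq_head, so the first WM_RXCHAIN_LINK() sets the head and
 * every later call appends to the previous mbuf's m_next in O(1):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL */
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head = m0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1 */
#endif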
    542 
    543 #ifdef WM_EVENT_COUNTERS
    544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    546 
    547 #define WM_Q_EVCNT_INCR(qname, evname)			\
    548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    551 #else /* !WM_EVENT_COUNTERS */
    552 #define	WM_EVCNT_INCR(ev)	/* nothing */
    553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    554 
    555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    557 #endif /* !WM_EVENT_COUNTERS */
    558 
    559 #define	CSR_READ(sc, reg)						\
    560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    561 #define	CSR_WRITE(sc, reg, val)						\
    562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    563 #define	CSR_WRITE_FLUSH(sc)						\
    564 	(void) CSR_READ((sc), WMREG_STATUS)
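
/*
 * Typical read-modify-write (illustrative; the register and bit names
 * are from if_wmreg.h): posted writes are forced out to the hardware
 * by reading back the STATUS register.
 */
#if 0
	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl |= CTRL_SLU;		/* set link up */
	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE_FLUSH(sc);
#endif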
    565 
    566 #define ICH8_FLASH_READ32(sc, reg)					\
    567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    568 	    (reg) + (sc)->sc_flashreg_offset)
    569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    571 	    (reg) + (sc)->sc_flashreg_offset, (data))
    572 
    573 #define ICH8_FLASH_READ16(sc, reg)					\
    574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    575 	    (reg) + (sc)->sc_flashreg_offset)
    576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + (sc)->sc_flashreg_offset, (data))
    579 
    580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    582 
    583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    584 #define	WM_CDTXADDR_HI(txq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    587 
    588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    589 #define	WM_CDRXADDR_HI(rxq, x)						\
    590 	(sizeof(bus_addr_t) == 8 ?					\
    591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
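
/*
 * Illustrative sketch (assuming the per-queue WMREG_TDBAH()/
 * WMREG_TDBAL() register macros from if_wmreg.h): the hardware takes
 * a ring base address as two 32-bit halves, which the _HI/_LO macros
 * split; with a 32-bit bus_addr_t the high half is simply 0.
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
#endif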
    592 
    593 /*
    594  * Register read/write functions.
    595  * Other than CSR_{READ|WRITE}().
    596  */
    597 #if 0
    598 static inline uint32_t wm_io_read(struct wm_softc *, int);
    599 #endif
    600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    602 	uint32_t, uint32_t);
    603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    604 
    605 /*
    606  * Descriptor sync/init functions.
    607  */
    608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    611 
    612 /*
    613  * Device driver interface functions and commonly used functions.
    614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    615  */
    616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    617 static int	wm_match(device_t, cfdata_t, void *);
    618 static void	wm_attach(device_t, device_t, void *);
    619 static int	wm_detach(device_t, int);
    620 static bool	wm_suspend(device_t, const pmf_qual_t *);
    621 static bool	wm_resume(device_t, const pmf_qual_t *);
    622 static void	wm_watchdog(struct ifnet *);
    623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    624 static void	wm_tick(void *);
    625 static int	wm_ifflags_cb(struct ethercom *);
    626 static int	wm_ioctl(struct ifnet *, u_long, void *);
    627 /* MAC address related */
    628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    632 static void	wm_set_filter(struct wm_softc *);
    633 /* Reset and init related */
    634 static void	wm_set_vlan(struct wm_softc *);
    635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    636 static void	wm_get_auto_rd_done(struct wm_softc *);
    637 static void	wm_lan_init_done(struct wm_softc *);
    638 static void	wm_get_cfg_done(struct wm_softc *);
    639 static void	wm_initialize_hardware_bits(struct wm_softc *);
    640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    641 static void	wm_reset_phy(struct wm_softc *);
    642 static void	wm_flush_desc_rings(struct wm_softc *);
    643 static void	wm_reset(struct wm_softc *);
    644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    645 static void	wm_rxdrain(struct wm_rxqueue *);
    646 static void	wm_rss_getkey(uint8_t *);
    647 static void	wm_init_rss(struct wm_softc *);
    648 static void	wm_adjust_qnum(struct wm_softc *, int);
    649 static int	wm_setup_legacy(struct wm_softc *);
    650 static int	wm_setup_msix(struct wm_softc *);
    651 static int	wm_init(struct ifnet *);
    652 static int	wm_init_locked(struct ifnet *);
    653 static void	wm_turnon(struct wm_softc *);
    654 static void	wm_turnoff(struct wm_softc *);
    655 static void	wm_stop(struct ifnet *, int);
    656 static void	wm_stop_locked(struct ifnet *, int);
    657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    658 static void	wm_82547_txfifo_stall(void *);
    659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    660 /* DMA related */
    661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    665     struct wm_txqueue *);
    666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    669     struct wm_rxqueue *);
    670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    677     struct wm_txqueue *);
    678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_txrx_queues(struct wm_softc *);
    681 static void	wm_free_txrx_queues(struct wm_softc *);
    682 static int	wm_init_txrx_queues(struct wm_softc *);
    683 /* Start */
    684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    685     uint32_t *, uint8_t *);
    686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    687 static void	wm_start(struct ifnet *);
    688 static void	wm_start_locked(struct ifnet *);
    689 static int	wm_transmit(struct ifnet *, struct mbuf *);
    690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    694 static void	wm_nq_start(struct ifnet *);
    695 static void	wm_nq_start_locked(struct ifnet *);
    696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    699 static void	wm_deferred_start(struct ifnet *);
    700 /* Interrupt */
    701 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    702 static void	wm_rxeof(struct wm_rxqueue *);
    703 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    704 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    705 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    706 static void	wm_linkintr(struct wm_softc *, uint32_t);
    707 static int	wm_intr_legacy(void *);
    708 static int	wm_txrxintr_msix(void *);
    709 static int	wm_linkintr_msix(void *);
    710 
    711 /*
    712  * Media related.
    713  * GMII, SGMII, TBI, SERDES and SFP.
    714  */
    715 /* Common */
    716 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    717 /* GMII related */
    718 static void	wm_gmii_reset(struct wm_softc *);
    719 static int	wm_get_phy_id_82575(struct wm_softc *);
    720 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    721 static int	wm_gmii_mediachange(struct ifnet *);
    722 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    723 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    724 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    725 static int	wm_gmii_i82543_readreg(device_t, int, int);
    726 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    727 static int	wm_gmii_mdic_readreg(device_t, int, int);
    728 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    729 static int	wm_gmii_i82544_readreg(device_t, int, int);
    730 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    731 static int	wm_gmii_i80003_readreg(device_t, int, int);
    732 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    733 static int	wm_gmii_bm_readreg(device_t, int, int);
    734 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    735 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    736 static int	wm_gmii_hv_readreg(device_t, int, int);
    737 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    738 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    739 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    740 static int	wm_gmii_82580_readreg(device_t, int, int);
    741 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    742 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    743 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    744 static void	wm_gmii_statchg(struct ifnet *);
    745 /*
    746  * Kumeran-related (80003, ICH* and PCH*).
    747  * These functions are not for accessing MII registers but for accessing
    748  * Kumeran-specific registers.
    749  */
    750 static int	wm_kmrn_readreg(struct wm_softc *, int);
    751 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    752 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    753 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    754 /* SGMII */
    755 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    756 static int	wm_sgmii_readreg(device_t, int, int);
    757 static void	wm_sgmii_writereg(device_t, int, int, int);
    758 /* TBI related */
    759 static void	wm_tbi_mediainit(struct wm_softc *);
    760 static int	wm_tbi_mediachange(struct ifnet *);
    761 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    762 static int	wm_check_for_link(struct wm_softc *);
    763 static void	wm_tbi_tick(struct wm_softc *);
    764 /* SERDES related */
    765 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    766 static int	wm_serdes_mediachange(struct ifnet *);
    767 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    768 static void	wm_serdes_tick(struct wm_softc *);
    769 /* SFP related */
    770 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    771 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    772 
    773 /*
    774  * NVM related.
    775  * Microwire, SPI (w/wo EERD) and Flash.
    776  */
    777 /* Misc functions */
    778 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    779 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    780 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    781 /* Microwire */
    782 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    783 /* SPI */
    784 static int	wm_nvm_ready_spi(struct wm_softc *);
    785 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    786 /* Used with EERD */
    787 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    788 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    789 /* Flash */
    790 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    791     unsigned int *);
    792 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    793 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    794 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    795 	uint32_t *);
    796 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    797 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    798 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    799 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    800 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    801 /* iNVM */
    802 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    803 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    804 /* Lock, detect NVM type, validate checksum and read */
    805 static int	wm_nvm_acquire(struct wm_softc *);
    806 static void	wm_nvm_release(struct wm_softc *);
    807 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    808 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    809 static int	wm_nvm_validate_checksum(struct wm_softc *);
    810 static void	wm_nvm_version_invm(struct wm_softc *);
    811 static void	wm_nvm_version(struct wm_softc *);
    812 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    813 
    814 /*
    815  * Hardware semaphores.
    816  * Very complex...
    817  */
    818 static int	wm_get_null(struct wm_softc *);
    819 static void	wm_put_null(struct wm_softc *);
    820 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    821 static void	wm_put_swsm_semaphore(struct wm_softc *);
    822 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    823 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    824 static int	wm_get_phy_82575(struct wm_softc *);
    825 static void	wm_put_phy_82575(struct wm_softc *);
    826 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    827 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    828 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    829 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    830 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    831 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    832 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    833 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    834 
    835 /*
    836  * Management mode and power management related subroutines.
    837  * BMC, AMT, suspend/resume and EEE.
    838  */
    839 #if 0
    840 static int	wm_check_mng_mode(struct wm_softc *);
    841 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    842 static int	wm_check_mng_mode_82574(struct wm_softc *);
    843 static int	wm_check_mng_mode_generic(struct wm_softc *);
    844 #endif
    845 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    846 static bool	wm_phy_resetisblocked(struct wm_softc *);
    847 static void	wm_get_hw_control(struct wm_softc *);
    848 static void	wm_release_hw_control(struct wm_softc *);
    849 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    850 static void	wm_smbustopci(struct wm_softc *);
    851 static void	wm_init_manageability(struct wm_softc *);
    852 static void	wm_release_manageability(struct wm_softc *);
    853 static void	wm_get_wakeup(struct wm_softc *);
    854 static void	wm_ulp_disable(struct wm_softc *);
    855 static void	wm_enable_phy_wakeup(struct wm_softc *);
    856 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    857 static void	wm_enable_wakeup(struct wm_softc *);
    858 /* LPLU (Low Power Link Up) */
    859 static void	wm_lplu_d0_disable(struct wm_softc *);
    860 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    861 /* EEE */
    862 static void	wm_set_eee_i350(struct wm_softc *);
    863 
    864 /*
    865  * Workarounds (mainly PHY related).
    866  * Basically, PHY workarounds are in the PHY drivers.
    867  */
    868 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    869 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    870 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    871 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    872 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    873 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    874 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    875 static void	wm_reset_init_script_82575(struct wm_softc *);
    876 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    877 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    878 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    879 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    880 static void	wm_pll_workaround_i210(struct wm_softc *);
    881 
    882 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    883     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    884 
    885 /*
    886  * Devices supported by this driver.
    887  */
    888 static const struct wm_product {
    889 	pci_vendor_id_t		wmp_vendor;
    890 	pci_product_id_t	wmp_product;
    891 	const char		*wmp_name;
    892 	wm_chip_type		wmp_type;
    893 	uint32_t		wmp_flags;
    894 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    895 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    896 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    897 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    898 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    899 } wm_products[] = {
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    901 	  "Intel i82542 1000BASE-X Ethernet",
    902 	  WM_T_82542_2_1,	WMP_F_FIBER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    905 	  "Intel i82543GC 1000BASE-X Ethernet",
    906 	  WM_T_82543,		WMP_F_FIBER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    909 	  "Intel i82543GC 1000BASE-T Ethernet",
    910 	  WM_T_82543,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    913 	  "Intel i82544EI 1000BASE-T Ethernet",
    914 	  WM_T_82544,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    917 	  "Intel i82544EI 1000BASE-X Ethernet",
    918 	  WM_T_82544,		WMP_F_FIBER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    921 	  "Intel i82544GC 1000BASE-T Ethernet",
    922 	  WM_T_82544,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    925 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    926 	  WM_T_82544,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    929 	  "Intel i82540EM 1000BASE-T Ethernet",
    930 	  WM_T_82540,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    933 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    934 	  WM_T_82540,		WMP_F_COPPER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    937 	  "Intel i82540EP 1000BASE-T Ethernet",
    938 	  WM_T_82540,		WMP_F_COPPER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    941 	  "Intel i82540EP 1000BASE-T Ethernet",
    942 	  WM_T_82540,		WMP_F_COPPER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    945 	  "Intel i82540EP 1000BASE-T Ethernet",
    946 	  WM_T_82540,		WMP_F_COPPER },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    949 	  "Intel i82545EM 1000BASE-T Ethernet",
    950 	  WM_T_82545,		WMP_F_COPPER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    953 	  "Intel i82545GM 1000BASE-T Ethernet",
    954 	  WM_T_82545_3,		WMP_F_COPPER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    957 	  "Intel i82545GM 1000BASE-X Ethernet",
    958 	  WM_T_82545_3,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    961 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    962 	  WM_T_82545_3,		WMP_F_SERDES },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    965 	  "Intel i82546EB 1000BASE-T Ethernet",
    966 	  WM_T_82546,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    969 	  "Intel i82546EB 1000BASE-T Ethernet",
    970 	  WM_T_82546,		WMP_F_COPPER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    973 	  "Intel i82545EM 1000BASE-X Ethernet",
    974 	  WM_T_82545,		WMP_F_FIBER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    977 	  "Intel i82546EB 1000BASE-X Ethernet",
    978 	  WM_T_82546,		WMP_F_FIBER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    981 	  "Intel i82546GB 1000BASE-T Ethernet",
    982 	  WM_T_82546_3,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    985 	  "Intel i82546GB 1000BASE-X Ethernet",
    986 	  WM_T_82546_3,		WMP_F_FIBER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    989 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    990 	  WM_T_82546_3,		WMP_F_SERDES },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    993 	  "i82546GB quad-port Gigabit Ethernet",
    994 	  WM_T_82546_3,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    997 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    998 	  WM_T_82546_3,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1001 	  "Intel PRO/1000MT (82546GB)",
   1002 	  WM_T_82546_3,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1005 	  "Intel i82541EI 1000BASE-T Ethernet",
   1006 	  WM_T_82541,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1009 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1010 	  WM_T_82541,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1013 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1014 	  WM_T_82541,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1017 	  "Intel i82541ER 1000BASE-T Ethernet",
   1018 	  WM_T_82541_2,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1021 	  "Intel i82541GI 1000BASE-T Ethernet",
   1022 	  WM_T_82541_2,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1025 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1026 	  WM_T_82541_2,		WMP_F_COPPER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1029 	  "Intel i82541PI 1000BASE-T Ethernet",
   1030 	  WM_T_82541_2,		WMP_F_COPPER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1033 	  "Intel i82547EI 1000BASE-T Ethernet",
   1034 	  WM_T_82547,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1037 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1038 	  WM_T_82547,		WMP_F_COPPER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1041 	  "Intel i82547GI 1000BASE-T Ethernet",
   1042 	  WM_T_82547_2,		WMP_F_COPPER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1045 	  "Intel PRO/1000 PT (82571EB)",
   1046 	  WM_T_82571,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1049 	  "Intel PRO/1000 PF (82571EB)",
   1050 	  WM_T_82571,		WMP_F_FIBER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1053 	  "Intel PRO/1000 PB (82571EB)",
   1054 	  WM_T_82571,		WMP_F_SERDES },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1057 	  "Intel PRO/1000 QT (82571EB)",
   1058 	  WM_T_82571,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1061 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1062 	  WM_T_82571,		WMP_F_COPPER, },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1065 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1066 	  WM_T_82571,		WMP_F_COPPER, },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1069 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1070 	  WM_T_82571,		WMP_F_SERDES, },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1073 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1074 	  WM_T_82571,		WMP_F_SERDES, },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1077 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1078 	  WM_T_82571,		WMP_F_FIBER, },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1081 	  "Intel i82572EI 1000baseT Ethernet",
   1082 	  WM_T_82572,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1085 	  "Intel i82572EI 1000baseX Ethernet",
   1086 	  WM_T_82572,		WMP_F_FIBER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1089 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1090 	  WM_T_82572,		WMP_F_SERDES },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1093 	  "Intel i82572EI 1000baseT Ethernet",
   1094 	  WM_T_82572,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1097 	  "Intel i82573E",
   1098 	  WM_T_82573,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1101 	  "Intel i82573E IAMT",
   1102 	  WM_T_82573,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1105 	  "Intel i82573L Gigabit Ethernet",
   1106 	  WM_T_82573,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1109 	  "Intel i82574L",
   1110 	  WM_T_82574,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1113 	  "Intel i82574L",
   1114 	  WM_T_82574,		WMP_F_COPPER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1117 	  "Intel i82583V",
   1118 	  WM_T_82583,		WMP_F_COPPER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1121 	  "i80003 dual 1000baseT Ethernet",
   1122 	  WM_T_80003,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1125 	  "i80003 dual 1000baseX Ethernet",
   1126 	  WM_T_80003,		WMP_F_COPPER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1129 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1130 	  WM_T_80003,		WMP_F_SERDES },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1133 	  "Intel i80003 1000baseT Ethernet",
   1134 	  WM_T_80003,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1137 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1138 	  WM_T_80003,		WMP_F_SERDES },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1141 	  "Intel i82801H (M_AMT) LAN Controller",
   1142 	  WM_T_ICH8,		WMP_F_COPPER },
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1144 	  "Intel i82801H (AMT) LAN Controller",
   1145 	  WM_T_ICH8,		WMP_F_COPPER },
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1147 	  "Intel i82801H LAN Controller",
   1148 	  WM_T_ICH8,		WMP_F_COPPER },
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1150 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1151 	  WM_T_ICH8,		WMP_F_COPPER },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1153 	  "Intel i82801H (M) LAN Controller",
   1154 	  WM_T_ICH8,		WMP_F_COPPER },
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1156 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1157 	  WM_T_ICH8,		WMP_F_COPPER },
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1159 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1160 	  WM_T_ICH8,		WMP_F_COPPER },
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1162 	  "82567V-3 LAN Controller",
   1163 	  WM_T_ICH8,		WMP_F_COPPER },
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1165 	  "82801I (AMT) LAN Controller",
   1166 	  WM_T_ICH9,		WMP_F_COPPER },
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1168 	  "82801I 10/100 LAN Controller",
   1169 	  WM_T_ICH9,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1171 	  "82801I (G) 10/100 LAN Controller",
   1172 	  WM_T_ICH9,		WMP_F_COPPER },
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1174 	  "82801I (GT) 10/100 LAN Controller",
   1175 	  WM_T_ICH9,		WMP_F_COPPER },
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1177 	  "82801I (C) LAN Controller",
   1178 	  WM_T_ICH9,		WMP_F_COPPER },
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1180 	  "82801I mobile LAN Controller",
   1181 	  WM_T_ICH9,		WMP_F_COPPER },
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1183 	  "82801I mobile (V) LAN Controller",
   1184 	  WM_T_ICH9,		WMP_F_COPPER },
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1186 	  "82801I mobile (AMT) LAN Controller",
   1187 	  WM_T_ICH9,		WMP_F_COPPER },
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1189 	  "82567LM-4 LAN Controller",
   1190 	  WM_T_ICH9,		WMP_F_COPPER },
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1192 	  "82567LM-2 LAN Controller",
   1193 	  WM_T_ICH10,		WMP_F_COPPER },
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1195 	  "82567LF-2 LAN Controller",
   1196 	  WM_T_ICH10,		WMP_F_COPPER },
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1198 	  "82567LM-3 LAN Controller",
   1199 	  WM_T_ICH10,		WMP_F_COPPER },
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1201 	  "82567LF-3 LAN Controller",
   1202 	  WM_T_ICH10,		WMP_F_COPPER },
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1204 	  "82567V-2 LAN Controller",
   1205 	  WM_T_ICH10,		WMP_F_COPPER },
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1207 	  "82567V-3? LAN Controller",
   1208 	  WM_T_ICH10,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1210 	  "HANKSVILLE LAN Controller",
   1211 	  WM_T_ICH10,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1213 	  "PCH LAN (82577LM) Controller",
   1214 	  WM_T_PCH,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1216 	  "PCH LAN (82577LC) Controller",
   1217 	  WM_T_PCH,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1219 	  "PCH LAN (82578DM) Controller",
   1220 	  WM_T_PCH,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1222 	  "PCH LAN (82578DC) Controller",
   1223 	  WM_T_PCH,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1225 	  "PCH2 LAN (82579LM) Controller",
   1226 	  WM_T_PCH2,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1228 	  "PCH2 LAN (82579V) Controller",
   1229 	  WM_T_PCH2,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1231 	  "82575EB dual-1000baseT Ethernet",
   1232 	  WM_T_82575,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1234 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1235 	  WM_T_82575,		WMP_F_SERDES },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1237 	  "82575GB quad-1000baseT Ethernet",
   1238 	  WM_T_82575,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1240 	  "82575GB quad-1000baseT Ethernet (PM)",
   1241 	  WM_T_82575,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1243 	  "82576 1000BaseT Ethernet",
   1244 	  WM_T_82576,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1246 	  "82576 1000BaseX Ethernet",
   1247 	  WM_T_82576,		WMP_F_FIBER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1250 	  "82576 gigabit Ethernet (SERDES)",
   1251 	  WM_T_82576,		WMP_F_SERDES },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1254 	  "82576 quad-1000BaseT Ethernet",
   1255 	  WM_T_82576,		WMP_F_COPPER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1258 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1259 	  WM_T_82576,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1262 	  "82576 gigabit Ethernet",
   1263 	  WM_T_82576,		WMP_F_COPPER },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1266 	  "82576 gigabit Ethernet (SERDES)",
   1267 	  WM_T_82576,		WMP_F_SERDES },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1269 	  "82576 quad-gigabit Ethernet (SERDES)",
   1270 	  WM_T_82576,		WMP_F_SERDES },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1273 	  "82580 1000BaseT Ethernet",
   1274 	  WM_T_82580,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1276 	  "82580 1000BaseX Ethernet",
   1277 	  WM_T_82580,		WMP_F_FIBER },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1280 	  "82580 1000BaseT Ethernet (SERDES)",
   1281 	  WM_T_82580,		WMP_F_SERDES },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1284 	  "82580 gigabit Ethernet (SGMII)",
   1285 	  WM_T_82580,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1287 	  "82580 dual-1000BaseT Ethernet",
   1288 	  WM_T_82580,		WMP_F_COPPER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1291 	  "82580 quad-1000BaseX Ethernet",
   1292 	  WM_T_82580,		WMP_F_FIBER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1295 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1296 	  WM_T_82580,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1299 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1300 	  WM_T_82580,		WMP_F_SERDES },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1303 	  "DH89XXCC 1000BASE-KX Ethernet",
   1304 	  WM_T_82580,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1307 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1308 	  WM_T_82580,		WMP_F_SERDES },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1311 	  "I350 Gigabit Network Connection",
   1312 	  WM_T_I350,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1315 	  "I350 Gigabit Fiber Network Connection",
   1316 	  WM_T_I350,		WMP_F_FIBER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1319 	  "I350 Gigabit Backplane Connection",
   1320 	  WM_T_I350,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1323 	  "I350 Quad Port Gigabit Ethernet",
   1324 	  WM_T_I350,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1327 	  "I350 Gigabit Connection",
   1328 	  WM_T_I350,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1331 	  "I354 Gigabit Ethernet (KX)",
   1332 	  WM_T_I354,		WMP_F_SERDES },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1335 	  "I354 Gigabit Ethernet (SGMII)",
   1336 	  WM_T_I354,		WMP_F_COPPER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1339 	  "I354 Gigabit Ethernet (2.5G)",
   1340 	  WM_T_I354,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1343 	  "I210-T1 Ethernet Server Adapter",
   1344 	  WM_T_I210,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1347 	  "I210 Ethernet (Copper OEM)",
   1348 	  WM_T_I210,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1351 	  "I210 Ethernet (Copper IT)",
   1352 	  WM_T_I210,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1355 	  "I210 Ethernet (FLASH less)",
   1356 	  WM_T_I210,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1359 	  "I210 Gigabit Ethernet (Fiber)",
   1360 	  WM_T_I210,		WMP_F_FIBER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1363 	  "I210 Gigabit Ethernet (SERDES)",
   1364 	  WM_T_I210,		WMP_F_SERDES },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1367 	  "I210 Gigabit Ethernet (FLASH less)",
   1368 	  WM_T_I210,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1371 	  "I210 Gigabit Ethernet (SGMII)",
   1372 	  WM_T_I210,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1375 	  "I211 Ethernet (COPPER)",
   1376 	  WM_T_I211,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1378 	  "I217 V Ethernet Connection",
   1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1381 	  "I217 LM Ethernet Connection",
   1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1384 	  "I218 V Ethernet Connection",
   1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1387 	  "I218 V Ethernet Connection",
   1388 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1390 	  "I218 V Ethernet Connection",
   1391 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1393 	  "I218 LM Ethernet Connection",
   1394 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1396 	  "I218 LM Ethernet Connection",
   1397 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1399 	  "I218 LM Ethernet Connection",
   1400 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1401 #if 0
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1403 	  "I219 V Ethernet Connection",
   1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1406 	  "I219 V Ethernet Connection",
   1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1409 	  "I219 V Ethernet Connection",
   1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1412 	  "I219 V Ethernet Connection",
   1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1415 	  "I219 LM Ethernet Connection",
   1416 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1418 	  "I219 LM Ethernet Connection",
   1419 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1421 	  "I219 LM Ethernet Connection",
   1422 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1424 	  "I219 LM Ethernet Connection",
   1425 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1427 	  "I219 LM Ethernet Connection",
   1428 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1429 #endif
   1430 	{ 0,			0,
   1431 	  NULL,
   1432 	  0,			0 },
   1433 };
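
/*
 * The all-zero entry above terminates the table; wm_lookup() below
 * stops scanning when it reaches a NULL wmp_name.
 */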
   1434 
   1435 /*
    1436  * Register read/write functions,
    1437  * other than CSR_{READ|WRITE}().
   1438  */
   1439 
   1440 #if 0 /* Not currently used */
   1441 static inline uint32_t
   1442 wm_io_read(struct wm_softc *sc, int reg)
   1443 {
   1444 
   1445 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1446 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1447 }
   1448 #endif
   1449 
   1450 static inline void
   1451 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1452 {
   1453 
   1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1456 }
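
/*
 * The two helpers above implement the chip's IOADDR/IODATA window:
 * the register offset goes out through I/O offset 0 and the data is
 * then transferred through I/O offset 4.  A minimal usage sketch,
 * assuming the I/O BAR was mapped (WM_F_IOH_VALID is set):
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 */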
   1457 
   1458 static inline void
   1459 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1460     uint32_t data)
   1461 {
   1462 	uint32_t regval;
   1463 	int i;
   1464 
   1465 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1466 
   1467 	CSR_WRITE(sc, reg, regval);
   1468 
   1469 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1470 		delay(5);
   1471 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1472 			break;
   1473 	}
   1474 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1475 		aprint_error("%s: WARNING:"
   1476 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1477 		    device_xname(sc->sc_dev), reg);
   1478 	}
   1479 }
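
/*
 * In the helper above, the 8-bit payload and the register offset are
 * packed into a single CSR write; completion is then detected by
 * polling SCTL_CTL_READY in 5us steps for at most
 * SCTL_CTL_POLL_TIMEOUT iterations before a warning is printed.
 */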
   1480 
   1481 static inline void
   1482 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1483 {
   1484 	wa->wa_low = htole32(v & 0xffffffffU);
   1485 	if (sizeof(bus_addr_t) == 8)
   1486 		wa->wa_high = htole32((uint64_t) v >> 32);
   1487 	else
   1488 		wa->wa_high = 0;
   1489 }
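
/*
 * For illustration (values assumed): with a 64-bit bus_addr_t and
 * v == 0x0000001234567890, the helper above stores
 * wa_low == htole32(0x34567890) and wa_high == htole32(0x00000012);
 * with a 32-bit bus_addr_t the high word is simply zero.
 */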
   1490 
   1491 /*
   1492  * Descriptor sync/init functions.
   1493  */
   1494 static inline void
   1495 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1496 {
   1497 	struct wm_softc *sc = txq->txq_sc;
   1498 
   1499 	/* If it will wrap around, sync to the end of the ring. */
   1500 	if ((start + num) > WM_NTXDESC(txq)) {
   1501 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1502 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1503 		    (WM_NTXDESC(txq) - start), ops);
   1504 		num -= (WM_NTXDESC(txq) - start);
   1505 		start = 0;
   1506 	}
   1507 
   1508 	/* Now sync whatever is left. */
   1509 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1510 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1511 }
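
/*
 * For illustration (values assumed): with WM_NTXDESC(txq) == 256,
 * start == 250 and num == 10, the first bus_dmamap_sync() above
 * covers descriptors 250..255 and the second covers 0..3.
 */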
   1512 
   1513 static inline void
   1514 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1515 {
   1516 	struct wm_softc *sc = rxq->rxq_sc;
   1517 
   1518 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1519 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1520 }
   1521 
   1522 static inline void
   1523 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1524 {
   1525 	struct wm_softc *sc = rxq->rxq_sc;
   1526 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1527 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1528 	struct mbuf *m = rxs->rxs_mbuf;
   1529 
   1530 	/*
   1531 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1532 	 * so that the payload after the Ethernet header is aligned
   1533 	 * to a 4-byte boundary.
    1534 	 *
   1535 	 * XXX BRAINDAMAGE ALERT!
   1536 	 * The stupid chip uses the same size for every buffer, which
   1537 	 * is set in the Receive Control register.  We are using the 2K
   1538 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1539 	 * reason, we can't "scoot" packets longer than the standard
   1540 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1541 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1542 	 * the upper layer copy the headers.
   1543 	 */
   1544 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1545 
   1546 	wm_set_dma_addr(&rxd->wrx_addr,
   1547 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1548 	rxd->wrx_len = 0;
   1549 	rxd->wrx_cksum = 0;
   1550 	rxd->wrx_status = 0;
   1551 	rxd->wrx_errors = 0;
   1552 	rxd->wrx_special = 0;
   1553 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1554 
   1555 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1556 }
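
/*
 * The 2-byte "scoot" above works because the Ethernet header is
 * 14 bytes long: shifting the buffer by sc_align_tweak == 2 puts the
 * IP header that follows it on a 4-byte boundary.
 */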
   1557 
   1558 /*
   1559  * Device driver interface functions and commonly used functions.
   1560  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1561  */
   1562 
   1563 /* Lookup supported device table */
   1564 static const struct wm_product *
   1565 wm_lookup(const struct pci_attach_args *pa)
   1566 {
   1567 	const struct wm_product *wmp;
   1568 
   1569 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1570 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1571 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1572 			return wmp;
   1573 	}
   1574 	return NULL;
   1575 }
   1576 
   1577 /* The match function (ca_match) */
   1578 static int
   1579 wm_match(device_t parent, cfdata_t cf, void *aux)
   1580 {
   1581 	struct pci_attach_args *pa = aux;
   1582 
   1583 	if (wm_lookup(pa) != NULL)
   1584 		return 1;
   1585 
   1586 	return 0;
   1587 }
   1588 
   1589 /* The attach function (ca_attach) */
   1590 static void
   1591 wm_attach(device_t parent, device_t self, void *aux)
   1592 {
   1593 	struct wm_softc *sc = device_private(self);
   1594 	struct pci_attach_args *pa = aux;
   1595 	prop_dictionary_t dict;
   1596 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1597 	pci_chipset_tag_t pc = pa->pa_pc;
   1598 	int counts[PCI_INTR_TYPE_SIZE];
   1599 	pci_intr_type_t max_type;
   1600 	const char *eetype, *xname;
   1601 	bus_space_tag_t memt;
   1602 	bus_space_handle_t memh;
   1603 	bus_size_t memsize;
   1604 	int memh_valid;
   1605 	int i, error;
   1606 	const struct wm_product *wmp;
   1607 	prop_data_t ea;
   1608 	prop_number_t pn;
   1609 	uint8_t enaddr[ETHER_ADDR_LEN];
   1610 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1611 	pcireg_t preg, memtype;
   1612 	uint16_t eeprom_data, apme_mask;
   1613 	bool force_clear_smbi;
   1614 	uint32_t link_mode;
   1615 	uint32_t reg;
   1616 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1617 
   1618 	sc->sc_dev = self;
   1619 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1620 	sc->sc_core_stopping = false;
   1621 
   1622 	wmp = wm_lookup(pa);
   1623 #ifdef DIAGNOSTIC
   1624 	if (wmp == NULL) {
   1625 		printf("\n");
   1626 		panic("wm_attach: impossible");
   1627 	}
   1628 #endif
   1629 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1630 
   1631 	sc->sc_pc = pa->pa_pc;
   1632 	sc->sc_pcitag = pa->pa_tag;
   1633 
   1634 	if (pci_dma64_available(pa))
   1635 		sc->sc_dmat = pa->pa_dmat64;
   1636 	else
   1637 		sc->sc_dmat = pa->pa_dmat;
   1638 
   1639 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1640 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1641 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1642 
   1643 	sc->sc_type = wmp->wmp_type;
   1644 
   1645 	/* Set default function pointers */
   1646 	sc->phy.acquire = wm_get_null;
   1647 	sc->phy.release = wm_put_null;
   1648 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1649 
   1650 	if (sc->sc_type < WM_T_82543) {
   1651 		if (sc->sc_rev < 2) {
   1652 			aprint_error_dev(sc->sc_dev,
   1653 			    "i82542 must be at least rev. 2\n");
   1654 			return;
   1655 		}
   1656 		if (sc->sc_rev < 3)
   1657 			sc->sc_type = WM_T_82542_2_0;
   1658 	}
   1659 
   1660 	/*
   1661 	 * Disable MSI for Errata:
   1662 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1663 	 *
   1664 	 *  82544: Errata 25
   1665 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1666 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1667 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1668 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1669 	 *
   1670 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1671 	 *
   1672 	 *  82571 & 82572: Errata 63
   1673 	 */
   1674 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1675 	    || (sc->sc_type == WM_T_82572))
   1676 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1677 
   1678 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1679 	    || (sc->sc_type == WM_T_82580)
   1680 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1681 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1682 		sc->sc_flags |= WM_F_NEWQUEUE;
   1683 
   1684 	/* Set device properties (mactype) */
   1685 	dict = device_properties(sc->sc_dev);
   1686 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1687 
   1688 	/*
    1689 	 * Map the device.  All devices support memory-mapped access,
   1690 	 * and it is really required for normal operation.
   1691 	 */
   1692 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1693 	switch (memtype) {
   1694 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1695 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1696 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1697 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1698 		break;
   1699 	default:
   1700 		memh_valid = 0;
   1701 		break;
   1702 	}
   1703 
   1704 	if (memh_valid) {
   1705 		sc->sc_st = memt;
   1706 		sc->sc_sh = memh;
   1707 		sc->sc_ss = memsize;
   1708 	} else {
   1709 		aprint_error_dev(sc->sc_dev,
   1710 		    "unable to map device registers\n");
   1711 		return;
   1712 	}
   1713 
   1714 	/*
   1715 	 * In addition, i82544 and later support I/O mapped indirect
   1716 	 * register access.  It is not desirable (nor supported in
   1717 	 * this driver) to use it for normal operation, though it is
   1718 	 * required to work around bugs in some chip versions.
   1719 	 */
   1720 	if (sc->sc_type >= WM_T_82544) {
   1721 		/* First we have to find the I/O BAR. */
   1722 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1723 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1724 			if (memtype == PCI_MAPREG_TYPE_IO)
   1725 				break;
   1726 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1727 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1728 				i += 4;	/* skip high bits, too */
   1729 		}
   1730 		if (i < PCI_MAPREG_END) {
   1731 			/*
    1732 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1733 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1734 			 * That is no problem, because those newer chips
    1735 			 * don't have this bug.
   1736 			 *
    1737 			 * The i8254x apparently doesn't respond when the
    1738 			 * I/O BAR is 0, which looks somewhat like it has
    1739 			 * not been configured.
   1740 			 */
   1741 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1742 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1743 				aprint_error_dev(sc->sc_dev,
   1744 				    "WARNING: I/O BAR at zero.\n");
   1745 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1746 					0, &sc->sc_iot, &sc->sc_ioh,
   1747 					NULL, &sc->sc_ios) == 0) {
   1748 				sc->sc_flags |= WM_F_IOH_VALID;
   1749 			} else {
   1750 				aprint_error_dev(sc->sc_dev,
   1751 				    "WARNING: unable to map I/O space\n");
   1752 			}
   1753 		}
   1754 
   1755 	}
   1756 
   1757 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1758 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1759 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1760 	if (sc->sc_type < WM_T_82542_2_1)
   1761 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1762 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1763 
   1764 	/* power up chip */
   1765 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1766 	    NULL)) && error != EOPNOTSUPP) {
   1767 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1768 		return;
   1769 	}
   1770 
   1771 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1772 
   1773 	/* Allocation settings */
   1774 	max_type = PCI_INTR_TYPE_MSIX;
   1775 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1776 	counts[PCI_INTR_TYPE_MSI] = 1;
   1777 	counts[PCI_INTR_TYPE_INTX] = 1;
   1778 
   1779 alloc_retry:
   1780 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1781 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1782 		return;
   1783 	}
   1784 
   1785 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1786 		error = wm_setup_msix(sc);
   1787 		if (error) {
   1788 			pci_intr_release(pc, sc->sc_intrs,
   1789 			    counts[PCI_INTR_TYPE_MSIX]);
   1790 
   1791 			/* Setup for MSI: Disable MSI-X */
   1792 			max_type = PCI_INTR_TYPE_MSI;
   1793 			counts[PCI_INTR_TYPE_MSI] = 1;
   1794 			counts[PCI_INTR_TYPE_INTX] = 1;
   1795 			goto alloc_retry;
   1796 		}
    1797 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1798 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1799 		error = wm_setup_legacy(sc);
   1800 		if (error) {
   1801 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1802 			    counts[PCI_INTR_TYPE_MSI]);
   1803 
   1804 			/* The next try is for INTx: Disable MSI */
   1805 			max_type = PCI_INTR_TYPE_INTX;
   1806 			counts[PCI_INTR_TYPE_INTX] = 1;
   1807 			goto alloc_retry;
   1808 		}
   1809 	} else {
   1810 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1811 		error = wm_setup_legacy(sc);
   1812 		if (error) {
   1813 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1814 			    counts[PCI_INTR_TYPE_INTX]);
   1815 			return;
   1816 		}
   1817 	}
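
	/*
	 * At this point the interrupt type is settled, falling back in
	 * the order MSI-X -> MSI -> INTx: each failed wm_setup_*() call
	 * releases the vectors it was allocated, lowers max_type and
	 * retries through the alloc_retry label above.
	 */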
   1818 
   1819 	/*
   1820 	 * Check the function ID (unit number of the chip).
   1821 	 */
   1822 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1823 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1824 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1825 	    || (sc->sc_type == WM_T_82580)
   1826 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1827 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1828 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1829 	else
   1830 		sc->sc_funcid = 0;
   1831 
   1832 	/*
   1833 	 * Determine a few things about the bus we're connected to.
   1834 	 */
   1835 	if (sc->sc_type < WM_T_82543) {
   1836 		/* We don't really know the bus characteristics here. */
   1837 		sc->sc_bus_speed = 33;
   1838 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1839 		/*
   1840 		 * CSA (Communication Streaming Architecture) is about as fast
    1841 		 * as a 32-bit 66MHz PCI bus.
   1842 		 */
   1843 		sc->sc_flags |= WM_F_CSA;
   1844 		sc->sc_bus_speed = 66;
   1845 		aprint_verbose_dev(sc->sc_dev,
   1846 		    "Communication Streaming Architecture\n");
   1847 		if (sc->sc_type == WM_T_82547) {
   1848 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1849 			callout_setfunc(&sc->sc_txfifo_ch,
   1850 					wm_82547_txfifo_stall, sc);
   1851 			aprint_verbose_dev(sc->sc_dev,
   1852 			    "using 82547 Tx FIFO stall work-around\n");
   1853 		}
   1854 	} else if (sc->sc_type >= WM_T_82571) {
   1855 		sc->sc_flags |= WM_F_PCIE;
   1856 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1857 		    && (sc->sc_type != WM_T_ICH10)
   1858 		    && (sc->sc_type != WM_T_PCH)
   1859 		    && (sc->sc_type != WM_T_PCH2)
   1860 		    && (sc->sc_type != WM_T_PCH_LPT)
   1861 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1862 			/* ICH* and PCH* have no PCIe capability registers */
   1863 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1864 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1865 				NULL) == 0)
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "unable to find PCIe capability\n");
   1868 		}
   1869 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1870 	} else {
   1871 		reg = CSR_READ(sc, WMREG_STATUS);
   1872 		if (reg & STATUS_BUS64)
   1873 			sc->sc_flags |= WM_F_BUS64;
   1874 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1875 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1876 
   1877 			sc->sc_flags |= WM_F_PCIX;
   1878 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1879 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1880 				aprint_error_dev(sc->sc_dev,
   1881 				    "unable to find PCIX capability\n");
   1882 			else if (sc->sc_type != WM_T_82545_3 &&
   1883 				 sc->sc_type != WM_T_82546_3) {
   1884 				/*
   1885 				 * Work around a problem caused by the BIOS
   1886 				 * setting the max memory read byte count
   1887 				 * incorrectly.
   1888 				 */
   1889 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1890 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1891 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1892 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1893 
   1894 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1895 				    PCIX_CMD_BYTECNT_SHIFT;
   1896 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1897 				    PCIX_STATUS_MAXB_SHIFT;
   1898 				if (bytecnt > maxb) {
   1899 					aprint_verbose_dev(sc->sc_dev,
   1900 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1901 					    512 << bytecnt, 512 << maxb);
   1902 					pcix_cmd = (pcix_cmd &
   1903 					    ~PCIX_CMD_BYTECNT_MASK) |
   1904 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1905 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1906 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1907 					    pcix_cmd);
   1908 				}
   1909 			}
   1910 		}
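
		/*
		 * For illustration (values assumed): MMRBC is encoded as
		 * 512 << bytecnt bytes, so bytecnt == 2 means 2048 bytes;
		 * if the device reports maxb == 1 (1024 bytes), the code
		 * above rewrites PCIX_CMD so MMRBC drops to 1024.
		 */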
   1911 		/*
   1912 		 * The quad port adapter is special; it has a PCIX-PCIX
   1913 		 * bridge on the board, and can run the secondary bus at
   1914 		 * a higher speed.
   1915 		 */
   1916 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1917 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1918 								      : 66;
   1919 		} else if (sc->sc_flags & WM_F_PCIX) {
   1920 			switch (reg & STATUS_PCIXSPD_MASK) {
   1921 			case STATUS_PCIXSPD_50_66:
   1922 				sc->sc_bus_speed = 66;
   1923 				break;
   1924 			case STATUS_PCIXSPD_66_100:
   1925 				sc->sc_bus_speed = 100;
   1926 				break;
   1927 			case STATUS_PCIXSPD_100_133:
   1928 				sc->sc_bus_speed = 133;
   1929 				break;
   1930 			default:
   1931 				aprint_error_dev(sc->sc_dev,
   1932 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1933 				    reg & STATUS_PCIXSPD_MASK);
   1934 				sc->sc_bus_speed = 66;
   1935 				break;
   1936 			}
   1937 		} else
   1938 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1939 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1940 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1941 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1942 	}
   1943 
   1944 	/* clear interesting stat counters */
   1945 	CSR_READ(sc, WMREG_COLC);
   1946 	CSR_READ(sc, WMREG_RXERRC);
   1947 
   1948 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1949 	    || (sc->sc_type >= WM_T_ICH8))
   1950 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1951 	if (sc->sc_type >= WM_T_ICH8)
   1952 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1953 
   1954 	/* Set PHY, NVM mutex related stuff */
   1955 	switch (sc->sc_type) {
   1956 	case WM_T_82542_2_0:
   1957 	case WM_T_82542_2_1:
   1958 	case WM_T_82543:
   1959 	case WM_T_82544:
   1960 		/* Microwire */
   1961 		sc->sc_nvm_wordsize = 64;
   1962 		sc->sc_nvm_addrbits = 6;
   1963 		break;
   1964 	case WM_T_82540:
   1965 	case WM_T_82545:
   1966 	case WM_T_82545_3:
   1967 	case WM_T_82546:
   1968 	case WM_T_82546_3:
   1969 		/* Microwire */
   1970 		reg = CSR_READ(sc, WMREG_EECD);
   1971 		if (reg & EECD_EE_SIZE) {
   1972 			sc->sc_nvm_wordsize = 256;
   1973 			sc->sc_nvm_addrbits = 8;
   1974 		} else {
   1975 			sc->sc_nvm_wordsize = 64;
   1976 			sc->sc_nvm_addrbits = 6;
   1977 		}
   1978 		sc->sc_flags |= WM_F_LOCK_EECD;
   1979 		break;
   1980 	case WM_T_82541:
   1981 	case WM_T_82541_2:
   1982 	case WM_T_82547:
   1983 	case WM_T_82547_2:
   1984 		sc->sc_flags |= WM_F_LOCK_EECD;
   1985 		reg = CSR_READ(sc, WMREG_EECD);
   1986 		if (reg & EECD_EE_TYPE) {
   1987 			/* SPI */
   1988 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1989 			wm_nvm_set_addrbits_size_eecd(sc);
   1990 		} else {
   1991 			/* Microwire */
   1992 			if ((reg & EECD_EE_ABITS) != 0) {
   1993 				sc->sc_nvm_wordsize = 256;
   1994 				sc->sc_nvm_addrbits = 8;
   1995 			} else {
   1996 				sc->sc_nvm_wordsize = 64;
   1997 				sc->sc_nvm_addrbits = 6;
   1998 			}
   1999 		}
   2000 		break;
   2001 	case WM_T_82571:
   2002 	case WM_T_82572:
   2003 		/* SPI */
   2004 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2005 		wm_nvm_set_addrbits_size_eecd(sc);
   2006 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2007 		sc->phy.acquire = wm_get_swsm_semaphore;
   2008 		sc->phy.release = wm_put_swsm_semaphore;
   2009 		break;
   2010 	case WM_T_82573:
   2011 	case WM_T_82574:
   2012 	case WM_T_82583:
   2013 		if (sc->sc_type == WM_T_82573) {
   2014 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2015 			sc->phy.acquire = wm_get_swsm_semaphore;
   2016 			sc->phy.release = wm_put_swsm_semaphore;
   2017 		} else {
   2018 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2019 			/* Both PHY and NVM use the same semaphore. */
   2020 			sc->phy.acquire
   2021 			    = wm_get_swfwhw_semaphore;
   2022 			sc->phy.release
   2023 			    = wm_put_swfwhw_semaphore;
   2024 		}
   2025 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2026 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2027 			sc->sc_nvm_wordsize = 2048;
   2028 		} else {
   2029 			/* SPI */
   2030 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2031 			wm_nvm_set_addrbits_size_eecd(sc);
   2032 		}
   2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2034 		break;
   2035 	case WM_T_82575:
   2036 	case WM_T_82576:
   2037 	case WM_T_82580:
   2038 	case WM_T_I350:
   2039 	case WM_T_I354:
   2040 	case WM_T_80003:
   2041 		/* SPI */
   2042 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2043 		wm_nvm_set_addrbits_size_eecd(sc);
   2044 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2045 		    | WM_F_LOCK_SWSM;
   2046 		sc->phy.acquire = wm_get_phy_82575;
   2047 		sc->phy.release = wm_put_phy_82575;
   2048 		break;
   2049 	case WM_T_ICH8:
   2050 	case WM_T_ICH9:
   2051 	case WM_T_ICH10:
   2052 	case WM_T_PCH:
   2053 	case WM_T_PCH2:
   2054 	case WM_T_PCH_LPT:
   2055 		/* FLASH */
   2056 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2057 		sc->sc_nvm_wordsize = 2048;
   2058 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2059 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2060 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2061 			aprint_error_dev(sc->sc_dev,
   2062 			    "can't map FLASH registers\n");
   2063 			goto out;
   2064 		}
   2065 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2066 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2067 		    ICH_FLASH_SECTOR_SIZE;
   2068 		sc->sc_ich8_flash_bank_size =
   2069 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2070 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2071 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2072 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
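		/*
		 * GFPREG holds the flash base and limit in sector units;
		 * the byte count above is divided by 2 * sizeof(uint16_t)
		 * because the region holds two banks and the size is kept
		 * in 16-bit words rather than bytes.
		 */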
   2073 		sc->sc_flashreg_offset = 0;
   2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2075 		sc->phy.release = wm_put_swflag_ich8lan;
   2076 		break;
   2077 	case WM_T_PCH_SPT:
   2078 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2079 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2080 		sc->sc_flasht = sc->sc_st;
   2081 		sc->sc_flashh = sc->sc_sh;
   2082 		sc->sc_ich8_flash_base = 0;
   2083 		sc->sc_nvm_wordsize =
   2084 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2085 			* NVM_SIZE_MULTIPLIER;
    2086 		/* It is the size in bytes; we want words */
   2087 		sc->sc_nvm_wordsize /= 2;
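		/*
		 * For illustration (values assumed): if STRAP bits 5:1 read
		 * as 3, the NVM spans (3 + 1) * NVM_SIZE_MULTIPLIER bytes,
		 * and the halving above converts that byte count to 16-bit
		 * words.
		 */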
   2088 		/* assume 2 banks */
   2089 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2090 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2091 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2092 		sc->phy.release = wm_put_swflag_ich8lan;
   2093 		break;
   2094 	case WM_T_I210:
   2095 	case WM_T_I211:
   2096 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2097 			wm_nvm_set_addrbits_size_eecd(sc);
   2098 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2099 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2100 		} else {
   2101 			sc->sc_nvm_wordsize = INVM_SIZE;
   2102 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2103 		}
   2104 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2105 		sc->phy.acquire = wm_get_phy_82575;
   2106 		sc->phy.release = wm_put_phy_82575;
   2107 		break;
   2108 	default:
   2109 		break;
   2110 	}
   2111 
   2112 	/* Reset the chip to a known state. */
   2113 	wm_reset(sc);
   2114 
   2115 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2116 	switch (sc->sc_type) {
   2117 	case WM_T_82571:
   2118 	case WM_T_82572:
   2119 		reg = CSR_READ(sc, WMREG_SWSM2);
   2120 		if ((reg & SWSM2_LOCK) == 0) {
   2121 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2122 			force_clear_smbi = true;
   2123 		} else
   2124 			force_clear_smbi = false;
   2125 		break;
   2126 	case WM_T_82573:
   2127 	case WM_T_82574:
   2128 	case WM_T_82583:
   2129 		force_clear_smbi = true;
   2130 		break;
   2131 	default:
   2132 		force_clear_smbi = false;
   2133 		break;
   2134 	}
   2135 	if (force_clear_smbi) {
   2136 		reg = CSR_READ(sc, WMREG_SWSM);
   2137 		if ((reg & SWSM_SMBI) != 0)
   2138 			aprint_error_dev(sc->sc_dev,
   2139 			    "Please update the Bootagent\n");
   2140 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2141 	}
   2142 
   2143 	/*
    2144 	 * Defer printing the EEPROM type until after verifying the checksum.
   2145 	 * This allows the EEPROM type to be printed correctly in the case
   2146 	 * that no EEPROM is attached.
   2147 	 */
   2148 	/*
   2149 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2150 	 * this for later, so we can fail future reads from the EEPROM.
   2151 	 */
   2152 	if (wm_nvm_validate_checksum(sc)) {
   2153 		/*
    2154 		 * Validate again, because some PCIe parts fail the first
    2155 		 * check due to the link being in a sleep state.
   2156 		 */
   2157 		if (wm_nvm_validate_checksum(sc))
   2158 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2159 	}
   2160 
   2161 	/* Set device properties (macflags) */
   2162 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2163 
   2164 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2165 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2166 	else {
   2167 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2168 		    sc->sc_nvm_wordsize);
   2169 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2170 			aprint_verbose("iNVM");
   2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2172 			aprint_verbose("FLASH(HW)");
   2173 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2174 			aprint_verbose("FLASH");
   2175 		else {
   2176 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2177 				eetype = "SPI";
   2178 			else
   2179 				eetype = "MicroWire";
   2180 			aprint_verbose("(%d address bits) %s EEPROM",
   2181 			    sc->sc_nvm_addrbits, eetype);
   2182 		}
   2183 	}
   2184 	wm_nvm_version(sc);
   2185 	aprint_verbose("\n");
   2186 
   2187 	/* Check for I21[01] PLL workaround */
   2188 	if (sc->sc_type == WM_T_I210)
   2189 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2190 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2191 		/* NVM image release 3.25 has a workaround */
   2192 		if ((sc->sc_nvm_ver_major < 3)
   2193 		    || ((sc->sc_nvm_ver_major == 3)
   2194 			&& (sc->sc_nvm_ver_minor < 25))) {
   2195 			aprint_verbose_dev(sc->sc_dev,
   2196 			    "ROM image version %d.%d is older than 3.25\n",
   2197 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2198 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2199 		}
   2200 	}
   2201 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2202 		wm_pll_workaround_i210(sc);
   2203 
   2204 	wm_get_wakeup(sc);
   2205 
   2206 	/* Non-AMT based hardware can now take control from firmware */
   2207 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2208 		wm_get_hw_control(sc);
   2209 
   2210 	/*
    2211 	 * Read the Ethernet address from the EEPROM, if it was not
    2212 	 * found first in the device properties.
   2213 	 */
   2214 	ea = prop_dictionary_get(dict, "mac-address");
   2215 	if (ea != NULL) {
   2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2219 	} else {
   2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2221 			aprint_error_dev(sc->sc_dev,
   2222 			    "unable to read Ethernet address\n");
   2223 			goto out;
   2224 		}
   2225 	}
   2226 
   2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2228 	    ether_sprintf(enaddr));
   2229 
   2230 	/*
   2231 	 * Read the config info from the EEPROM, and set up various
   2232 	 * bits in the control registers based on their contents.
   2233 	 */
   2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2235 	if (pn != NULL) {
   2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2238 	} else {
   2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2241 			goto out;
   2242 		}
   2243 	}
   2244 
   2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2246 	if (pn != NULL) {
   2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2249 	} else {
   2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2252 			goto out;
   2253 		}
   2254 	}
   2255 
   2256 	/* check for WM_F_WOL */
   2257 	switch (sc->sc_type) {
   2258 	case WM_T_82542_2_0:
   2259 	case WM_T_82542_2_1:
   2260 	case WM_T_82543:
   2261 		/* dummy? */
   2262 		eeprom_data = 0;
   2263 		apme_mask = NVM_CFG3_APME;
   2264 		break;
   2265 	case WM_T_82544:
   2266 		apme_mask = NVM_CFG2_82544_APM_EN;
   2267 		eeprom_data = cfg2;
   2268 		break;
   2269 	case WM_T_82546:
   2270 	case WM_T_82546_3:
   2271 	case WM_T_82571:
   2272 	case WM_T_82572:
   2273 	case WM_T_82573:
   2274 	case WM_T_82574:
   2275 	case WM_T_82583:
   2276 	case WM_T_80003:
   2277 	default:
   2278 		apme_mask = NVM_CFG3_APME;
   2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2281 		break;
   2282 	case WM_T_82575:
   2283 	case WM_T_82576:
   2284 	case WM_T_82580:
   2285 	case WM_T_I350:
   2286 	case WM_T_I354: /* XXX ok? */
   2287 	case WM_T_ICH8:
   2288 	case WM_T_ICH9:
   2289 	case WM_T_ICH10:
   2290 	case WM_T_PCH:
   2291 	case WM_T_PCH2:
   2292 	case WM_T_PCH_LPT:
   2293 	case WM_T_PCH_SPT:
   2294 		/* XXX The funcid should be checked on some devices */
   2295 		apme_mask = WUC_APME;
   2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2297 		break;
   2298 	}
   2299 
    2300 	/* Check for the WM_F_WOL flag now that the EEPROM data is read */
   2301 	if ((eeprom_data & apme_mask) != 0)
   2302 		sc->sc_flags |= WM_F_WOL;
   2303 #ifdef WM_DEBUG
   2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2305 		printf("WOL\n");
   2306 #endif
   2307 
   2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2309 		/* Check NVM for autonegotiation */
   2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2313 		}
   2314 	}
   2315 
   2316 	/*
    2317 	 * XXX need special handling for some multiple-port cards
    2318 	 * to disable a particular port.
   2319 	 */
   2320 
   2321 	if (sc->sc_type >= WM_T_82544) {
   2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2323 		if (pn != NULL) {
   2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2326 		} else {
   2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2328 				aprint_error_dev(sc->sc_dev,
   2329 				    "unable to read SWDPIN\n");
   2330 				goto out;
   2331 			}
   2332 		}
   2333 	}
   2334 
   2335 	if (cfg1 & NVM_CFG1_ILOS)
   2336 		sc->sc_ctrl |= CTRL_ILOS;
   2337 
   2338 	/*
   2339 	 * XXX
    2340 	 * This code isn't correct, because pins 2 and 3 are located
    2341 	 * in different positions on newer chips. Check all datasheets.
    2342 	 *
    2343 	 * Until this is resolved, only apply it to chips up to the 82580.
   2344 	 */
   2345 	if (sc->sc_type <= WM_T_82580) {
   2346 		if (sc->sc_type >= WM_T_82544) {
   2347 			sc->sc_ctrl |=
   2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2349 			    CTRL_SWDPIO_SHIFT;
   2350 			sc->sc_ctrl |=
   2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2352 			    CTRL_SWDPINS_SHIFT;
   2353 		} else {
   2354 			sc->sc_ctrl |=
   2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2356 			    CTRL_SWDPIO_SHIFT;
   2357 		}
   2358 	}
   2359 
   2360 	/* XXX For other than 82580? */
   2361 	if (sc->sc_type == WM_T_82580) {
   2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2363 		if (nvmword & __BIT(13))
   2364 			sc->sc_ctrl |= CTRL_ILOS;
   2365 	}
   2366 
   2367 #if 0
   2368 	if (sc->sc_type >= WM_T_82544) {
   2369 		if (cfg1 & NVM_CFG1_IPS0)
   2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2371 		if (cfg1 & NVM_CFG1_IPS1)
   2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2373 		sc->sc_ctrl_ext |=
   2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2375 		    CTRL_EXT_SWDPIO_SHIFT;
   2376 		sc->sc_ctrl_ext |=
   2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2378 		    CTRL_EXT_SWDPINS_SHIFT;
   2379 	} else {
   2380 		sc->sc_ctrl_ext |=
   2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2382 		    CTRL_EXT_SWDPIO_SHIFT;
   2383 	}
   2384 #endif
   2385 
   2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2387 #if 0
   2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2389 #endif
   2390 
   2391 	if (sc->sc_type == WM_T_PCH) {
   2392 		uint16_t val;
   2393 
   2394 		/* Save the NVM K1 bit setting */
   2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2396 
   2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2398 			sc->sc_nvm_k1_enabled = 1;
   2399 		else
   2400 			sc->sc_nvm_k1_enabled = 0;
   2401 	}
   2402 
   2403 	/*
    2404 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2405 	 * media structures accordingly.
   2406 	 */
   2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2414 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2415 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2416 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2417 	    || (sc->sc_type == WM_T_I211)) {
   2418 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2419 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2420 		switch (link_mode) {
   2421 		case CTRL_EXT_LINK_MODE_1000KX:
   2422 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2423 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2424 			break;
   2425 		case CTRL_EXT_LINK_MODE_SGMII:
   2426 			if (wm_sgmii_uses_mdio(sc)) {
   2427 				aprint_verbose_dev(sc->sc_dev,
   2428 				    "SGMII(MDIO)\n");
   2429 				sc->sc_flags |= WM_F_SGMII;
   2430 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2431 				break;
   2432 			}
   2433 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2434 			/*FALLTHROUGH*/
   2435 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2436 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2437 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2438 				if (link_mode
   2439 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2440 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2441 					sc->sc_flags |= WM_F_SGMII;
   2442 				} else {
   2443 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2444 					aprint_verbose_dev(sc->sc_dev,
   2445 					    "SERDES\n");
   2446 				}
   2447 				break;
   2448 			}
   2449 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2450 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2451 
   2452 			/* Change current link mode setting */
   2453 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2454 			switch (sc->sc_mediatype) {
   2455 			case WM_MEDIATYPE_COPPER:
   2456 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2457 				break;
   2458 			case WM_MEDIATYPE_SERDES:
   2459 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2460 				break;
   2461 			default:
   2462 				break;
   2463 			}
   2464 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2465 			break;
   2466 		case CTRL_EXT_LINK_MODE_GMII:
   2467 		default:
   2468 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2469 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2470 			break;
   2471 		}
   2472 
    2473 		/* The if/else below fully determines CTRL_EXT_I2C_ENA. */
    2474 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2475 			reg |= CTRL_EXT_I2C_ENA;
    2476 		else
    2477 			reg &= ~CTRL_EXT_I2C_ENA;
   2478 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2479 
   2480 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2481 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2482 		else
   2483 			wm_tbi_mediainit(sc);
   2484 	} else if (sc->sc_type < WM_T_82543 ||
   2485 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2486 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2487 			aprint_error_dev(sc->sc_dev,
   2488 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2489 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2490 		}
   2491 		wm_tbi_mediainit(sc);
   2492 	} else {
   2493 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2494 			aprint_error_dev(sc->sc_dev,
   2495 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2496 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2497 		}
   2498 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2499 	}
   2500 
   2501 	ifp = &sc->sc_ethercom.ec_if;
   2502 	xname = device_xname(sc->sc_dev);
   2503 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2504 	ifp->if_softc = sc;
   2505 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2506 	ifp->if_extflags = IFEF_START_MPSAFE;
   2507 	ifp->if_ioctl = wm_ioctl;
   2508 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2509 		ifp->if_start = wm_nq_start;
   2510 		if (sc->sc_nqueues > 1) {
   2511 			ifp->if_transmit = wm_nq_transmit;
   2512 			deferred_start_func = wm_deferred_start;
   2513 		}
   2514 	} else {
   2515 		ifp->if_start = wm_start;
   2516 		if (sc->sc_nqueues > 1) {
   2517 			ifp->if_transmit = wm_transmit;
   2518 			deferred_start_func = wm_deferred_start;
   2519 		}
   2520 	}
   2521 	ifp->if_watchdog = wm_watchdog;
   2522 	ifp->if_init = wm_init;
   2523 	ifp->if_stop = wm_stop;
   2524 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2525 	IFQ_SET_READY(&ifp->if_snd);
   2526 
   2527 	/* Check for jumbo frame */
   2528 	switch (sc->sc_type) {
   2529 	case WM_T_82573:
   2530 		/* XXX limited to 9234 if ASPM is disabled */
   2531 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2532 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2533 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2534 		break;
   2535 	case WM_T_82571:
   2536 	case WM_T_82572:
   2537 	case WM_T_82574:
   2538 	case WM_T_82575:
   2539 	case WM_T_82576:
   2540 	case WM_T_82580:
   2541 	case WM_T_I350:
   2542 	case WM_T_I354: /* XXXX ok? */
   2543 	case WM_T_I210:
   2544 	case WM_T_I211:
   2545 	case WM_T_80003:
   2546 	case WM_T_ICH9:
   2547 	case WM_T_ICH10:
   2548 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2549 	case WM_T_PCH_LPT:
   2550 	case WM_T_PCH_SPT:
   2551 		/* XXX limited to 9234 */
   2552 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2553 		break;
   2554 	case WM_T_PCH:
   2555 		/* XXX limited to 4096 */
   2556 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2557 		break;
   2558 	case WM_T_82542_2_0:
   2559 	case WM_T_82542_2_1:
   2560 	case WM_T_82583:
   2561 	case WM_T_ICH8:
   2562 		/* No support for jumbo frame */
   2563 		break;
   2564 	default:
   2565 		/* ETHER_MAX_LEN_JUMBO */
   2566 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2567 		break;
   2568 	}
   2569 
    2570 	/* If we're an i82543 or greater, we can support VLANs. */
   2571 	if (sc->sc_type >= WM_T_82543)
   2572 		sc->sc_ethercom.ec_capabilities |=
   2573 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2574 
   2575 	/*
    2576 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2577 	 * on i82543 and later.
   2578 	 */
   2579 	if (sc->sc_type >= WM_T_82543) {
   2580 		ifp->if_capabilities |=
   2581 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2582 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2583 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2584 		    IFCAP_CSUM_TCPv6_Tx |
   2585 		    IFCAP_CSUM_UDPv6_Tx;
   2586 	}
   2587 
   2588 	/*
    2589 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2590 	 *
   2591 	 *	82541GI (8086:1076) ... no
   2592 	 *	82572EI (8086:10b9) ... yes
   2593 	 */
   2594 	if (sc->sc_type >= WM_T_82571) {
   2595 		ifp->if_capabilities |=
   2596 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2597 	}
   2598 
   2599 	/*
    2600 	 * If we're an i82544 or greater (except i82547), we can do
   2601 	 * TCP segmentation offload.
   2602 	 */
   2603 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2604 		ifp->if_capabilities |= IFCAP_TSOv4;
   2605 	}
   2606 
   2607 	if (sc->sc_type >= WM_T_82571) {
   2608 		ifp->if_capabilities |= IFCAP_TSOv6;
   2609 	}
   2610 
   2611 #ifdef WM_MPSAFE
   2612 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2613 #else
   2614 	sc->sc_core_lock = NULL;
   2615 #endif
   2616 
   2617 	/* Attach the interface. */
   2618 	if_initialize(ifp);
   2619 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2620 	if_deferred_start_init(ifp, deferred_start_func);
   2621 	ether_ifattach(ifp, enaddr);
   2622 	if_register(ifp);
   2623 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2624 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2625 			  RND_FLAG_DEFAULT);
   2626 
   2627 #ifdef WM_EVENT_COUNTERS
   2628 	/* Attach event counters. */
   2629 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2630 	    NULL, xname, "linkintr");
   2631 
   2632 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2633 	    NULL, xname, "tx_xoff");
   2634 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2635 	    NULL, xname, "tx_xon");
   2636 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2637 	    NULL, xname, "rx_xoff");
   2638 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2639 	    NULL, xname, "rx_xon");
   2640 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2641 	    NULL, xname, "rx_macctl");
   2642 #endif /* WM_EVENT_COUNTERS */
   2643 
   2644 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2645 		pmf_class_network_register(self, ifp);
   2646 	else
   2647 		aprint_error_dev(self, "couldn't establish power handler\n");
   2648 
   2649 	sc->sc_flags |= WM_F_ATTACHED;
   2650  out:
   2651 	return;
   2652 }
   2653 
   2654 /* The detach function (ca_detach) */
   2655 static int
   2656 wm_detach(device_t self, int flags __unused)
   2657 {
   2658 	struct wm_softc *sc = device_private(self);
   2659 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2660 	int i;
   2661 
   2662 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2663 		return 0;
   2664 
   2665 	/* Stop the interface. Callouts are stopped in it. */
   2666 	wm_stop(ifp, 1);
   2667 
   2668 	pmf_device_deregister(self);
   2669 
   2670 	/* Tell the firmware about the release */
   2671 	WM_CORE_LOCK(sc);
   2672 	wm_release_manageability(sc);
   2673 	wm_release_hw_control(sc);
   2674 	wm_enable_wakeup(sc);
   2675 	WM_CORE_UNLOCK(sc);
   2676 
   2677 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2678 
   2679 	/* Delete all remaining media. */
   2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2681 
   2682 	ether_ifdetach(ifp);
   2683 	if_detach(ifp);
   2684 	if_percpuq_destroy(sc->sc_ipq);
   2685 
   2686 	/* Unload RX dmamaps and free mbufs */
   2687 	for (i = 0; i < sc->sc_nqueues; i++) {
   2688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2689 		mutex_enter(rxq->rxq_lock);
   2690 		wm_rxdrain(rxq);
   2691 		mutex_exit(rxq->rxq_lock);
   2692 	}
   2693 	/* Must unlock here */
   2694 
   2695 	/* Disestablish the interrupt handler */
   2696 	for (i = 0; i < sc->sc_nintrs; i++) {
   2697 		if (sc->sc_ihs[i] != NULL) {
   2698 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2699 			sc->sc_ihs[i] = NULL;
   2700 		}
   2701 	}
   2702 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2703 
   2704 	wm_free_txrx_queues(sc);
   2705 
   2706 	/* Unmap the registers */
   2707 	if (sc->sc_ss) {
   2708 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2709 		sc->sc_ss = 0;
   2710 	}
   2711 	if (sc->sc_ios) {
   2712 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2713 		sc->sc_ios = 0;
   2714 	}
   2715 	if (sc->sc_flashs) {
   2716 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2717 		sc->sc_flashs = 0;
   2718 	}
   2719 
   2720 	if (sc->sc_core_lock)
   2721 		mutex_obj_free(sc->sc_core_lock);
   2722 	if (sc->sc_ich_phymtx)
   2723 		mutex_obj_free(sc->sc_ich_phymtx);
   2724 	if (sc->sc_ich_nvmmtx)
   2725 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2726 
   2727 	return 0;
   2728 }
   2729 
   2730 static bool
   2731 wm_suspend(device_t self, const pmf_qual_t *qual)
   2732 {
   2733 	struct wm_softc *sc = device_private(self);
   2734 
   2735 	wm_release_manageability(sc);
   2736 	wm_release_hw_control(sc);
   2737 	wm_enable_wakeup(sc);
   2738 
   2739 	return true;
   2740 }
   2741 
   2742 static bool
   2743 wm_resume(device_t self, const pmf_qual_t *qual)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 
   2747 	wm_init_manageability(sc);
   2748 
   2749 	return true;
   2750 }
   2751 
   2752 /*
   2753  * wm_watchdog:		[ifnet interface function]
   2754  *
   2755  *	Watchdog timer handler.
   2756  */
   2757 static void
   2758 wm_watchdog(struct ifnet *ifp)
   2759 {
   2760 	int qid;
   2761 	struct wm_softc *sc = ifp->if_softc;
   2762 
   2763 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2764 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2765 
   2766 		wm_watchdog_txq(ifp, txq);
   2767 	}
   2768 
   2769 	/* Reset the interface. */
   2770 	(void) wm_init(ifp);
   2771 
   2772 	/*
    2773 	 * There is still some upper-layer processing that calls
    2774 	 * ifp->if_start() directly, e.g. ALTQ.
   2775 	 */
   2776 	/* Try to get more packets going. */
   2777 	ifp->if_start(ifp);
   2778 }
   2779 
   2780 static void
   2781 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2782 {
   2783 	struct wm_softc *sc = ifp->if_softc;
   2784 
   2785 	/*
   2786 	 * Since we're using delayed interrupts, sweep up
   2787 	 * before we report an error.
   2788 	 */
   2789 	mutex_enter(txq->txq_lock);
   2790 	wm_txeof(sc, txq);
   2791 	mutex_exit(txq->txq_lock);
   2792 
   2793 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2794 #ifdef WM_DEBUG
   2795 		int i, j;
   2796 		struct wm_txsoft *txs;
   2797 #endif
   2798 		log(LOG_ERR,
   2799 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2800 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2801 		    txq->txq_next);
   2802 		ifp->if_oerrors++;
   2803 #ifdef WM_DEBUG
    2804 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2805 		    i = WM_NEXTTXS(txq, i)) {
    2806 			txs = &txq->txq_soft[i];
    2807 			printf("txs %d tx %d -> %d\n",
    2808 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2809 			for (j = txs->txs_firstdesc; ;
    2810 			    j = WM_NEXTTX(txq, j)) {
    2811 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2812 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2813 				printf("\t %#08x%08x\n",
    2814 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2815 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2816 				if (j == txs->txs_lastdesc)
    2817 					break;
    2818 			}
   2819 		}
   2820 #endif
   2821 	}
   2822 }
   2823 
   2824 /*
   2825  * wm_tick:
   2826  *
   2827  *	One second timer, used to check link status, sweep up
   2828  *	completed transmit jobs, etc.
   2829  */
   2830 static void
   2831 wm_tick(void *arg)
   2832 {
   2833 	struct wm_softc *sc = arg;
   2834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2835 #ifndef WM_MPSAFE
   2836 	int s = splnet();
   2837 #endif
   2838 
   2839 	WM_CORE_LOCK(sc);
   2840 
   2841 	if (sc->sc_core_stopping)
   2842 		goto out;
   2843 
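         	/*
         	 * Flow-control event counters.  These statistics registers are
         	 * clear-on-read, so each read returns the count accumulated
         	 * since the previous tick.
         	 */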
   2844 	if (sc->sc_type >= WM_T_82542_2_1) {
   2845 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2846 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2850 	}
   2851 
   2852 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2853 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2854 	    + CSR_READ(sc, WMREG_CRCERRS)
   2855 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2856 	    + CSR_READ(sc, WMREG_SYMERRC)
   2857 	    + CSR_READ(sc, WMREG_RXERRC)
   2858 	    + CSR_READ(sc, WMREG_SEC)
   2859 	    + CSR_READ(sc, WMREG_CEXTERR)
   2860 	    + CSR_READ(sc, WMREG_RLEC);
   2861 	/*
    2862 	 * WMREG_RNBC is incremented when no receive buffers are available
    2863 	 * in host memory.  It does not count dropped packets, because the
    2864 	 * ethernet controller can still receive packets in that case as
    2865 	 * long as there is space in the PHY's FIFO.
    2866 	 *
    2867 	 * To count WMREG_RNBC events, use a dedicated EVCNT rather than
    2868 	 * if_iqdrops.
   2869 	 */
   2870 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2871 
   2872 	if (sc->sc_flags & WM_F_HAS_MII)
   2873 		mii_tick(&sc->sc_mii);
   2874 	else if ((sc->sc_type >= WM_T_82575)
   2875 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2876 		wm_serdes_tick(sc);
   2877 	else
   2878 		wm_tbi_tick(sc);
   2879 
   2880 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2881 out:
   2882 	WM_CORE_UNLOCK(sc);
   2883 #ifndef WM_MPSAFE
   2884 	splx(s);
   2885 #endif
   2886 }
   2887 
   2888 static int
   2889 wm_ifflags_cb(struct ethercom *ec)
   2890 {
   2891 	struct ifnet *ifp = &ec->ec_if;
   2892 	struct wm_softc *sc = ifp->if_softc;
   2893 	int rc = 0;
   2894 
   2895 	WM_CORE_LOCK(sc);
   2896 
   2897 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2898 	sc->sc_if_flags = ifp->if_flags;
   2899 
   2900 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2901 		rc = ENETRESET;
   2902 		goto out;
   2903 	}
   2904 
   2905 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2906 		wm_set_filter(sc);
   2907 
   2908 	wm_set_vlan(sc);
   2909 
   2910 out:
   2911 	WM_CORE_UNLOCK(sc);
   2912 
   2913 	return rc;
   2914 }
   2915 
   2916 /*
   2917  * wm_ioctl:		[ifnet interface function]
   2918  *
   2919  *	Handle control requests from the operator.
   2920  */
   2921 static int
   2922 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2923 {
   2924 	struct wm_softc *sc = ifp->if_softc;
   2925 	struct ifreq *ifr = (struct ifreq *) data;
   2926 	struct ifaddr *ifa = (struct ifaddr *)data;
   2927 	struct sockaddr_dl *sdl;
   2928 	int s, error;
   2929 
   2930 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2931 		device_xname(sc->sc_dev), __func__));
   2932 
   2933 #ifndef WM_MPSAFE
   2934 	s = splnet();
   2935 #endif
   2936 	switch (cmd) {
   2937 	case SIOCSIFMEDIA:
   2938 	case SIOCGIFMEDIA:
   2939 		WM_CORE_LOCK(sc);
   2940 		/* Flow control requires full-duplex mode. */
   2941 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2942 		    (ifr->ifr_media & IFM_FDX) == 0)
   2943 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2944 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2945 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2946 				/* We can do both TXPAUSE and RXPAUSE. */
   2947 				ifr->ifr_media |=
   2948 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2949 			}
   2950 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2951 		}
   2952 		WM_CORE_UNLOCK(sc);
   2953 #ifdef WM_MPSAFE
   2954 		s = splnet();
   2955 #endif
   2956 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2957 #ifdef WM_MPSAFE
   2958 		splx(s);
   2959 #endif
   2960 		break;
   2961 	case SIOCINITIFADDR:
   2962 		WM_CORE_LOCK(sc);
   2963 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2964 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2965 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2966 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2967 			/* unicast address is first multicast entry */
   2968 			wm_set_filter(sc);
   2969 			error = 0;
   2970 			WM_CORE_UNLOCK(sc);
   2971 			break;
   2972 		}
   2973 		WM_CORE_UNLOCK(sc);
   2974 		/*FALLTHROUGH*/
   2975 	default:
   2976 #ifdef WM_MPSAFE
   2977 		s = splnet();
   2978 #endif
    2979 		/* ether_ioctl() may call wm_start(), so run it unlocked */
   2980 		error = ether_ioctl(ifp, cmd, data);
   2981 #ifdef WM_MPSAFE
   2982 		splx(s);
   2983 #endif
   2984 		if (error != ENETRESET)
   2985 			break;
   2986 
   2987 		error = 0;
   2988 
   2989 		if (cmd == SIOCSIFCAP) {
   2990 			error = (*ifp->if_init)(ifp);
   2991 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2992 			;
   2993 		else if (ifp->if_flags & IFF_RUNNING) {
   2994 			/*
   2995 			 * Multicast list has changed; set the hardware filter
   2996 			 * accordingly.
   2997 			 */
   2998 			WM_CORE_LOCK(sc);
   2999 			wm_set_filter(sc);
   3000 			WM_CORE_UNLOCK(sc);
   3001 		}
   3002 		break;
   3003 	}
   3004 
   3005 #ifndef WM_MPSAFE
   3006 	splx(s);
   3007 #endif
   3008 	return error;
   3009 }
   3010 
   3011 /* MAC address related */
   3012 
   3013 /*
    3014  * Get the offset of the MAC address and return it.
    3015  * If an error occurs, return offset 0.
   3016  */
   3017 static uint16_t
   3018 wm_check_alt_mac_addr(struct wm_softc *sc)
   3019 {
   3020 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3021 	uint16_t offset = NVM_OFF_MACADDR;
   3022 
   3023 	/* Try to read alternative MAC address pointer */
   3024 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3025 		return 0;
   3026 
    3027 	/* Check whether the pointer is valid. */
   3028 	if ((offset == 0x0000) || (offset == 0xffff))
   3029 		return 0;
   3030 
   3031 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3032 	/*
    3033 	 * Check whether the alternative MAC address is valid.  Some
    3034 	 * cards have a non-0xffff pointer yet do not actually use an
    3035 	 * alternative MAC address.
    3036 	 *
    3037 	 * A valid address must not have the multicast (group) bit set.
   3038 	 */
   3039 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3040 		if (((myea[0] & 0xff) & 0x01) == 0)
   3041 			return offset; /* Found */
   3042 
   3043 	/* Not found */
   3044 	return 0;
   3045 }
   3046 
   3047 static int
   3048 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3049 {
   3050 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3051 	uint16_t offset = NVM_OFF_MACADDR;
   3052 	int do_invert = 0;
   3053 
   3054 	switch (sc->sc_type) {
   3055 	case WM_T_82580:
   3056 	case WM_T_I350:
   3057 	case WM_T_I354:
   3058 		/* EEPROM Top Level Partitioning */
   3059 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3060 		break;
   3061 	case WM_T_82571:
   3062 	case WM_T_82575:
   3063 	case WM_T_82576:
   3064 	case WM_T_80003:
   3065 	case WM_T_I210:
   3066 	case WM_T_I211:
   3067 		offset = wm_check_alt_mac_addr(sc);
   3068 		if (offset == 0)
   3069 			if ((sc->sc_funcid & 0x01) == 1)
   3070 				do_invert = 1;
   3071 		break;
   3072 	default:
   3073 		if ((sc->sc_funcid & 0x01) == 1)
   3074 			do_invert = 1;
   3075 		break;
   3076 	}
   3077 
   3078 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3079 		goto bad;
   3080 
   3081 	enaddr[0] = myea[0] & 0xff;
   3082 	enaddr[1] = myea[0] >> 8;
   3083 	enaddr[2] = myea[1] & 0xff;
   3084 	enaddr[3] = myea[1] >> 8;
   3085 	enaddr[4] = myea[2] & 0xff;
   3086 	enaddr[5] = myea[2] >> 8;
   3087 
   3088 	/*
   3089 	 * Toggle the LSB of the MAC address on the second port
   3090 	 * of some dual port cards.
   3091 	 */
   3092 	if (do_invert != 0)
   3093 		enaddr[5] ^= 1;
   3094 
   3095 	return 0;
   3096 
   3097  bad:
   3098 	return -1;
   3099 }
   3100 
   3101 /*
   3102  * wm_set_ral:
   3103  *
    3104  *	Set an entry in the receive address list.
   3105  */
   3106 static void
   3107 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3108 {
   3109 	uint32_t ral_lo, ral_hi;
   3110 
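         	/* RAL_AV in the high word marks the receive-address entry as valid. */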
   3111 	if (enaddr != NULL) {
   3112 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3113 		    (enaddr[3] << 24);
   3114 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3115 		ral_hi |= RAL_AV;
   3116 	} else {
   3117 		ral_lo = 0;
   3118 		ral_hi = 0;
   3119 	}
   3120 
   3121 	if (sc->sc_type >= WM_T_82544) {
   3122 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3123 		    ral_lo);
   3124 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3125 		    ral_hi);
   3126 	} else {
   3127 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3128 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3129 	}
   3130 }
   3131 
   3132 /*
   3133  * wm_mchash:
   3134  *
   3135  *	Compute the hash of the multicast address for the 4096-bit
   3136  *	multicast filter.
   3137  */
   3138 static uint32_t
   3139 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3140 {
   3141 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3142 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3143 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3144 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3145 	uint32_t hash;
   3146 
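         	/*
         	 * The hash is taken from the upper bits of the last two bytes
         	 * of the address; sc_mchash_type selects which window of bits
         	 * is used.  ICH/PCH variants use a 10-bit (1024-entry) filter,
         	 * all others a 12-bit (4096-entry) one.
         	 */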
   3147 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3148 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3149 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3150 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3151 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3152 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3153 		return (hash & 0x3ff);
   3154 	}
   3155 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3156 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3157 
   3158 	return (hash & 0xfff);
   3159 }
   3160 
   3161 /*
   3162  * wm_set_filter:
   3163  *
   3164  *	Set up the receive filter.
   3165  */
   3166 static void
   3167 wm_set_filter(struct wm_softc *sc)
   3168 {
   3169 	struct ethercom *ec = &sc->sc_ethercom;
   3170 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3171 	struct ether_multi *enm;
   3172 	struct ether_multistep step;
   3173 	bus_addr_t mta_reg;
   3174 	uint32_t hash, reg, bit;
   3175 	int i, size, ralmax;
   3176 
   3177 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3178 		device_xname(sc->sc_dev), __func__));
   3179 
   3180 	if (sc->sc_type >= WM_T_82544)
   3181 		mta_reg = WMREG_CORDOVA_MTA;
   3182 	else
   3183 		mta_reg = WMREG_MTA;
   3184 
   3185 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3186 
   3187 	if (ifp->if_flags & IFF_BROADCAST)
   3188 		sc->sc_rctl |= RCTL_BAM;
   3189 	if (ifp->if_flags & IFF_PROMISC) {
   3190 		sc->sc_rctl |= RCTL_UPE;
   3191 		goto allmulti;
   3192 	}
   3193 
   3194 	/*
   3195 	 * Set the station address in the first RAL slot, and
   3196 	 * clear the remaining slots.
   3197 	 */
   3198 	if (sc->sc_type == WM_T_ICH8)
    3199 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3200 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3201 	    || (sc->sc_type == WM_T_PCH))
   3202 		size = WM_RAL_TABSIZE_ICH8;
   3203 	else if (sc->sc_type == WM_T_PCH2)
   3204 		size = WM_RAL_TABSIZE_PCH2;
    3205 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3206 		size = WM_RAL_TABSIZE_PCH_LPT;
   3207 	else if (sc->sc_type == WM_T_82575)
   3208 		size = WM_RAL_TABSIZE_82575;
   3209 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3210 		size = WM_RAL_TABSIZE_82576;
   3211 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3212 		size = WM_RAL_TABSIZE_I350;
   3213 	else
   3214 		size = WM_RAL_TABSIZE;
   3215 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3216 
   3217 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3218 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3219 		switch (i) {
   3220 		case 0:
   3221 			/* We can use all entries */
   3222 			ralmax = size;
   3223 			break;
   3224 		case 1:
   3225 			/* Only RAR[0] */
   3226 			ralmax = 1;
   3227 			break;
   3228 		default:
   3229 			/* available SHRA + RAR[0] */
   3230 			ralmax = i + 1;
   3231 		}
   3232 	} else
   3233 		ralmax = size;
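         	/*
         	 * Clear only the slots we are allowed to write: on PCH_LPT and
         	 * PCH_SPT, the slots at or above ralmax can be locked by the
         	 * management engine (FWSM_WLOCK_MAC), as determined above.
         	 */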
   3234 	for (i = 1; i < size; i++) {
   3235 		if (i < ralmax)
   3236 			wm_set_ral(sc, NULL, i);
   3237 	}
   3238 
   3239 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3240 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3241 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3242 	    || (sc->sc_type == WM_T_PCH_SPT))
   3243 		size = WM_ICH8_MC_TABSIZE;
   3244 	else
   3245 		size = WM_MC_TABSIZE;
   3246 	/* Clear out the multicast table. */
   3247 	for (i = 0; i < size; i++)
   3248 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3249 
   3250 	ETHER_LOCK(ec);
   3251 	ETHER_FIRST_MULTI(step, ec, enm);
   3252 	while (enm != NULL) {
   3253 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3254 			ETHER_UNLOCK(ec);
   3255 			/*
   3256 			 * We must listen to a range of multicast addresses.
   3257 			 * For now, just accept all multicasts, rather than
   3258 			 * trying to set only those filter bits needed to match
   3259 			 * the range.  (At this time, the only use of address
   3260 			 * ranges is for IP multicast routing, for which the
   3261 			 * range is big enough to require all bits set.)
   3262 			 */
   3263 			goto allmulti;
   3264 		}
   3265 
   3266 		hash = wm_mchash(sc, enm->enm_addrlo);
   3267 
   3268 		reg = (hash >> 5);
   3269 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3270 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3271 		    || (sc->sc_type == WM_T_PCH2)
   3272 		    || (sc->sc_type == WM_T_PCH_LPT)
   3273 		    || (sc->sc_type == WM_T_PCH_SPT))
   3274 			reg &= 0x1f;
   3275 		else
   3276 			reg &= 0x7f;
   3277 		bit = hash & 0x1f;
   3278 
   3279 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3280 		hash |= 1U << bit;
   3281 
   3282 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3283 			/*
   3284 			 * 82544 Errata 9: Certain register cannot be written
   3285 			 * with particular alignments in PCI-X bus operation
   3286 			 * (FCAH, MTA and VFTA).
   3287 			 */
   3288 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3289 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3290 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3291 		} else
   3292 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3293 
   3294 		ETHER_NEXT_MULTI(step, enm);
   3295 	}
   3296 	ETHER_UNLOCK(ec);
   3297 
   3298 	ifp->if_flags &= ~IFF_ALLMULTI;
   3299 	goto setit;
   3300 
   3301  allmulti:
   3302 	ifp->if_flags |= IFF_ALLMULTI;
   3303 	sc->sc_rctl |= RCTL_MPE;
   3304 
   3305  setit:
   3306 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3307 }
   3308 
   3309 /* Reset and init related */
   3310 
   3311 static void
   3312 wm_set_vlan(struct wm_softc *sc)
   3313 {
   3314 
   3315 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3316 		device_xname(sc->sc_dev), __func__));
   3317 
   3318 	/* Deal with VLAN enables. */
   3319 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3320 		sc->sc_ctrl |= CTRL_VME;
   3321 	else
   3322 		sc->sc_ctrl &= ~CTRL_VME;
   3323 
   3324 	/* Write the control registers. */
   3325 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3326 }
   3327 
   3328 static void
   3329 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3330 {
   3331 	uint32_t gcr;
   3332 	pcireg_t ctrl2;
   3333 
   3334 	gcr = CSR_READ(sc, WMREG_GCR);
   3335 
    3336 	/* Only take action if the timeout value is still at its default of 0 */
   3337 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3338 		goto out;
   3339 
   3340 	if ((gcr & GCR_CAP_VER2) == 0) {
   3341 		gcr |= GCR_CMPL_TMOUT_10MS;
   3342 		goto out;
   3343 	}
   3344 
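         	/*
         	 * The device reports PCIe capability version 2, so set a 16ms
         	 * completion timeout through the Device Control 2 register.
         	 */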
   3345 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3346 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3347 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3348 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3349 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3350 
   3351 out:
   3352 	/* Disable completion timeout resend */
   3353 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3354 
   3355 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3356 }
   3357 
   3358 void
   3359 wm_get_auto_rd_done(struct wm_softc *sc)
   3360 {
   3361 	int i;
   3362 
   3363 	/* wait for eeprom to reload */
   3364 	switch (sc->sc_type) {
   3365 	case WM_T_82571:
   3366 	case WM_T_82572:
   3367 	case WM_T_82573:
   3368 	case WM_T_82574:
   3369 	case WM_T_82583:
   3370 	case WM_T_82575:
   3371 	case WM_T_82576:
   3372 	case WM_T_82580:
   3373 	case WM_T_I350:
   3374 	case WM_T_I354:
   3375 	case WM_T_I210:
   3376 	case WM_T_I211:
   3377 	case WM_T_80003:
   3378 	case WM_T_ICH8:
   3379 	case WM_T_ICH9:
   3380 		for (i = 0; i < 10; i++) {
   3381 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3382 				break;
   3383 			delay(1000);
   3384 		}
   3385 		if (i == 10) {
   3386 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3387 			    "complete\n", device_xname(sc->sc_dev));
   3388 		}
   3389 		break;
   3390 	default:
   3391 		break;
   3392 	}
   3393 }
   3394 
   3395 void
   3396 wm_lan_init_done(struct wm_softc *sc)
   3397 {
   3398 	uint32_t reg = 0;
   3399 	int i;
   3400 
   3401 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3402 		device_xname(sc->sc_dev), __func__));
   3403 
   3404 	/* Wait for eeprom to reload */
   3405 	switch (sc->sc_type) {
   3406 	case WM_T_ICH10:
   3407 	case WM_T_PCH:
   3408 	case WM_T_PCH2:
   3409 	case WM_T_PCH_LPT:
   3410 	case WM_T_PCH_SPT:
   3411 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3412 			reg = CSR_READ(sc, WMREG_STATUS);
   3413 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3414 				break;
   3415 			delay(100);
   3416 		}
   3417 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3418 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3419 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3420 		}
   3421 		break;
   3422 	default:
   3423 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3424 		    __func__);
   3425 		break;
   3426 	}
   3427 
   3428 	reg &= ~STATUS_LAN_INIT_DONE;
   3429 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3430 }
   3431 
   3432 void
   3433 wm_get_cfg_done(struct wm_softc *sc)
   3434 {
   3435 	int mask;
   3436 	uint32_t reg;
   3437 	int i;
   3438 
   3439 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3440 		device_xname(sc->sc_dev), __func__));
   3441 
   3442 	/* Wait for eeprom to reload */
   3443 	switch (sc->sc_type) {
   3444 	case WM_T_82542_2_0:
   3445 	case WM_T_82542_2_1:
   3446 		/* null */
   3447 		break;
   3448 	case WM_T_82543:
   3449 	case WM_T_82544:
   3450 	case WM_T_82540:
   3451 	case WM_T_82545:
   3452 	case WM_T_82545_3:
   3453 	case WM_T_82546:
   3454 	case WM_T_82546_3:
   3455 	case WM_T_82541:
   3456 	case WM_T_82541_2:
   3457 	case WM_T_82547:
   3458 	case WM_T_82547_2:
   3459 	case WM_T_82573:
   3460 	case WM_T_82574:
   3461 	case WM_T_82583:
   3462 		/* generic */
   3463 		delay(10*1000);
   3464 		break;
   3465 	case WM_T_80003:
   3466 	case WM_T_82571:
   3467 	case WM_T_82572:
   3468 	case WM_T_82575:
   3469 	case WM_T_82576:
   3470 	case WM_T_82580:
   3471 	case WM_T_I350:
   3472 	case WM_T_I354:
   3473 	case WM_T_I210:
   3474 	case WM_T_I211:
   3475 		if (sc->sc_type == WM_T_82571) {
    3476 			/* On 82571, all functions share the port 0 bit */
   3477 			mask = EEMNGCTL_CFGDONE_0;
   3478 		} else
   3479 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3480 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3481 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3482 				break;
   3483 			delay(1000);
   3484 		}
   3485 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3486 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3487 				device_xname(sc->sc_dev), __func__));
   3488 		}
   3489 		break;
   3490 	case WM_T_ICH8:
   3491 	case WM_T_ICH9:
   3492 	case WM_T_ICH10:
   3493 	case WM_T_PCH:
   3494 	case WM_T_PCH2:
   3495 	case WM_T_PCH_LPT:
   3496 	case WM_T_PCH_SPT:
   3497 		delay(10*1000);
   3498 		if (sc->sc_type >= WM_T_ICH10)
   3499 			wm_lan_init_done(sc);
   3500 		else
   3501 			wm_get_auto_rd_done(sc);
   3502 
   3503 		reg = CSR_READ(sc, WMREG_STATUS);
   3504 		if ((reg & STATUS_PHYRA) != 0)
   3505 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3506 		break;
   3507 	default:
   3508 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3509 		    __func__);
   3510 		break;
   3511 	}
   3512 }
   3513 
   3514 /* Init hardware bits */
   3515 void
   3516 wm_initialize_hardware_bits(struct wm_softc *sc)
   3517 {
   3518 	uint32_t tarc0, tarc1, reg;
   3519 
   3520 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3521 		device_xname(sc->sc_dev), __func__));
   3522 
   3523 	/* For 82571 variant, 80003 and ICHs */
   3524 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3525 	    || (sc->sc_type >= WM_T_80003)) {
   3526 
   3527 		/* Transmit Descriptor Control 0 */
   3528 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3529 		reg |= TXDCTL_COUNT_DESC;
   3530 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3531 
   3532 		/* Transmit Descriptor Control 1 */
   3533 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3534 		reg |= TXDCTL_COUNT_DESC;
   3535 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3536 
   3537 		/* TARC0 */
   3538 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3539 		switch (sc->sc_type) {
   3540 		case WM_T_82571:
   3541 		case WM_T_82572:
   3542 		case WM_T_82573:
   3543 		case WM_T_82574:
   3544 		case WM_T_82583:
   3545 		case WM_T_80003:
   3546 			/* Clear bits 30..27 */
   3547 			tarc0 &= ~__BITS(30, 27);
   3548 			break;
   3549 		default:
   3550 			break;
   3551 		}
   3552 
   3553 		switch (sc->sc_type) {
   3554 		case WM_T_82571:
   3555 		case WM_T_82572:
   3556 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3557 
   3558 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3559 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3560 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3561 			/* 8257[12] Errata No.7 */
    3562 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3563 
   3564 			/* TARC1 bit 28 */
   3565 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3566 				tarc1 &= ~__BIT(28);
   3567 			else
   3568 				tarc1 |= __BIT(28);
   3569 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3570 
   3571 			/*
   3572 			 * 8257[12] Errata No.13
    3573 			 * Disable Dynamic Clock Gating.
   3574 			 */
   3575 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3576 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3577 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3578 			break;
   3579 		case WM_T_82573:
   3580 		case WM_T_82574:
   3581 		case WM_T_82583:
   3582 			if ((sc->sc_type == WM_T_82574)
   3583 			    || (sc->sc_type == WM_T_82583))
   3584 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3585 
   3586 			/* Extended Device Control */
   3587 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3588 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3589 			reg |= __BIT(22);	/* Set bit 22 */
   3590 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3591 
   3592 			/* Device Control */
   3593 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3594 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3595 
   3596 			/* PCIe Control Register */
   3597 			/*
   3598 			 * 82573 Errata (unknown).
   3599 			 *
   3600 			 * 82574 Errata 25 and 82583 Errata 12
   3601 			 * "Dropped Rx Packets":
    3602 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3603 			 */
   3604 			reg = CSR_READ(sc, WMREG_GCR);
   3605 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3606 			CSR_WRITE(sc, WMREG_GCR, reg);
   3607 
   3608 			if ((sc->sc_type == WM_T_82574)
   3609 			    || (sc->sc_type == WM_T_82583)) {
   3610 				/*
   3611 				 * Document says this bit must be set for
   3612 				 * proper operation.
   3613 				 */
   3614 				reg = CSR_READ(sc, WMREG_GCR);
   3615 				reg |= __BIT(22);
   3616 				CSR_WRITE(sc, WMREG_GCR, reg);
   3617 
   3618 				/*
    3619 				 * Apply a workaround for a documented
    3620 				 * hardware erratum: some PCIe completions
    3621 				 * can be error prone or unreliable,
    3622 				 * particularly with ASPM enabled.
    3623 				 * Without the fix, the issue can cause
    3624 				 * Tx timeouts.
   3625 				 */
   3626 				reg = CSR_READ(sc, WMREG_GCR2);
   3627 				reg |= __BIT(0);
   3628 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3629 			}
   3630 			break;
   3631 		case WM_T_80003:
   3632 			/* TARC0 */
   3633 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3634 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3635 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3636 
   3637 			/* TARC1 bit 28 */
   3638 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3639 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3640 				tarc1 &= ~__BIT(28);
   3641 			else
   3642 				tarc1 |= __BIT(28);
   3643 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3644 			break;
   3645 		case WM_T_ICH8:
   3646 		case WM_T_ICH9:
   3647 		case WM_T_ICH10:
   3648 		case WM_T_PCH:
   3649 		case WM_T_PCH2:
   3650 		case WM_T_PCH_LPT:
   3651 		case WM_T_PCH_SPT:
   3652 			/* TARC0 */
   3653 			if ((sc->sc_type == WM_T_ICH8)
   3654 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3655 				/* Set TARC0 bits 29 and 28 */
   3656 				tarc0 |= __BITS(29, 28);
   3657 			}
   3658 			/* Set TARC0 bits 23,24,26,27 */
   3659 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3660 
   3661 			/* CTRL_EXT */
   3662 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3663 			reg |= __BIT(22);	/* Set bit 22 */
   3664 			/*
   3665 			 * Enable PHY low-power state when MAC is at D3
   3666 			 * w/o WoL
   3667 			 */
   3668 			if (sc->sc_type >= WM_T_PCH)
   3669 				reg |= CTRL_EXT_PHYPDEN;
   3670 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3671 
   3672 			/* TARC1 */
   3673 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3674 			/* bit 28 */
   3675 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3676 				tarc1 &= ~__BIT(28);
   3677 			else
   3678 				tarc1 |= __BIT(28);
   3679 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3680 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3681 
   3682 			/* Device Status */
   3683 			if (sc->sc_type == WM_T_ICH8) {
   3684 				reg = CSR_READ(sc, WMREG_STATUS);
   3685 				reg &= ~__BIT(31);
   3686 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3687 
   3688 			}
   3689 
   3690 			/* IOSFPC */
   3691 			if (sc->sc_type == WM_T_PCH_SPT) {
   3692 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3693 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3694 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3695 			}
   3696 			/*
    3697 			 * To work around a descriptor data corruption issue
    3698 			 * with NFSv2 UDP traffic, simply disable the NFS
    3699 			 * filtering capability.
   3700 			 */
   3701 			reg = CSR_READ(sc, WMREG_RFCTL);
   3702 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3703 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3704 			break;
   3705 		default:
   3706 			break;
   3707 		}
   3708 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3709 
   3710 		switch (sc->sc_type) {
   3711 		/*
   3712 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3713 		 * Avoid RSS Hash Value bug.
   3714 		 */
   3715 		case WM_T_82571:
   3716 		case WM_T_82572:
   3717 		case WM_T_82573:
   3718 		case WM_T_80003:
   3719 		case WM_T_ICH8:
   3720 			reg = CSR_READ(sc, WMREG_RFCTL);
    3721 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3722 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3723 			break;
   3724 		/*
   3725 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3726 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3727 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3728 		 * Correctly by the Device"
   3729 		 *
   3730 		 * I354(C2000) Errata AVR53:
   3731 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3732 		 * Hang"
   3733 		 */
   3734 		case WM_T_82575:
   3735 		case WM_T_82576:
   3736 		case WM_T_82580:
   3737 		case WM_T_I350:
   3738 		case WM_T_I210:
   3739 		case WM_T_I211:
   3740 		case WM_T_I354:
   3741 			reg = CSR_READ(sc, WMREG_RFCTL);
   3742 			reg |= WMREG_RFCTL_IPV6EXDIS;
   3743 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3744 			break;
   3745 		default:
   3746 			break;
   3747 		}
   3748 	}
   3749 }
   3750 
   3751 static uint32_t
   3752 wm_rxpbs_adjust_82580(uint32_t val)
   3753 {
   3754 	uint32_t rv = 0;
   3755 
   3756 	if (val < __arraycount(wm_82580_rxpbs_table))
   3757 		rv = wm_82580_rxpbs_table[val];
   3758 
   3759 	return rv;
   3760 }
   3761 
   3762 /*
   3763  * wm_reset_phy:
   3764  *
   3765  *	generic PHY reset function.
   3766  *	Same as e1000_phy_hw_reset_generic()
   3767  */
   3768 static void
   3769 wm_reset_phy(struct wm_softc *sc)
   3770 {
   3771 	uint32_t reg;
   3772 
   3773 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3774 		device_xname(sc->sc_dev), __func__));
   3775 	if (wm_phy_resetisblocked(sc))
   3776 		return;
   3777 
   3778 	sc->phy.acquire(sc);
   3779 
   3780 	reg = CSR_READ(sc, WMREG_CTRL);
   3781 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3782 	CSR_WRITE_FLUSH(sc);
   3783 
   3784 	delay(sc->phy.reset_delay_us);
   3785 
   3786 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3787 	CSR_WRITE_FLUSH(sc);
   3788 
   3789 	delay(150);
   3790 
   3791 	sc->phy.release(sc);
   3792 
   3793 	wm_get_cfg_done(sc);
   3794 }
   3795 
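         /*
          * wm_flush_desc_rings:
          *
          *	Flush descriptors that may still be pending in the rings before
          *	a reset.  If WM_PCI_DESCRING_STATUS reports a flush request,
          *	push one dummy TX descriptor through the TX ring and, if the
          *	request persists, briefly toggle the RX ring with adjusted
          *	thresholds so the flush takes effect.
          */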
   3796 static void
   3797 wm_flush_desc_rings(struct wm_softc *sc)
   3798 {
   3799 	pcireg_t preg;
   3800 	uint32_t reg;
   3801 	int nexttx;
   3802 
   3803 	/* First, disable MULR fix in FEXTNVM11 */
   3804 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3805 	reg |= FEXTNVM11_DIS_MULRFIX;
   3806 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3807 
   3808 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3809 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3810 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3811 		struct wm_txqueue *txq;
   3812 		wiseman_txdesc_t *txd;
   3813 
   3814 		/* TX */
   3815 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3816 		    device_xname(sc->sc_dev), preg, reg);
   3817 		reg = CSR_READ(sc, WMREG_TCTL);
   3818 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3819 
   3820 		txq = &sc->sc_queue[0].wmq_txq;
   3821 		nexttx = txq->txq_next;
   3822 		txd = &txq->txq_descs[nexttx];
   3823 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3824 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3825 		txd->wtx_fields.wtxu_status = 0;
   3826 		txd->wtx_fields.wtxu_options = 0;
   3827 		txd->wtx_fields.wtxu_vlan = 0;
   3828 
   3829 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3830 			BUS_SPACE_BARRIER_WRITE);
   3831 
   3832 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3833 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3834 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3835 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3836 		delay(250);
   3837 	}
   3838 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3839 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3840 		uint32_t rctl;
   3841 
   3842 		/* RX */
   3843 		printf("%s: Need RX flush (reg = %08x)\n",
   3844 		    device_xname(sc->sc_dev), preg);
   3845 		rctl = CSR_READ(sc, WMREG_RCTL);
   3846 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3847 		CSR_WRITE_FLUSH(sc);
   3848 		delay(150);
   3849 
   3850 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3851 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3852 		reg &= 0xffffc000;
   3853 		/*
   3854 		 * update thresholds: prefetch threshold to 31, host threshold
   3855 		 * to 1 and make sure the granularity is "descriptors" and not
   3856 		 * "cache lines"
   3857 		 */
   3858 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3859 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3860 
   3861 		/*
   3862 		 * momentarily enable the RX ring for the changes to take
   3863 		 * effect
   3864 		 */
   3865 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3866 		CSR_WRITE_FLUSH(sc);
   3867 		delay(150);
   3868 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3869 	}
   3870 }
   3871 
   3872 /*
   3873  * wm_reset:
   3874  *
   3875  *	Reset the i82542 chip.
   3876  */
   3877 static void
   3878 wm_reset(struct wm_softc *sc)
   3879 {
   3880 	int phy_reset = 0;
   3881 	int i, error = 0;
   3882 	uint32_t reg;
   3883 
   3884 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3885 		device_xname(sc->sc_dev), __func__));
   3886 	KASSERT(sc->sc_type != 0);
   3887 
   3888 	/*
   3889 	 * Allocate on-chip memory according to the MTU size.
   3890 	 * The Packet Buffer Allocation register must be written
   3891 	 * before the chip is reset.
   3892 	 */
   3893 	switch (sc->sc_type) {
   3894 	case WM_T_82547:
   3895 	case WM_T_82547_2:
   3896 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3897 		    PBA_22K : PBA_30K;
   3898 		for (i = 0; i < sc->sc_nqueues; i++) {
   3899 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3900 			txq->txq_fifo_head = 0;
   3901 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3902 			txq->txq_fifo_size =
   3903 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3904 			txq->txq_fifo_stall = 0;
   3905 		}
   3906 		break;
   3907 	case WM_T_82571:
   3908 	case WM_T_82572:
    3909 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3910 	case WM_T_80003:
   3911 		sc->sc_pba = PBA_32K;
   3912 		break;
   3913 	case WM_T_82573:
   3914 		sc->sc_pba = PBA_12K;
   3915 		break;
   3916 	case WM_T_82574:
   3917 	case WM_T_82583:
   3918 		sc->sc_pba = PBA_20K;
   3919 		break;
   3920 	case WM_T_82576:
   3921 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3922 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3923 		break;
   3924 	case WM_T_82580:
   3925 	case WM_T_I350:
   3926 	case WM_T_I354:
   3927 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3928 		break;
   3929 	case WM_T_I210:
   3930 	case WM_T_I211:
   3931 		sc->sc_pba = PBA_34K;
   3932 		break;
   3933 	case WM_T_ICH8:
   3934 		/* Workaround for a bit corruption issue in FIFO memory */
   3935 		sc->sc_pba = PBA_8K;
   3936 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3937 		break;
   3938 	case WM_T_ICH9:
   3939 	case WM_T_ICH10:
   3940 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3941 		    PBA_14K : PBA_10K;
   3942 		break;
   3943 	case WM_T_PCH:
   3944 	case WM_T_PCH2:
   3945 	case WM_T_PCH_LPT:
   3946 	case WM_T_PCH_SPT:
   3947 		sc->sc_pba = PBA_26K;
   3948 		break;
   3949 	default:
   3950 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3951 		    PBA_40K : PBA_48K;
   3952 		break;
   3953 	}
   3954 	/*
   3955 	 * Only old or non-multiqueue devices have the PBA register
   3956 	 * XXX Need special handling for 82575.
   3957 	 */
   3958 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3959 	    || (sc->sc_type == WM_T_82575))
   3960 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3961 
   3962 	/* Prevent the PCI-E bus from sticking */
   3963 	if (sc->sc_flags & WM_F_PCIE) {
   3964 		int timeout = 800;
   3965 
   3966 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3967 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3968 
   3969 		while (timeout--) {
   3970 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3971 			    == 0)
   3972 				break;
   3973 			delay(100);
   3974 		}
   3975 	}
   3976 
   3977 	/* Set the completion timeout for interface */
   3978 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3979 	    || (sc->sc_type == WM_T_82580)
   3980 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3981 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3982 		wm_set_pcie_completion_timeout(sc);
   3983 
   3984 	/* Clear interrupt */
   3985 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3986 	if (sc->sc_nintrs > 1) {
   3987 		if (sc->sc_type != WM_T_82574) {
   3988 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3989 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3990 		} else {
   3991 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3992 		}
   3993 	}
   3994 
   3995 	/* Stop the transmit and receive processes. */
   3996 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3997 	sc->sc_rctl &= ~RCTL_EN;
   3998 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3999 	CSR_WRITE_FLUSH(sc);
   4000 
   4001 	/* XXX set_tbi_sbp_82543() */
   4002 
   4003 	delay(10*1000);
   4004 
   4005 	/* Must acquire the MDIO ownership before MAC reset */
   4006 	switch (sc->sc_type) {
   4007 	case WM_T_82573:
   4008 	case WM_T_82574:
   4009 	case WM_T_82583:
   4010 		error = wm_get_hw_semaphore_82573(sc);
   4011 		break;
   4012 	default:
   4013 		break;
   4014 	}
   4015 
   4016 	/*
   4017 	 * 82541 Errata 29? & 82547 Errata 28?
   4018 	 * See also the description about PHY_RST bit in CTRL register
   4019 	 * in 8254x_GBe_SDM.pdf.
   4020 	 */
   4021 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4022 		CSR_WRITE(sc, WMREG_CTRL,
   4023 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4024 		CSR_WRITE_FLUSH(sc);
   4025 		delay(5000);
   4026 	}
   4027 
   4028 	switch (sc->sc_type) {
   4029 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4030 	case WM_T_82541:
   4031 	case WM_T_82541_2:
   4032 	case WM_T_82547:
   4033 	case WM_T_82547_2:
   4034 		/*
   4035 		 * On some chipsets, a reset through a memory-mapped write
   4036 		 * cycle can cause the chip to reset before completing the
    4037 		 * write cycle.  This causes a major headache that can be
   4038 		 * avoided by issuing the reset via indirect register writes
   4039 		 * through I/O space.
   4040 		 *
   4041 		 * So, if we successfully mapped the I/O BAR at attach time,
   4042 		 * use that.  Otherwise, try our luck with a memory-mapped
   4043 		 * reset.
   4044 		 */
   4045 		if (sc->sc_flags & WM_F_IOH_VALID)
   4046 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4047 		else
   4048 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4049 		break;
   4050 	case WM_T_82545_3:
   4051 	case WM_T_82546_3:
   4052 		/* Use the shadow control register on these chips. */
   4053 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4054 		break;
   4055 	case WM_T_80003:
   4056 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4057 		sc->phy.acquire(sc);
   4058 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4059 		sc->phy.release(sc);
   4060 		break;
   4061 	case WM_T_ICH8:
   4062 	case WM_T_ICH9:
   4063 	case WM_T_ICH10:
   4064 	case WM_T_PCH:
   4065 	case WM_T_PCH2:
   4066 	case WM_T_PCH_LPT:
   4067 	case WM_T_PCH_SPT:
   4068 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4069 		if (wm_phy_resetisblocked(sc) == false) {
   4070 			/*
   4071 			 * Gate automatic PHY configuration by hardware on
   4072 			 * non-managed 82579
   4073 			 */
   4074 			if ((sc->sc_type == WM_T_PCH2)
   4075 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4076 				== 0))
   4077 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4078 
   4079 			reg |= CTRL_PHY_RESET;
   4080 			phy_reset = 1;
   4081 		} else
   4082 			printf("XXX reset is blocked!!!\n");
   4083 		sc->phy.acquire(sc);
   4084 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4085 		/* Don't insert a completion barrier during reset */
   4086 		delay(20*1000);
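         		/*
         		 * XXX Presumably the mutex is dropped directly (instead
         		 * of calling sc->phy.release()) because the MAC reset
         		 * has just cleared the hardware semaphore state.
         		 */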
   4087 		mutex_exit(sc->sc_ich_phymtx);
   4088 		break;
   4089 	case WM_T_82580:
   4090 	case WM_T_I350:
   4091 	case WM_T_I354:
   4092 	case WM_T_I210:
   4093 	case WM_T_I211:
   4094 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4095 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4096 			CSR_WRITE_FLUSH(sc);
   4097 		delay(5000);
   4098 		break;
   4099 	case WM_T_82542_2_0:
   4100 	case WM_T_82542_2_1:
   4101 	case WM_T_82543:
   4102 	case WM_T_82540:
   4103 	case WM_T_82545:
   4104 	case WM_T_82546:
   4105 	case WM_T_82571:
   4106 	case WM_T_82572:
   4107 	case WM_T_82573:
   4108 	case WM_T_82574:
   4109 	case WM_T_82575:
   4110 	case WM_T_82576:
   4111 	case WM_T_82583:
   4112 	default:
   4113 		/* Everything else can safely use the documented method. */
   4114 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4115 		break;
   4116 	}
   4117 
   4118 	/* Must release the MDIO ownership after MAC reset */
   4119 	switch (sc->sc_type) {
   4120 	case WM_T_82573:
   4121 	case WM_T_82574:
   4122 	case WM_T_82583:
   4123 		if (error == 0)
   4124 			wm_put_hw_semaphore_82573(sc);
   4125 		break;
   4126 	default:
   4127 		break;
   4128 	}
   4129 
   4130 	if (phy_reset != 0)
   4131 		wm_get_cfg_done(sc);
   4132 
   4133 	/* reload EEPROM */
   4134 	switch (sc->sc_type) {
   4135 	case WM_T_82542_2_0:
   4136 	case WM_T_82542_2_1:
   4137 	case WM_T_82543:
   4138 	case WM_T_82544:
   4139 		delay(10);
   4140 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4141 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4142 		CSR_WRITE_FLUSH(sc);
   4143 		delay(2000);
   4144 		break;
   4145 	case WM_T_82540:
   4146 	case WM_T_82545:
   4147 	case WM_T_82545_3:
   4148 	case WM_T_82546:
   4149 	case WM_T_82546_3:
   4150 		delay(5*1000);
   4151 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4152 		break;
   4153 	case WM_T_82541:
   4154 	case WM_T_82541_2:
   4155 	case WM_T_82547:
   4156 	case WM_T_82547_2:
   4157 		delay(20000);
   4158 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4159 		break;
   4160 	case WM_T_82571:
   4161 	case WM_T_82572:
   4162 	case WM_T_82573:
   4163 	case WM_T_82574:
   4164 	case WM_T_82583:
   4165 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4166 			delay(10);
   4167 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4168 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4169 			CSR_WRITE_FLUSH(sc);
   4170 		}
   4171 		/* check EECD_EE_AUTORD */
   4172 		wm_get_auto_rd_done(sc);
   4173 		/*
    4174 		 * PHY configuration from the NVM starts right after
    4175 		 * EECD_AUTO_RD is set.
   4176 		 */
   4177 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4178 		    || (sc->sc_type == WM_T_82583))
   4179 			delay(25*1000);
   4180 		break;
   4181 	case WM_T_82575:
   4182 	case WM_T_82576:
   4183 	case WM_T_82580:
   4184 	case WM_T_I350:
   4185 	case WM_T_I354:
   4186 	case WM_T_I210:
   4187 	case WM_T_I211:
   4188 	case WM_T_80003:
   4189 		/* check EECD_EE_AUTORD */
   4190 		wm_get_auto_rd_done(sc);
   4191 		break;
   4192 	case WM_T_ICH8:
   4193 	case WM_T_ICH9:
   4194 	case WM_T_ICH10:
   4195 	case WM_T_PCH:
   4196 	case WM_T_PCH2:
   4197 	case WM_T_PCH_LPT:
   4198 	case WM_T_PCH_SPT:
   4199 		break;
   4200 	default:
   4201 		panic("%s: unknown type\n", __func__);
   4202 	}
   4203 
   4204 	/* Check whether EEPROM is present or not */
   4205 	switch (sc->sc_type) {
   4206 	case WM_T_82575:
   4207 	case WM_T_82576:
   4208 	case WM_T_82580:
   4209 	case WM_T_I350:
   4210 	case WM_T_I354:
   4211 	case WM_T_ICH8:
   4212 	case WM_T_ICH9:
   4213 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4214 			/* Not found */
   4215 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4216 			if (sc->sc_type == WM_T_82575)
   4217 				wm_reset_init_script_82575(sc);
   4218 		}
   4219 		break;
   4220 	default:
   4221 		break;
   4222 	}
   4223 
   4224 	if ((sc->sc_type == WM_T_82580)
   4225 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4226 		/* clear global device reset status bit */
   4227 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4228 	}
   4229 
   4230 	/* Clear any pending interrupt events. */
   4231 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4232 	reg = CSR_READ(sc, WMREG_ICR);
   4233 	if (sc->sc_nintrs > 1) {
   4234 		if (sc->sc_type != WM_T_82574) {
   4235 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4236 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4237 		} else
   4238 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4239 	}
   4240 
   4241 	/* reload sc_ctrl */
   4242 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4243 
   4244 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4245 		wm_set_eee_i350(sc);
   4246 
   4247 	/* Clear the host wakeup bit after lcd reset */
   4248 	if (sc->sc_type >= WM_T_PCH) {
   4249 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4250 		    BM_PORT_GEN_CFG);
   4251 		reg &= ~BM_WUC_HOST_WU_BIT;
   4252 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4253 		    BM_PORT_GEN_CFG, reg);
   4254 	}
   4255 
   4256 	/*
   4257 	 * For PCH, this write will make sure that any noise will be detected
   4258 	 * as a CRC error and be dropped rather than show up as a bad packet
   4259 	 * to the DMA engine
   4260 	 */
   4261 	if (sc->sc_type == WM_T_PCH)
   4262 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4263 
   4264 	if (sc->sc_type >= WM_T_82544)
   4265 		CSR_WRITE(sc, WMREG_WUC, 0);
   4266 
   4267 	wm_reset_mdicnfg_82580(sc);
   4268 
   4269 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4270 		wm_pll_workaround_i210(sc);
   4271 }
   4272 
   4273 /*
   4274  * wm_add_rxbuf:
   4275  *
    4276  *	Add a receive buffer to the indicated descriptor.
   4277  */
   4278 static int
   4279 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4280 {
   4281 	struct wm_softc *sc = rxq->rxq_sc;
   4282 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4283 	struct mbuf *m;
   4284 	int error;
   4285 
   4286 	KASSERT(mutex_owned(rxq->rxq_lock));
   4287 
   4288 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4289 	if (m == NULL)
   4290 		return ENOBUFS;
   4291 
   4292 	MCLGET(m, M_DONTWAIT);
   4293 	if ((m->m_flags & M_EXT) == 0) {
   4294 		m_freem(m);
   4295 		return ENOBUFS;
   4296 	}
   4297 
   4298 	if (rxs->rxs_mbuf != NULL)
   4299 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4300 
   4301 	rxs->rxs_mbuf = m;
   4302 
   4303 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4304 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4305 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4306 	if (error) {
   4307 		/* XXX XXX XXX */
   4308 		aprint_error_dev(sc->sc_dev,
   4309 		    "unable to load rx DMA map %d, error = %d\n",
   4310 		    idx, error);
   4311 		panic("wm_add_rxbuf");
   4312 	}
   4313 
   4314 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4315 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4316 
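         	/*
         	 * On NEWQUEUE devices, write the descriptor here only while the
         	 * receiver is enabled; otherwise the whole ring is presumably
         	 * reinitialized when the receiver is brought up.
         	 */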
   4317 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4318 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4319 			wm_init_rxdesc(rxq, idx);
   4320 	} else
   4321 		wm_init_rxdesc(rxq, idx);
   4322 
   4323 	return 0;
   4324 }
   4325 
   4326 /*
   4327  * wm_rxdrain:
   4328  *
   4329  *	Drain the receive queue.
   4330  */
   4331 static void
   4332 wm_rxdrain(struct wm_rxqueue *rxq)
   4333 {
   4334 	struct wm_softc *sc = rxq->rxq_sc;
   4335 	struct wm_rxsoft *rxs;
   4336 	int i;
   4337 
   4338 	KASSERT(mutex_owned(rxq->rxq_lock));
   4339 
   4340 	for (i = 0; i < WM_NRXDESC; i++) {
   4341 		rxs = &rxq->rxq_soft[i];
   4342 		if (rxs->rxs_mbuf != NULL) {
   4343 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4344 			m_freem(rxs->rxs_mbuf);
   4345 			rxs->rxs_mbuf = NULL;
   4346 		}
   4347 	}
   4348 }
   4349 
   4350 
   4351 /*
   4352  * XXX copy from FreeBSD's sys/net/rss_config.c
   4353  */
   4354 /*
   4355  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4356  * effectiveness may be limited by algorithm choice and available entropy
   4357  * during the boot.
   4358  *
   4359  * XXXRW: And that we don't randomize it yet!
   4360  *
   4361  * This is the default Microsoft RSS specification key which is also
   4362  * the Chelsio T5 firmware default key.
   4363  */
   4364 #define RSS_KEYSIZE 40
   4365 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4366 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4367 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4368 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4369 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4370 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4371 };
   4372 
   4373 /*
    4374  * Caller must pass an array of sizeof(wm_rss_key) bytes.
    4375  *
    4376  * XXX
    4377  * Since if_ixgbe may use this function, it should not be an
    4378  * if_wm-specific function.
   4379  */
   4380 static void
   4381 wm_rss_getkey(uint8_t *key)
   4382 {
   4383 
   4384 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4385 }
   4386 
   4387 /*
    4388  * Set up the registers for RSS.
    4389  *
    4390  * XXX no VMDq support yet
   4391  */
   4392 static void
   4393 wm_init_rss(struct wm_softc *sc)
   4394 {
   4395 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4396 	int i;
   4397 
   4398 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4399 
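         	/*
         	 * Fill the redirection table: hash bucket i is served by queue
         	 * (i % sc_nqueues), spreading the buckets round-robin across
         	 * the active queues.
         	 */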
   4400 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4401 		int qid, reta_ent;
   4402 
   4403 		qid  = i % sc->sc_nqueues;
    4404 		switch (sc->sc_type) {
   4405 		case WM_T_82574:
   4406 			reta_ent = __SHIFTIN(qid,
   4407 			    RETA_ENT_QINDEX_MASK_82574);
   4408 			break;
   4409 		case WM_T_82575:
   4410 			reta_ent = __SHIFTIN(qid,
   4411 			    RETA_ENT_QINDEX1_MASK_82575);
   4412 			break;
   4413 		default:
   4414 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4415 			break;
   4416 		}
   4417 
   4418 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4419 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4420 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4421 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4422 	}
   4423 
   4424 	wm_rss_getkey((uint8_t *)rss_key);
   4425 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4426 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4427 
   4428 	if (sc->sc_type == WM_T_82574)
   4429 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4430 	else
   4431 		mrqc = MRQC_ENABLE_RSS_MQ;
   4432 
   4433 	/*
   4434 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4435 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4436 	 */
   4437 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4438 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4439 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4440 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4441 
   4442 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4443 }
   4444 
   4445 /*
    4446  * Adjust the number of TX and RX queues the system actually uses.
    4447  *
    4448  * The result depends on the following parameters:
    4449  *     - The number of hardware queues
   4450  *     - The number of MSI-X vectors (= "nvectors" argument)
   4451  *     - ncpu
   4452  */
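         /*
          * For example (hypothetical inputs): an 82576 (16 hardware queue
          * pairs) given nvectors = 5 on an 8-CPU machine ends up with
          * sc_nqueues = 4, since one vector is reserved for the link
          * interrupt and each remaining vector serves one TX/RX queue pair.
          */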
   4453 static void
   4454 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4455 {
   4456 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4457 
   4458 	if (nvectors < 2) {
   4459 		sc->sc_nqueues = 1;
   4460 		return;
   4461 	}
   4462 
    4463 	switch (sc->sc_type) {
   4464 	case WM_T_82572:
   4465 		hw_ntxqueues = 2;
   4466 		hw_nrxqueues = 2;
   4467 		break;
   4468 	case WM_T_82574:
   4469 		hw_ntxqueues = 2;
   4470 		hw_nrxqueues = 2;
   4471 		break;
   4472 	case WM_T_82575:
   4473 		hw_ntxqueues = 4;
   4474 		hw_nrxqueues = 4;
   4475 		break;
   4476 	case WM_T_82576:
   4477 		hw_ntxqueues = 16;
   4478 		hw_nrxqueues = 16;
   4479 		break;
   4480 	case WM_T_82580:
   4481 	case WM_T_I350:
   4482 	case WM_T_I354:
   4483 		hw_ntxqueues = 8;
   4484 		hw_nrxqueues = 8;
   4485 		break;
   4486 	case WM_T_I210:
   4487 		hw_ntxqueues = 4;
   4488 		hw_nrxqueues = 4;
   4489 		break;
   4490 	case WM_T_I211:
   4491 		hw_ntxqueues = 2;
   4492 		hw_nrxqueues = 2;
   4493 		break;
   4494 		/*
    4495 		 * The following ethernet controllers do not support
    4496 		 * MSI-X, so this driver does not use multiqueue on them:
   4497 		 *     - WM_T_80003
   4498 		 *     - WM_T_ICH8
   4499 		 *     - WM_T_ICH9
   4500 		 *     - WM_T_ICH10
   4501 		 *     - WM_T_PCH
   4502 		 *     - WM_T_PCH2
   4503 		 *     - WM_T_PCH_LPT
   4504 		 */
   4505 	default:
   4506 		hw_ntxqueues = 1;
   4507 		hw_nrxqueues = 1;
   4508 		break;
   4509 	}
   4510 
   4511 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4512 
   4513 	/*
    4514 	 * Using more queues than MSI-X vectors cannot improve scaling, so
    4515 	 * we limit the number of queues actually used.
   4516 	 */
   4517 	if (nvectors < hw_nqueues + 1) {
   4518 		sc->sc_nqueues = nvectors - 1;
   4519 	} else {
   4520 		sc->sc_nqueues = hw_nqueues;
   4521 	}
   4522 
   4523 	/*
    4524 	 * Likewise, using more queues than CPUs cannot improve scaling,
    4525 	 * so we also cap the number of queues at ncpu.
   4526 	 */
   4527 	if (ncpu < sc->sc_nqueues)
   4528 		sc->sc_nqueues = ncpu;
   4529 }
   4530 
   4531 /*
   4532  * Both single interrupt MSI and INTx can use this function.
   4533  */
   4534 static int
   4535 wm_setup_legacy(struct wm_softc *sc)
   4536 {
   4537 	pci_chipset_tag_t pc = sc->sc_pc;
   4538 	const char *intrstr = NULL;
   4539 	char intrbuf[PCI_INTRSTR_LEN];
   4540 	int error;
   4541 
   4542 	error = wm_alloc_txrx_queues(sc);
   4543 	if (error) {
   4544 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4545 		    error);
   4546 		return ENOMEM;
   4547 	}
   4548 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4549 	    sizeof(intrbuf));
   4550 #ifdef WM_MPSAFE
   4551 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4552 #endif
   4553 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4554 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4555 	if (sc->sc_ihs[0] == NULL) {
    4556 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4557 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4558 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4559 		return ENOMEM;
   4560 	}
   4561 
   4562 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4563 	sc->sc_nintrs = 1;
   4564 	return 0;
   4565 }
   4566 
   4567 static int
   4568 wm_setup_msix(struct wm_softc *sc)
   4569 {
   4570 	void *vih;
   4571 	kcpuset_t *affinity;
   4572 	int qidx, error, intr_idx, txrx_established;
   4573 	pci_chipset_tag_t pc = sc->sc_pc;
   4574 	const char *intrstr = NULL;
   4575 	char intrbuf[PCI_INTRSTR_LEN];
   4576 	char intr_xname[INTRDEVNAMEBUF];
   4577 
   4578 	if (sc->sc_nqueues < ncpu) {
   4579 		/*
    4580 		 * To avoid contention with other devices' interrupts, the
    4581 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   4582 		 */
   4583 		sc->sc_affinity_offset = 1;
   4584 	} else {
   4585 		/*
    4586 		 * In this case the device uses all CPUs, so for readability
    4587 		 * we make the cpu_index match the MSI-X vector number.
   4588 		 */
   4589 		sc->sc_affinity_offset = 0;
   4590 	}
   4591 
   4592 	error = wm_alloc_txrx_queues(sc);
   4593 	if (error) {
   4594 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4595 		    error);
   4596 		return ENOMEM;
   4597 	}
   4598 
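         	/*
         	 * Vector layout: one MSI-X vector per TX/RX queue pair, plus
         	 * one final vector dedicated to link-status interrupts.
         	 */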
   4599 	kcpuset_create(&affinity, false);
   4600 	intr_idx = 0;
   4601 
   4602 	/*
   4603 	 * TX and RX
   4604 	 */
   4605 	txrx_established = 0;
   4606 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4607 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4608 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4609 
   4610 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4611 		    sizeof(intrbuf));
   4612 #ifdef WM_MPSAFE
   4613 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4614 		    PCI_INTR_MPSAFE, true);
   4615 #endif
   4616 		memset(intr_xname, 0, sizeof(intr_xname));
   4617 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4618 		    device_xname(sc->sc_dev), qidx);
   4619 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4620 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4621 		if (vih == NULL) {
   4622 			aprint_error_dev(sc->sc_dev,
    4623 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   4624 			    intrstr ? " at " : "",
   4625 			    intrstr ? intrstr : "");
   4626 
   4627 			goto fail;
   4628 		}
   4629 		kcpuset_zero(affinity);
   4630 		/* Round-robin affinity */
   4631 		kcpuset_set(affinity, affinity_to);
   4632 		error = interrupt_distribute(vih, affinity, NULL);
   4633 		if (error == 0) {
   4634 			aprint_normal_dev(sc->sc_dev,
   4635 			    "for TX and RX interrupting at %s affinity to %u\n",
   4636 			    intrstr, affinity_to);
   4637 		} else {
   4638 			aprint_normal_dev(sc->sc_dev,
   4639 			    "for TX and RX interrupting at %s\n", intrstr);
   4640 		}
   4641 		sc->sc_ihs[intr_idx] = vih;
    4642 		wmq->wmq_id = qidx;
   4643 		wmq->wmq_intr_idx = intr_idx;
   4644 
   4645 		txrx_established++;
   4646 		intr_idx++;
   4647 	}
   4648 
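         	/*
         	 * At this point MSI-X vectors 0 .. sc_nqueues - 1 service the
         	 * Tx/Rx queue pairs; the next vector handles link status
         	 * changes only.
         	 */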
   4649 	/*
   4650 	 * LINK
   4651 	 */
   4652 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4653 	    sizeof(intrbuf));
   4654 #ifdef WM_MPSAFE
   4655 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4656 #endif
   4657 	memset(intr_xname, 0, sizeof(intr_xname));
   4658 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4659 	    device_xname(sc->sc_dev));
   4660 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
    4661 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4662 	if (vih == NULL) {
   4663 		aprint_error_dev(sc->sc_dev,
    4664 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4665 		    intrstr ? " at " : "",
   4666 		    intrstr ? intrstr : "");
   4667 
   4668 		goto fail;
   4669 	}
    4670 	/* Keep the default affinity for the LINK interrupt. */
   4671 	aprint_normal_dev(sc->sc_dev,
   4672 	    "for LINK interrupting at %s\n", intrstr);
   4673 	sc->sc_ihs[intr_idx] = vih;
   4674 	sc->sc_link_intr_idx = intr_idx;
   4675 
   4676 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4677 	kcpuset_destroy(affinity);
   4678 	return 0;
   4679 
   4680  fail:
   4681 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4682 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4683 		pci_intr_disestablish(sc->sc_pc,
         		    sc->sc_ihs[wmq->wmq_intr_idx]);
   4684 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4685 	}
   4686 
   4687 	kcpuset_destroy(affinity);
   4688 	return ENOMEM;
   4689 }
   4690 
   4691 static void
   4692 wm_turnon(struct wm_softc *sc)
   4693 {
   4694 	int i;
   4695 
   4696 	KASSERT(WM_CORE_LOCKED(sc));
   4697 
    4698 	for (i = 0; i < sc->sc_nqueues; i++) {
   4699 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4700 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4701 
   4702 		mutex_enter(txq->txq_lock);
   4703 		txq->txq_stopping = false;
   4704 		mutex_exit(txq->txq_lock);
   4705 
   4706 		mutex_enter(rxq->rxq_lock);
   4707 		rxq->rxq_stopping = false;
   4708 		mutex_exit(rxq->rxq_lock);
   4709 	}
   4710 
   4711 	sc->sc_core_stopping = false;
   4712 }
   4713 
   4714 static void
   4715 wm_turnoff(struct wm_softc *sc)
   4716 {
   4717 	int i;
   4718 
   4719 	KASSERT(WM_CORE_LOCKED(sc));
   4720 
   4721 	sc->sc_core_stopping = true;
   4722 
    4723 	for (i = 0; i < sc->sc_nqueues; i++) {
   4724 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4725 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4726 
   4727 		mutex_enter(rxq->rxq_lock);
   4728 		rxq->rxq_stopping = true;
   4729 		mutex_exit(rxq->rxq_lock);
   4730 
   4731 		mutex_enter(txq->txq_lock);
   4732 		txq->txq_stopping = true;
   4733 		mutex_exit(txq->txq_lock);
   4734 	}
   4735 }
   4736 
   4737 /*
   4738  * wm_init:		[ifnet interface function]
   4739  *
   4740  *	Initialize the interface.
   4741  */
   4742 static int
   4743 wm_init(struct ifnet *ifp)
   4744 {
   4745 	struct wm_softc *sc = ifp->if_softc;
   4746 	int ret;
   4747 
   4748 	WM_CORE_LOCK(sc);
   4749 	ret = wm_init_locked(ifp);
   4750 	WM_CORE_UNLOCK(sc);
   4751 
   4752 	return ret;
   4753 }
   4754 
   4755 static int
   4756 wm_init_locked(struct ifnet *ifp)
   4757 {
   4758 	struct wm_softc *sc = ifp->if_softc;
   4759 	int i, j, trynum, error = 0;
   4760 	uint32_t reg;
   4761 
   4762 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4763 		device_xname(sc->sc_dev), __func__));
   4764 	KASSERT(WM_CORE_LOCKED(sc));
   4765 
   4766 	/*
    4767 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4768 	 * There is a small but measurable benefit to avoiding the adjustment
    4769 	 * of the descriptor so that the headers are aligned, for normal MTU,
   4770 	 * on such platforms.  One possibility is that the DMA itself is
   4771 	 * slightly more efficient if the front of the entire packet (instead
   4772 	 * of the front of the headers) is aligned.
   4773 	 *
   4774 	 * Note we must always set align_tweak to 0 if we are using
   4775 	 * jumbo frames.
   4776 	 */
   4777 #ifdef __NO_STRICT_ALIGNMENT
   4778 	sc->sc_align_tweak = 0;
   4779 #else
   4780 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4781 		sc->sc_align_tweak = 0;
   4782 	else
   4783 		sc->sc_align_tweak = 2;
   4784 #endif /* __NO_STRICT_ALIGNMENT */
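         
         	/*
         	 * Worked example: with MTU 1500 and 2048-byte clusters,
         	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 is not
         	 * greater than MCLBYTES - 2 = 2046, so align_tweak is 2; the
         	 * two-byte offset puts the IP header on a 4-byte boundary
         	 * after the 14-byte Ethernet header.
         	 */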
   4785 
   4786 	/* Cancel any pending I/O. */
   4787 	wm_stop_locked(ifp, 0);
   4788 
   4789 	/* update statistics before reset */
   4790 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4791 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4792 
   4793 	/* PCH_SPT hardware workaround */
   4794 	if (sc->sc_type == WM_T_PCH_SPT)
   4795 		wm_flush_desc_rings(sc);
   4796 
   4797 	/* Reset the chip to a known state. */
   4798 	wm_reset(sc);
   4799 
   4800 	/* AMT based hardware can now take control from firmware */
   4801 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4802 		wm_get_hw_control(sc);
   4803 
   4804 	/* Init hardware bits */
   4805 	wm_initialize_hardware_bits(sc);
   4806 
   4807 	/* Reset the PHY. */
   4808 	if (sc->sc_flags & WM_F_HAS_MII)
   4809 		wm_gmii_reset(sc);
   4810 
   4811 	/* Calculate (E)ITR value */
   4812 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4813 		sc->sc_itr = 450;	/* For EITR */
   4814 	} else if (sc->sc_type >= WM_T_82543) {
   4815 		/*
   4816 		 * Set up the interrupt throttling register (units of 256ns)
   4817 		 * Note that a footnote in Intel's documentation says this
   4818 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4819 		 * or 10Mbit mode.  Empirically, this also appears to be true
    4820 		 * for the 1024ns units of the other
   4821 		 * interrupt-related timer registers -- so, really, we ought
   4822 		 * to divide this value by 4 when the link speed is low.
   4823 		 *
   4824 		 * XXX implement this division at link speed change!
   4825 		 */
   4826 
   4827 		/*
   4828 		 * For N interrupts/sec, set this value to:
   4829 		 * 1000000000 / (N * 256).  Note that we set the
   4830 		 * absolute and packet timer values to this value
   4831 		 * divided by 4 to get "simple timer" behavior.
   4832 		 */
   4833 
   4834 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4835 	}
   4836 
   4837 	error = wm_init_txrx_queues(sc);
   4838 	if (error)
   4839 		goto out;
   4840 
   4841 	/*
   4842 	 * Clear out the VLAN table -- we don't use it (yet).
   4843 	 */
   4844 	CSR_WRITE(sc, WMREG_VET, 0);
   4845 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4846 		trynum = 10; /* Due to hw errata */
   4847 	else
   4848 		trynum = 1;
   4849 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4850 		for (j = 0; j < trynum; j++)
   4851 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4852 
   4853 	/*
   4854 	 * Set up flow-control parameters.
   4855 	 *
   4856 	 * XXX Values could probably stand some tuning.
   4857 	 */
   4858 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4859 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4860 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4861 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4862 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4863 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4864 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4865 	}
   4866 
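         	/*
         	 * FCAL/FCAH hold the 802.3x flow-control station address
         	 * (01:80:c2:00:00:01) and FCT its Ethertype (0x8808);
         	 * FCRTL/FCRTH are the receive FIFO low/high water marks that
         	 * trigger XON/XOFF pause frames.
         	 */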
   4867 	sc->sc_fcrtl = FCRTL_DFLT;
   4868 	if (sc->sc_type < WM_T_82543) {
   4869 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4870 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4871 	} else {
   4872 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4873 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4874 	}
   4875 
   4876 	if (sc->sc_type == WM_T_80003)
   4877 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4878 	else
   4879 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4880 
    4881 	/* Write the control register. */
   4882 	wm_set_vlan(sc);
   4883 
   4884 	if (sc->sc_flags & WM_F_HAS_MII) {
   4885 		int val;
   4886 
   4887 		switch (sc->sc_type) {
   4888 		case WM_T_80003:
   4889 		case WM_T_ICH8:
   4890 		case WM_T_ICH9:
   4891 		case WM_T_ICH10:
   4892 		case WM_T_PCH:
   4893 		case WM_T_PCH2:
   4894 		case WM_T_PCH_LPT:
   4895 		case WM_T_PCH_SPT:
   4896 			/*
    4897 			 * Set the MAC to wait the maximum time between each
    4898 			 * iteration and increase the maximum number of
    4899 			 * iterations when polling the PHY; this fixes
    4900 			 * erroneous timeouts at 10Mbps.
   4901 			 */
   4902 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4903 			    0xFFFF);
   4904 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4905 			val |= 0x3F;
   4906 			wm_kmrn_writereg(sc,
   4907 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4908 			break;
   4909 		default:
   4910 			break;
   4911 		}
   4912 
   4913 		if (sc->sc_type == WM_T_80003) {
   4914 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4915 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4916 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4917 
    4918 			/* Bypass RX and TX FIFOs */
   4919 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4920 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4921 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4922 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4923 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4924 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4925 		}
   4926 	}
   4927 #if 0
   4928 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4929 #endif
   4930 
   4931 	/* Set up checksum offload parameters. */
   4932 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4933 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4934 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4935 		reg |= RXCSUM_IPOFL;
   4936 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4937 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4938 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4939 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4940 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4941 
   4942 	/* Set up MSI-X */
   4943 	if (sc->sc_nintrs > 1) {
   4944 		uint32_t ivar;
   4945 		struct wm_queue *wmq;
   4946 		int qid, qintr_idx;
   4947 
   4948 		if (sc->sc_type == WM_T_82575) {
   4949 			/* Interrupt control */
   4950 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4951 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4952 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4953 
   4954 			/* TX and RX */
   4955 			for (i = 0; i < sc->sc_nqueues; i++) {
   4956 				wmq = &sc->sc_queue[i];
   4957 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4958 				    EITR_TX_QUEUE(wmq->wmq_id)
   4959 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4960 			}
   4961 			/* Link status */
   4962 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4963 			    EITR_OTHER);
   4964 		} else if (sc->sc_type == WM_T_82574) {
   4965 			/* Interrupt control */
   4966 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4967 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4968 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4969 
   4970 			ivar = 0;
   4971 			/* TX and RX */
   4972 			for (i = 0; i < sc->sc_nqueues; i++) {
   4973 				wmq = &sc->sc_queue[i];
   4974 				qid = wmq->wmq_id;
   4975 				qintr_idx = wmq->wmq_intr_idx;
   4976 
   4977 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4978 				    IVAR_TX_MASK_Q_82574(qid));
   4979 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4980 				    IVAR_RX_MASK_Q_82574(qid));
   4981 			}
   4982 			/* Link status */
   4983 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4984 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4985 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4986 		} else {
   4987 			/* Interrupt control */
   4988 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4989 			    | GPIE_EIAME | GPIE_PBA);
   4990 
   4991 			switch (sc->sc_type) {
   4992 			case WM_T_82580:
   4993 			case WM_T_I350:
   4994 			case WM_T_I354:
   4995 			case WM_T_I210:
   4996 			case WM_T_I211:
   4997 				/* TX and RX */
   4998 				for (i = 0; i < sc->sc_nqueues; i++) {
   4999 					wmq = &sc->sc_queue[i];
   5000 					qid = wmq->wmq_id;
   5001 					qintr_idx = wmq->wmq_intr_idx;
   5002 
   5003 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5004 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5005 					ivar |= __SHIFTIN((qintr_idx
   5006 						| IVAR_VALID),
   5007 					    IVAR_TX_MASK_Q(qid));
   5008 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5009 					ivar |= __SHIFTIN((qintr_idx
   5010 						| IVAR_VALID),
   5011 					    IVAR_RX_MASK_Q(qid));
   5012 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5013 				}
   5014 				break;
   5015 			case WM_T_82576:
   5016 				/* TX and RX */
   5017 				for (i = 0; i < sc->sc_nqueues; i++) {
   5018 					wmq = &sc->sc_queue[i];
   5019 					qid = wmq->wmq_id;
   5020 					qintr_idx = wmq->wmq_intr_idx;
   5021 
   5022 					ivar = CSR_READ(sc,
   5023 					    WMREG_IVAR_Q_82576(qid));
   5024 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5025 					ivar |= __SHIFTIN((qintr_idx
   5026 						| IVAR_VALID),
   5027 					    IVAR_TX_MASK_Q_82576(qid));
   5028 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5029 					ivar |= __SHIFTIN((qintr_idx
   5030 						| IVAR_VALID),
   5031 					    IVAR_RX_MASK_Q_82576(qid));
   5032 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5033 					    ivar);
   5034 				}
   5035 				break;
   5036 			default:
   5037 				break;
   5038 			}
   5039 
   5040 			/* Link status */
   5041 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5042 			    IVAR_MISC_OTHER);
   5043 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5044 		}
   5045 
   5046 		if (sc->sc_nqueues > 1) {
   5047 			wm_init_rss(sc);
   5048 
   5049 			/*
   5050 			** NOTE: Receive Full-Packet Checksum Offload
    5051 			** is mutually exclusive with Multiqueue. However,
    5052 			** this is not the same as the TCP/IP checksum
    5053 			** offloads, which still work.
   5054 			*/
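         			/*
         			 * (RXCSUM_PCSD disables the full-packet checksum
         			 * so the descriptor field that would hold it can
         			 * carry the RSS hash instead.)
         			 */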
   5055 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5056 			reg |= RXCSUM_PCSD;
   5057 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5058 		}
   5059 	}
   5060 
   5061 	/* Set up the interrupt registers. */
   5062 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5063 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5064 	    ICR_RXO | ICR_RXT0;
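         	/*
         	 * ICR_TXDW: Tx descriptor written back; ICR_LSC: link status
         	 * change; ICR_RXSEQ: Rx sequence error; ICR_RXDMT0: Rx
         	 * descriptor minimum threshold reached; ICR_RXO: receiver
         	 * overrun; ICR_RXT0: receiver timer expired.
         	 */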
   5065 	if (sc->sc_nintrs > 1) {
   5066 		uint32_t mask;
   5067 		struct wm_queue *wmq;
   5068 
   5069 		switch (sc->sc_type) {
   5070 		case WM_T_82574:
   5071 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5072 			    WMREG_EIAC_82574_MSIX_MASK);
   5073 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5074 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5075 			break;
   5076 		default:
   5077 			if (sc->sc_type == WM_T_82575) {
   5078 				mask = 0;
   5079 				for (i = 0; i < sc->sc_nqueues; i++) {
   5080 					wmq = &sc->sc_queue[i];
   5081 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5082 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5083 				}
   5084 				mask |= EITR_OTHER;
   5085 			} else {
   5086 				mask = 0;
   5087 				for (i = 0; i < sc->sc_nqueues; i++) {
   5088 					wmq = &sc->sc_queue[i];
   5089 					mask |= 1 << wmq->wmq_intr_idx;
   5090 				}
   5091 				mask |= 1 << sc->sc_link_intr_idx;
   5092 			}
   5093 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5094 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5095 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5096 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5097 			break;
   5098 		}
   5099 	} else
   5100 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5101 
   5102 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5103 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5104 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5105 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5106 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5107 		reg |= KABGTXD_BGSQLBIAS;
   5108 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5109 	}
   5110 
   5111 	/* Set up the inter-packet gap. */
   5112 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5113 
   5114 	if (sc->sc_type >= WM_T_82543) {
   5115 		/*
    5116 		 * XXX The 82574 has both ITR and EITR. Set EITR when we use
    5117 		 * the multiqueue feature with MSI-X.
   5118 		 */
   5119 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5120 			int qidx;
   5121 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5122 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5123 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5124 				    sc->sc_itr);
   5125 			}
   5126 			/*
    5127 			 * Link interrupts occur much less frequently than
    5128 			 * Tx and Rx interrupts, so we don't tune the
    5129 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5130 			 * FreeBSD's if_igb does.
   5131 			 */
   5132 		} else
   5133 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5134 	}
   5135 
   5136 	/* Set the VLAN ethernetype. */
   5137 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5138 
   5139 	/*
   5140 	 * Set up the transmit control register; we start out with
    5141 	 * a collision distance suitable for FDX, but update it when
   5142 	 * we resolve the media type.
   5143 	 */
   5144 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5145 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5146 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5147 	if (sc->sc_type >= WM_T_82571)
   5148 		sc->sc_tctl |= TCTL_MULR;
   5149 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5150 
   5151 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5152 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5153 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5154 	}
   5155 
   5156 	if (sc->sc_type == WM_T_80003) {
   5157 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5158 		reg &= ~TCTL_EXT_GCEX_MASK;
   5159 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5160 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5161 	}
   5162 
   5163 	/* Set the media. */
   5164 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5165 		goto out;
   5166 
   5167 	/* Configure for OS presence */
   5168 	wm_init_manageability(sc);
   5169 
   5170 	/*
   5171 	 * Set up the receive control register; we actually program
   5172 	 * the register when we set the receive filter.  Use multicast
   5173 	 * address offset type 0.
   5174 	 *
   5175 	 * Only the i82544 has the ability to strip the incoming
   5176 	 * CRC, so we don't enable that feature.
   5177 	 */
   5178 	sc->sc_mchash_type = 0;
   5179 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5180 	    | RCTL_MO(sc->sc_mchash_type);
   5181 
   5182 	/*
   5183 	 * The I350 has a bug where it always strips the CRC whether
    5184 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   5185 	 */
   5186 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5187 	    || (sc->sc_type == WM_T_I210))
   5188 		sc->sc_rctl |= RCTL_SECRC;
   5189 
   5190 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5191 	    && (ifp->if_mtu > ETHERMTU)) {
   5192 		sc->sc_rctl |= RCTL_LPE;
   5193 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5194 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5195 	}
   5196 
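         	/*
         	 * Map the mbuf cluster size to an RCTL receive buffer size;
         	 * MCLBYTES is 2048 on most ports, so RCTL_2k is the common
         	 * case.
         	 */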
   5197 	if (MCLBYTES == 2048) {
   5198 		sc->sc_rctl |= RCTL_2k;
   5199 	} else {
   5200 		if (sc->sc_type >= WM_T_82543) {
   5201 			switch (MCLBYTES) {
   5202 			case 4096:
   5203 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5204 				break;
   5205 			case 8192:
   5206 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5207 				break;
   5208 			case 16384:
   5209 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5210 				break;
   5211 			default:
   5212 				panic("wm_init: MCLBYTES %d unsupported",
   5213 				    MCLBYTES);
   5214 				break;
   5215 			}
   5216 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5217 	}
   5218 
   5219 	/* Set the receive filter. */
   5220 	wm_set_filter(sc);
   5221 
   5222 	/* Enable ECC */
   5223 	switch (sc->sc_type) {
   5224 	case WM_T_82571:
   5225 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5226 		reg |= PBA_ECC_CORR_EN;
   5227 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5228 		break;
   5229 	case WM_T_PCH_LPT:
   5230 	case WM_T_PCH_SPT:
   5231 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5232 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5233 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5234 
   5235 		sc->sc_ctrl |= CTRL_MEHE;
   5236 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5237 		break;
   5238 	default:
   5239 		break;
   5240 	}
   5241 
    5242 	/* On 82575 and later, set RDT only if RX is enabled. */
   5243 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5244 		int qidx;
   5245 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5246 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5247 			for (i = 0; i < WM_NRXDESC; i++) {
   5248 				mutex_enter(rxq->rxq_lock);
   5249 				wm_init_rxdesc(rxq, i);
   5250 				mutex_exit(rxq->rxq_lock);
   5251 
   5252 			}
   5253 		}
   5254 	}
   5255 
   5256 	wm_turnon(sc);
   5257 
   5258 	/* Start the one second link check clock. */
   5259 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5260 
   5261 	/* ...all done! */
   5262 	ifp->if_flags |= IFF_RUNNING;
   5263 	ifp->if_flags &= ~IFF_OACTIVE;
   5264 
   5265  out:
   5266 	sc->sc_if_flags = ifp->if_flags;
   5267 	if (error)
   5268 		log(LOG_ERR, "%s: interface not running\n",
   5269 		    device_xname(sc->sc_dev));
   5270 	return error;
   5271 }
   5272 
   5273 /*
   5274  * wm_stop:		[ifnet interface function]
   5275  *
   5276  *	Stop transmission on the interface.
   5277  */
   5278 static void
   5279 wm_stop(struct ifnet *ifp, int disable)
   5280 {
   5281 	struct wm_softc *sc = ifp->if_softc;
   5282 
   5283 	WM_CORE_LOCK(sc);
   5284 	wm_stop_locked(ifp, disable);
   5285 	WM_CORE_UNLOCK(sc);
   5286 }
   5287 
   5288 static void
   5289 wm_stop_locked(struct ifnet *ifp, int disable)
   5290 {
   5291 	struct wm_softc *sc = ifp->if_softc;
   5292 	struct wm_txsoft *txs;
   5293 	int i, qidx;
   5294 
   5295 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5296 		device_xname(sc->sc_dev), __func__));
   5297 	KASSERT(WM_CORE_LOCKED(sc));
   5298 
   5299 	wm_turnoff(sc);
   5300 
   5301 	/* Stop the one second clock. */
   5302 	callout_stop(&sc->sc_tick_ch);
   5303 
   5304 	/* Stop the 82547 Tx FIFO stall check timer. */
   5305 	if (sc->sc_type == WM_T_82547)
   5306 		callout_stop(&sc->sc_txfifo_ch);
   5307 
   5308 	if (sc->sc_flags & WM_F_HAS_MII) {
   5309 		/* Down the MII. */
   5310 		mii_down(&sc->sc_mii);
   5311 	} else {
   5312 #if 0
   5313 		/* Should we clear PHY's status properly? */
   5314 		wm_reset(sc);
   5315 #endif
   5316 	}
   5317 
   5318 	/* Stop the transmit and receive processes. */
   5319 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5320 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5321 	sc->sc_rctl &= ~RCTL_EN;
   5322 
   5323 	/*
   5324 	 * Clear the interrupt mask to ensure the device cannot assert its
   5325 	 * interrupt line.
   5326 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5327 	 * service any currently pending or shared interrupt.
   5328 	 */
   5329 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5330 	sc->sc_icr = 0;
   5331 	if (sc->sc_nintrs > 1) {
   5332 		if (sc->sc_type != WM_T_82574) {
   5333 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5334 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5335 		} else
   5336 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5337 	}
   5338 
   5339 	/* Release any queued transmit buffers. */
   5340 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5341 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5342 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5343 		mutex_enter(txq->txq_lock);
   5344 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5345 			txs = &txq->txq_soft[i];
   5346 			if (txs->txs_mbuf != NULL) {
    5347 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5348 				m_freem(txs->txs_mbuf);
   5349 				txs->txs_mbuf = NULL;
   5350 			}
   5351 		}
   5352 		mutex_exit(txq->txq_lock);
   5353 	}
   5354 
   5355 	/* Mark the interface as down and cancel the watchdog timer. */
   5356 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5357 	ifp->if_timer = 0;
   5358 
   5359 	if (disable) {
   5360 		for (i = 0; i < sc->sc_nqueues; i++) {
   5361 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5362 			mutex_enter(rxq->rxq_lock);
   5363 			wm_rxdrain(rxq);
   5364 			mutex_exit(rxq->rxq_lock);
   5365 		}
   5366 	}
   5367 
   5368 #if 0 /* notyet */
   5369 	if (sc->sc_type >= WM_T_82544)
   5370 		CSR_WRITE(sc, WMREG_WUC, 0);
   5371 #endif
   5372 }
   5373 
   5374 static void
   5375 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5376 {
   5377 	struct mbuf *m;
   5378 	int i;
   5379 
   5380 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5381 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5382 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5383 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5384 		    m->m_data, m->m_len, m->m_flags);
   5385 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5386 	    i, i == 1 ? "" : "s");
   5387 }
   5388 
   5389 /*
   5390  * wm_82547_txfifo_stall:
   5391  *
   5392  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5393  *	reset the FIFO pointers, and restart packet transmission.
   5394  */
   5395 static void
   5396 wm_82547_txfifo_stall(void *arg)
   5397 {
   5398 	struct wm_softc *sc = arg;
   5399 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5400 
   5401 	mutex_enter(txq->txq_lock);
   5402 
   5403 	if (txq->txq_stopping)
   5404 		goto out;
   5405 
   5406 	if (txq->txq_fifo_stall) {
   5407 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5408 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5409 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5410 			/*
   5411 			 * Packets have drained.  Stop transmitter, reset
   5412 			 * FIFO pointers, restart transmitter, and kick
   5413 			 * the packet queue.
   5414 			 */
   5415 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5416 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5417 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5418 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5419 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5420 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5421 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5422 			CSR_WRITE_FLUSH(sc);
   5423 
   5424 			txq->txq_fifo_head = 0;
   5425 			txq->txq_fifo_stall = 0;
   5426 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5427 		} else {
   5428 			/*
   5429 			 * Still waiting for packets to drain; try again in
   5430 			 * another tick.
   5431 			 */
   5432 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5433 		}
   5434 	}
   5435 
   5436 out:
   5437 	mutex_exit(txq->txq_lock);
   5438 }
   5439 
   5440 /*
   5441  * wm_82547_txfifo_bugchk:
   5442  *
   5443  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5444  *	prevent enqueueing a packet that would wrap around the end
    5445  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5446  *
   5447  *	We do this by checking the amount of space before the end
   5448  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5449  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5450  *	the internal FIFO pointers to the beginning, and restart
   5451  *	transmission on the interface.
   5452  */
   5453 #define	WM_FIFO_HDR		0x10
   5454 #define	WM_82547_PAD_LEN	0x3e0
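         /*
          * Example with the values above: a 1514-byte frame occupies
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO,
          * so the stall path is taken once 1536 - WM_82547_PAD_LEN = 544
          * bytes or fewer remain before the end of the FIFO.
          */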
   5455 static int
   5456 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5457 {
   5458 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5459 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5460 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5461 
   5462 	/* Just return if already stalled. */
   5463 	if (txq->txq_fifo_stall)
   5464 		return 1;
   5465 
   5466 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5467 		/* Stall only occurs in half-duplex mode. */
   5468 		goto send_packet;
   5469 	}
   5470 
   5471 	if (len >= WM_82547_PAD_LEN + space) {
   5472 		txq->txq_fifo_stall = 1;
   5473 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5474 		return 1;
   5475 	}
   5476 
   5477  send_packet:
   5478 	txq->txq_fifo_head += len;
   5479 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5480 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5481 
   5482 	return 0;
   5483 }
   5484 
   5485 static int
   5486 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5487 {
   5488 	int error;
   5489 
   5490 	/*
   5491 	 * Allocate the control data structures, and create and load the
   5492 	 * DMA map for it.
   5493 	 *
   5494 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5495 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5496 	 * both sets within the same 4G segment.
   5497 	 */
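         	/*
         	 * (The (bus_size_t)0x100000000ULL boundary argument to
         	 * bus_dmamem_alloc() below is what enforces this: the
         	 * allocated segment may not cross a 4G boundary.)
         	 */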
   5498 	if (sc->sc_type < WM_T_82544)
   5499 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5500 	else
   5501 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5502 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5503 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5504 	else
   5505 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5506 
   5507 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5508 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5509 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5510 		aprint_error_dev(sc->sc_dev,
   5511 		    "unable to allocate TX control data, error = %d\n",
   5512 		    error);
   5513 		goto fail_0;
   5514 	}
   5515 
   5516 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5517 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5518 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5519 		aprint_error_dev(sc->sc_dev,
   5520 		    "unable to map TX control data, error = %d\n", error);
   5521 		goto fail_1;
   5522 	}
   5523 
   5524 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5525 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5526 		aprint_error_dev(sc->sc_dev,
   5527 		    "unable to create TX control data DMA map, error = %d\n",
   5528 		    error);
   5529 		goto fail_2;
   5530 	}
   5531 
   5532 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5533 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5534 		aprint_error_dev(sc->sc_dev,
   5535 		    "unable to load TX control data DMA map, error = %d\n",
   5536 		    error);
   5537 		goto fail_3;
   5538 	}
   5539 
   5540 	return 0;
   5541 
   5542  fail_3:
   5543 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5544  fail_2:
   5545 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5546 	    WM_TXDESCS_SIZE(txq));
   5547  fail_1:
   5548 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5549  fail_0:
   5550 	return error;
   5551 }
   5552 
   5553 static void
   5554 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5555 {
   5556 
   5557 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5558 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5559 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5560 	    WM_TXDESCS_SIZE(txq));
   5561 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5562 }
   5563 
   5564 static int
   5565 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5566 {
   5567 	int error;
   5568 
   5569 	/*
   5570 	 * Allocate the control data structures, and create and load the
   5571 	 * DMA map for it.
   5572 	 *
   5573 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5574 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5575 	 * both sets within the same 4G segment.
   5576 	 */
   5577 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5578 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5579 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5580 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5581 		aprint_error_dev(sc->sc_dev,
   5582 		    "unable to allocate RX control data, error = %d\n",
   5583 		    error);
   5584 		goto fail_0;
   5585 	}
   5586 
   5587 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5588 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5589 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5590 		aprint_error_dev(sc->sc_dev,
   5591 		    "unable to map RX control data, error = %d\n", error);
   5592 		goto fail_1;
   5593 	}
   5594 
   5595 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5596 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5597 		aprint_error_dev(sc->sc_dev,
   5598 		    "unable to create RX control data DMA map, error = %d\n",
   5599 		    error);
   5600 		goto fail_2;
   5601 	}
   5602 
   5603 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5604 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5605 		aprint_error_dev(sc->sc_dev,
   5606 		    "unable to load RX control data DMA map, error = %d\n",
   5607 		    error);
   5608 		goto fail_3;
   5609 	}
   5610 
   5611 	return 0;
   5612 
   5613  fail_3:
   5614 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5615  fail_2:
   5616 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5617 	    rxq->rxq_desc_size);
   5618  fail_1:
   5619 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5620  fail_0:
   5621 	return error;
   5622 }
   5623 
   5624 static void
   5625 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5626 {
   5627 
   5628 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5629 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5630 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5631 	    rxq->rxq_desc_size);
   5632 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5633 }
   5634 
   5635 
   5636 static int
   5637 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5638 {
   5639 	int i, error;
   5640 
   5641 	/* Create the transmit buffer DMA maps. */
   5642 	WM_TXQUEUELEN(txq) =
   5643 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5644 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5645 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5646 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5647 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5648 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5649 			aprint_error_dev(sc->sc_dev,
   5650 			    "unable to create Tx DMA map %d, error = %d\n",
   5651 			    i, error);
   5652 			goto fail;
   5653 		}
   5654 	}
   5655 
   5656 	return 0;
   5657 
   5658  fail:
   5659 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5660 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5661 			bus_dmamap_destroy(sc->sc_dmat,
   5662 			    txq->txq_soft[i].txs_dmamap);
   5663 	}
   5664 	return error;
   5665 }
   5666 
   5667 static void
   5668 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5669 {
   5670 	int i;
   5671 
   5672 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5673 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5674 			bus_dmamap_destroy(sc->sc_dmat,
   5675 			    txq->txq_soft[i].txs_dmamap);
   5676 	}
   5677 }
   5678 
   5679 static int
   5680 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5681 {
   5682 	int i, error;
   5683 
   5684 	/* Create the receive buffer DMA maps. */
   5685 	for (i = 0; i < WM_NRXDESC; i++) {
   5686 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5687 			    MCLBYTES, 0, 0,
   5688 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5689 			aprint_error_dev(sc->sc_dev,
   5690 			    "unable to create Rx DMA map %d error = %d\n",
   5691 			    i, error);
   5692 			goto fail;
   5693 		}
   5694 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5695 	}
   5696 
   5697 	return 0;
   5698 
   5699  fail:
   5700 	for (i = 0; i < WM_NRXDESC; i++) {
   5701 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5702 			bus_dmamap_destroy(sc->sc_dmat,
   5703 			    rxq->rxq_soft[i].rxs_dmamap);
   5704 	}
   5705 	return error;
   5706 }
   5707 
   5708 static void
   5709 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5710 {
   5711 	int i;
   5712 
   5713 	for (i = 0; i < WM_NRXDESC; i++) {
   5714 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5715 			bus_dmamap_destroy(sc->sc_dmat,
   5716 			    rxq->rxq_soft[i].rxs_dmamap);
   5717 	}
   5718 }
   5719 
   5720 /*
    5721  * wm_alloc_txrx_queues:
   5722  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5723  */
   5724 static int
   5725 wm_alloc_txrx_queues(struct wm_softc *sc)
   5726 {
   5727 	int i, error, tx_done, rx_done;
   5728 
   5729 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5730 	    KM_SLEEP);
   5731 	if (sc->sc_queue == NULL) {
    5732 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5733 		error = ENOMEM;
   5734 		goto fail_0;
   5735 	}
   5736 
   5737 	/*
   5738 	 * For transmission
   5739 	 */
   5740 	error = 0;
   5741 	tx_done = 0;
   5742 	for (i = 0; i < sc->sc_nqueues; i++) {
   5743 #ifdef WM_EVENT_COUNTERS
   5744 		int j;
   5745 		const char *xname;
   5746 #endif
   5747 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5748 		txq->txq_sc = sc;
   5749 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5750 
   5751 		error = wm_alloc_tx_descs(sc, txq);
   5752 		if (error)
   5753 			break;
   5754 		error = wm_alloc_tx_buffer(sc, txq);
   5755 		if (error) {
   5756 			wm_free_tx_descs(sc, txq);
   5757 			break;
   5758 		}
   5759 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5760 		if (txq->txq_interq == NULL) {
   5761 			wm_free_tx_descs(sc, txq);
   5762 			wm_free_tx_buffer(sc, txq);
   5763 			error = ENOMEM;
   5764 			break;
   5765 		}
   5766 
   5767 #ifdef WM_EVENT_COUNTERS
   5768 		xname = device_xname(sc->sc_dev);
   5769 
   5770 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5771 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5772 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5773 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5774 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5775 
   5776 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5777 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5778 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5779 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5780 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5781 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5782 
   5783 		for (j = 0; j < WM_NTXSEGS; j++) {
    5784 			snprintf(txq->txq_txseg_evcnt_names[j],
    5785 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    5786 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    5787 			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   5788 		}
   5789 
   5790 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5791 
   5792 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5793 #endif /* WM_EVENT_COUNTERS */
   5794 
   5795 		tx_done++;
   5796 	}
   5797 	if (error)
   5798 		goto fail_1;
   5799 
   5800 	/*
    5801 	 * For receive
   5802 	 */
   5803 	error = 0;
   5804 	rx_done = 0;
   5805 	for (i = 0; i < sc->sc_nqueues; i++) {
   5806 #ifdef WM_EVENT_COUNTERS
   5807 		const char *xname;
   5808 #endif
   5809 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5810 		rxq->rxq_sc = sc;
   5811 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5812 
   5813 		error = wm_alloc_rx_descs(sc, rxq);
   5814 		if (error)
   5815 			break;
   5816 
   5817 		error = wm_alloc_rx_buffer(sc, rxq);
   5818 		if (error) {
   5819 			wm_free_rx_descs(sc, rxq);
   5820 			break;
   5821 		}
   5822 
   5823 #ifdef WM_EVENT_COUNTERS
   5824 		xname = device_xname(sc->sc_dev);
   5825 
   5826 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5827 
   5828 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5829 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5830 #endif /* WM_EVENT_COUNTERS */
   5831 
   5832 		rx_done++;
   5833 	}
   5834 	if (error)
   5835 		goto fail_2;
   5836 
   5837 	return 0;
   5838 
   5839  fail_2:
   5840 	for (i = 0; i < rx_done; i++) {
   5841 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5842 		wm_free_rx_buffer(sc, rxq);
   5843 		wm_free_rx_descs(sc, rxq);
   5844 		if (rxq->rxq_lock)
   5845 			mutex_obj_free(rxq->rxq_lock);
   5846 	}
   5847  fail_1:
   5848 	for (i = 0; i < tx_done; i++) {
   5849 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5850 		pcq_destroy(txq->txq_interq);
   5851 		wm_free_tx_buffer(sc, txq);
   5852 		wm_free_tx_descs(sc, txq);
   5853 		if (txq->txq_lock)
   5854 			mutex_obj_free(txq->txq_lock);
   5855 	}
   5856 
   5857 	kmem_free(sc->sc_queue,
   5858 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5859  fail_0:
   5860 	return error;
   5861 }
   5862 
   5863 /*
    5864  * wm_free_txrx_queues:
   5865  *	Free {tx,rx}descs and {tx,rx} buffers
   5866  */
   5867 static void
   5868 wm_free_txrx_queues(struct wm_softc *sc)
   5869 {
   5870 	int i;
   5871 
   5872 	for (i = 0; i < sc->sc_nqueues; i++) {
   5873 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5874 		wm_free_rx_buffer(sc, rxq);
   5875 		wm_free_rx_descs(sc, rxq);
   5876 		if (rxq->rxq_lock)
   5877 			mutex_obj_free(rxq->rxq_lock);
   5878 	}
   5879 
   5880 	for (i = 0; i < sc->sc_nqueues; i++) {
   5881 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5882 		wm_free_tx_buffer(sc, txq);
   5883 		wm_free_tx_descs(sc, txq);
   5884 		if (txq->txq_lock)
   5885 			mutex_obj_free(txq->txq_lock);
   5886 	}
   5887 
   5888 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5889 }
   5890 
   5891 static void
   5892 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5893 {
   5894 
   5895 	KASSERT(mutex_owned(txq->txq_lock));
   5896 
   5897 	/* Initialize the transmit descriptor ring. */
   5898 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5899 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5900 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5901 	txq->txq_free = WM_NTXDESC(txq);
   5902 	txq->txq_next = 0;
   5903 }
   5904 
   5905 static void
   5906 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5907     struct wm_txqueue *txq)
   5908 {
   5909 
   5910 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5911 		device_xname(sc->sc_dev), __func__));
   5912 	KASSERT(mutex_owned(txq->txq_lock));
   5913 
   5914 	if (sc->sc_type < WM_T_82543) {
   5915 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5916 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5917 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5918 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5919 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5920 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5921 	} else {
   5922 		int qid = wmq->wmq_id;
   5923 
   5924 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5925 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5926 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5927 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5928 
   5929 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5930 			/*
   5931 			 * Don't write TDT before TCTL.EN is set.
    5932 			 * See the documentation.
   5933 			 */
   5934 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5935 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5936 			    | TXDCTL_WTHRESH(0));
   5937 		else {
   5938 			/* ITR / 4 */
   5939 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5940 			if (sc->sc_type >= WM_T_82540) {
   5941 				/* should be same */
   5942 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5943 			}
   5944 
   5945 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5946 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5947 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5948 		}
   5949 	}
   5950 }
   5951 
   5952 static void
   5953 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5954 {
   5955 	int i;
   5956 
   5957 	KASSERT(mutex_owned(txq->txq_lock));
   5958 
   5959 	/* Initialize the transmit job descriptors. */
   5960 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5961 		txq->txq_soft[i].txs_mbuf = NULL;
   5962 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5963 	txq->txq_snext = 0;
   5964 	txq->txq_sdirty = 0;
   5965 }
   5966 
   5967 static void
   5968 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5969     struct wm_txqueue *txq)
   5970 {
   5971 
   5972 	KASSERT(mutex_owned(txq->txq_lock));
   5973 
   5974 	/*
   5975 	 * Set up some register offsets that are different between
   5976 	 * the i82542 and the i82543 and later chips.
   5977 	 */
   5978 	if (sc->sc_type < WM_T_82543)
   5979 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5980 	else
   5981 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5982 
   5983 	wm_init_tx_descs(sc, txq);
   5984 	wm_init_tx_regs(sc, wmq, txq);
   5985 	wm_init_tx_buffer(sc, txq);
   5986 }
   5987 
   5988 static void
   5989 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5990     struct wm_rxqueue *rxq)
   5991 {
   5992 
   5993 	KASSERT(mutex_owned(rxq->rxq_lock));
   5994 
   5995 	/*
   5996 	 * Initialize the receive descriptor and receive job
   5997 	 * descriptor rings.
   5998 	 */
   5999 	if (sc->sc_type < WM_T_82543) {
   6000 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6001 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6002 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6003 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   6004 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6005 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6006 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6007 
   6008 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6009 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6010 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6011 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6012 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6013 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6014 	} else {
   6015 		int qid = wmq->wmq_id;
   6016 
   6017 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6018 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6019 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   6020 
   6021 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6022 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6023 				panic("%s: MCLBYTES %d unsupported for "
         				    "i82575 or newer\n", __func__, MCLBYTES);
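         			/*
         			 * SRRCTL's BSIZEPKT field is in units of
         			 * 1 << SRRCTL_BSIZEPKT_SHIFT bytes (1KB), so
         			 * e.g. MCLBYTES == 2048 programs a 2KB receive
         			 * buffer below.
         			 */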
   6024 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   6025 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6026 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6027 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6028 			    | RXDCTL_WTHRESH(1));
   6029 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6030 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6031 		} else {
   6032 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6033 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6034 			/* ITR / 4 */
   6035 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6036 			/* MUST be same */
   6037 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6038 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6039 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6040 		}
   6041 	}
   6042 }
   6043 
   6044 static int
   6045 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6046 {
   6047 	struct wm_rxsoft *rxs;
   6048 	int error, i;
   6049 
   6050 	KASSERT(mutex_owned(rxq->rxq_lock));
   6051 
   6052 	for (i = 0; i < WM_NRXDESC; i++) {
   6053 		rxs = &rxq->rxq_soft[i];
   6054 		if (rxs->rxs_mbuf == NULL) {
   6055 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6056 				log(LOG_ERR, "%s: unable to allocate or map "
   6057 				    "rx buffer %d, error = %d\n",
   6058 				    device_xname(sc->sc_dev), i, error);
   6059 				/*
   6060 				 * XXX Should attempt to run with fewer receive
   6061 				 * XXX buffers instead of just failing.
   6062 				 */
   6063 				wm_rxdrain(rxq);
   6064 				return ENOMEM;
   6065 			}
   6066 		} else {
   6067 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6068 				wm_init_rxdesc(rxq, i);
   6069 			/*
    6070 			 * For 82575 and newer devices, the RX descriptors
    6071 			 * must be initialized after RCTL.EN is set in
    6072 			 * wm_set_filter().
   6073 			 */
   6074 		}
   6075 	}
   6076 	rxq->rxq_ptr = 0;
   6077 	rxq->rxq_discard = 0;
   6078 	WM_RXCHAIN_RESET(rxq);
   6079 
   6080 	return 0;
   6081 }
   6082 
   6083 static int
   6084 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6085     struct wm_rxqueue *rxq)
   6086 {
   6087 
   6088 	KASSERT(mutex_owned(rxq->rxq_lock));
   6089 
   6090 	/*
   6091 	 * Set up some register offsets that are different between
   6092 	 * the i82542 and the i82543 and later chips.
   6093 	 */
   6094 	if (sc->sc_type < WM_T_82543)
   6095 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6096 	else
   6097 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6098 
   6099 	wm_init_rx_regs(sc, wmq, rxq);
   6100 	return wm_init_rx_buffer(sc, rxq);
   6101 }
   6102 
   6103 /*
    6104  * wm_init_txrx_queues:
   6105  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6106  */
   6107 static int
   6108 wm_init_txrx_queues(struct wm_softc *sc)
   6109 {
   6110 	int i, error = 0;
   6111 
   6112 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6113 		device_xname(sc->sc_dev), __func__));
   6114 
   6115 	for (i = 0; i < sc->sc_nqueues; i++) {
   6116 		struct wm_queue *wmq = &sc->sc_queue[i];
   6117 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6118 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6119 
   6120 		mutex_enter(txq->txq_lock);
   6121 		wm_init_tx_queue(sc, wmq, txq);
   6122 		mutex_exit(txq->txq_lock);
   6123 
   6124 		mutex_enter(rxq->rxq_lock);
   6125 		error = wm_init_rx_queue(sc, wmq, rxq);
   6126 		mutex_exit(rxq->rxq_lock);
   6127 		if (error)
   6128 			break;
   6129 	}
   6130 
   6131 	return error;
   6132 }
   6133 
   6134 /*
   6135  * wm_tx_offload:
   6136  *
   6137  *	Set up TCP/IP checksumming parameters for the
   6138  *	specified packet.
   6139  */
   6140 static int
   6141 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6142     uint8_t *fieldsp)
   6143 {
   6144 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6145 	struct mbuf *m0 = txs->txs_mbuf;
   6146 	struct livengood_tcpip_ctxdesc *t;
   6147 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6148 	uint32_t ipcse;
   6149 	struct ether_header *eh;
   6150 	int offset, iphl;
   6151 	uint8_t fields;
   6152 
   6153 	/*
   6154 	 * XXX It would be nice if the mbuf pkthdr had offset
   6155 	 * fields for the protocol headers.
   6156 	 */
   6157 
   6158 	eh = mtod(m0, struct ether_header *);
   6159 	switch (htons(eh->ether_type)) {
   6160 	case ETHERTYPE_IP:
   6161 	case ETHERTYPE_IPV6:
   6162 		offset = ETHER_HDR_LEN;
   6163 		break;
   6164 
   6165 	case ETHERTYPE_VLAN:
   6166 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6167 		break;
   6168 
   6169 	default:
   6170 		/*
   6171 		 * Don't support this protocol or encapsulation.
   6172 		 */
   6173 		*fieldsp = 0;
   6174 		*cmdp = 0;
   6175 		return 0;
   6176 	}
   6177 
   6178 	if ((m0->m_pkthdr.csum_flags &
   6179 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6180 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6181 	} else {
   6182 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6183 	}
   6184 	ipcse = offset + iphl - 1;
   6185 
   6186 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6187 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6188 	seg = 0;
   6189 	fields = 0;
   6190 
   6191 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6192 		int hlen = offset + iphl;
   6193 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6194 
   6195 		if (__predict_false(m0->m_len <
   6196 				    (hlen + sizeof(struct tcphdr)))) {
   6197 			/*
   6198 			 * TCP/IP headers are not in the first mbuf; we need
   6199 			 * to do this the slow and painful way.  Let's just
   6200 			 * hope this doesn't happen very often.
   6201 			 */
   6202 			struct tcphdr th;
   6203 
   6204 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6205 
   6206 			m_copydata(m0, hlen, sizeof(th), &th);
   6207 			if (v4) {
   6208 				struct ip ip;
   6209 
   6210 				m_copydata(m0, offset, sizeof(ip), &ip);
   6211 				ip.ip_len = 0;
   6212 				m_copyback(m0,
   6213 				    offset + offsetof(struct ip, ip_len),
   6214 				    sizeof(ip.ip_len), &ip.ip_len);
   6215 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6216 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6217 			} else {
   6218 				struct ip6_hdr ip6;
   6219 
   6220 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6221 				ip6.ip6_plen = 0;
   6222 				m_copyback(m0,
   6223 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6224 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6225 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6226 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6227 			}
   6228 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6229 			    sizeof(th.th_sum), &th.th_sum);
   6230 
   6231 			hlen += th.th_off << 2;
   6232 		} else {
   6233 			/*
   6234 			 * TCP/IP headers are in the first mbuf; we can do
   6235 			 * this the easy way.
   6236 			 */
   6237 			struct tcphdr *th;
   6238 
   6239 			if (v4) {
   6240 				struct ip *ip =
   6241 				    (void *)(mtod(m0, char *) + offset);
   6242 				th = (void *)(mtod(m0, char *) + hlen);
   6243 
   6244 				ip->ip_len = 0;
   6245 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6246 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6247 			} else {
   6248 				struct ip6_hdr *ip6 =
   6249 				    (void *)(mtod(m0, char *) + offset);
   6250 				th = (void *)(mtod(m0, char *) + hlen);
   6251 
   6252 				ip6->ip6_plen = 0;
   6253 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6254 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6255 			}
   6256 			hlen += th->th_off << 2;
   6257 		}
   6258 
   6259 		if (v4) {
   6260 			WM_Q_EVCNT_INCR(txq, txtso);
   6261 			cmdlen |= WTX_TCPIP_CMD_IP;
   6262 		} else {
   6263 			WM_Q_EVCNT_INCR(txq, txtso6);
   6264 			ipcse = 0;
   6265 		}
   6266 		cmd |= WTX_TCPIP_CMD_TSE;
   6267 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6268 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6269 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6270 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6271 	}
   6272 
   6273 	/*
   6274 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6275 	 * offload feature, if we load the context descriptor, we
   6276 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6277 	 */
   6278 
   6279 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6280 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6281 	    WTX_TCPIP_IPCSE(ipcse);
   6282 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6283 		WM_Q_EVCNT_INCR(txq, txipsum);
   6284 		fields |= WTX_IXSM;
   6285 	}
   6286 
   6287 	offset += iphl;
   6288 
   6289 	if (m0->m_pkthdr.csum_flags &
   6290 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6291 		WM_Q_EVCNT_INCR(txq, txtusum);
   6292 		fields |= WTX_TXSM;
   6293 		tucs = WTX_TCPIP_TUCSS(offset) |
   6294 		    WTX_TCPIP_TUCSO(offset +
   6295 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6296 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6297 	} else if ((m0->m_pkthdr.csum_flags &
   6298 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6299 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6300 		fields |= WTX_TXSM;
   6301 		tucs = WTX_TCPIP_TUCSS(offset) |
   6302 		    WTX_TCPIP_TUCSO(offset +
   6303 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6304 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6305 	} else {
   6306 		/* Just initialize it to a valid TCP context. */
   6307 		tucs = WTX_TCPIP_TUCSS(offset) |
   6308 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6309 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6310 	}
   6311 
   6312 	/* Fill in the context descriptor. */
   6313 	t = (struct livengood_tcpip_ctxdesc *)
   6314 	    &txq->txq_descs[txq->txq_next];
   6315 	t->tcpip_ipcs = htole32(ipcs);
   6316 	t->tcpip_tucs = htole32(tucs);
   6317 	t->tcpip_cmdlen = htole32(cmdlen);
   6318 	t->tcpip_seg = htole32(seg);
   6319 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6320 
   6321 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6322 	txs->txs_ndesc++;
   6323 
   6324 	*cmdp = cmd;
   6325 	*fieldsp = fields;
   6326 
   6327 	return 0;
   6328 }
   6329 
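         /*
          * wm_select_txqueue:
          *
          *	Map the sending CPU to a Tx queue index.  Purely illustrative
          *	example: with 8 CPUs, sc_nqueues == 4 and sc_affinity_offset
          *	== 0, CPUs 0..7 map to queues 0,1,2,3,0,1,2,3, so each queue
          *	is served by a fixed pair of CPUs.
          */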
   6330 static inline int
   6331 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6332 {
   6333 	struct wm_softc *sc = ifp->if_softc;
   6334 	u_int cpuid = cpu_index(curcpu());
   6335 
   6336 	/*
    6337 	 * Currently, a simple distribution strategy: pick a queue by
    6338 	 * CPU index.
    6339 	 * TODO: distribute by flow ID (the RSS hash value).
   6340 	 */
   6341 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6342 }
   6343 
   6344 /*
   6345  * wm_start:		[ifnet interface function]
   6346  *
   6347  *	Start packet transmission on the interface.
   6348  */
   6349 static void
   6350 wm_start(struct ifnet *ifp)
   6351 {
   6352 	struct wm_softc *sc = ifp->if_softc;
   6353 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6354 
   6355 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6356 
   6357 	/*
   6358 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6359 	 */
   6360 
   6361 	mutex_enter(txq->txq_lock);
   6362 	if (!txq->txq_stopping)
   6363 		wm_start_locked(ifp);
   6364 	mutex_exit(txq->txq_lock);
   6365 }
   6366 
   6367 static void
   6368 wm_start_locked(struct ifnet *ifp)
   6369 {
   6370 	struct wm_softc *sc = ifp->if_softc;
   6371 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6372 
   6373 	wm_send_common_locked(ifp, txq, false);
   6374 }
   6375 
   6376 static int
   6377 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6378 {
   6379 	int qid;
   6380 	struct wm_softc *sc = ifp->if_softc;
   6381 	struct wm_txqueue *txq;
   6382 
   6383 	qid = wm_select_txqueue(ifp, m);
   6384 	txq = &sc->sc_queue[qid].wmq_txq;
   6385 
   6386 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6387 		m_freem(m);
   6388 		WM_Q_EVCNT_INCR(txq, txdrop);
   6389 		return ENOBUFS;
   6390 	}
   6391 
   6392 	/*
   6393 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6394 	 */
   6395 	ifp->if_obytes += m->m_pkthdr.len;
   6396 	if (m->m_flags & M_MCAST)
   6397 		ifp->if_omcasts++;
   6398 
   6399 	if (mutex_tryenter(txq->txq_lock)) {
   6400 		if (!txq->txq_stopping)
   6401 			wm_transmit_locked(ifp, txq);
   6402 		mutex_exit(txq->txq_lock);
   6403 	}
   6404 
   6405 	return 0;
   6406 }
   6407 
   6408 static void
   6409 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6410 {
   6411 
   6412 	wm_send_common_locked(ifp, txq, true);
   6413 }
   6414 
   6415 static void
   6416 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6417     bool is_transmit)
   6418 {
   6419 	struct wm_softc *sc = ifp->if_softc;
   6420 	struct mbuf *m0;
   6421 	struct m_tag *mtag;
   6422 	struct wm_txsoft *txs;
   6423 	bus_dmamap_t dmamap;
   6424 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6425 	bus_addr_t curaddr;
   6426 	bus_size_t seglen, curlen;
   6427 	uint32_t cksumcmd;
   6428 	uint8_t cksumfields;
   6429 
   6430 	KASSERT(mutex_owned(txq->txq_lock));
   6431 
   6432 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6433 		return;
   6434 
   6435 	/* Remember the previous number of free descriptors. */
   6436 	ofree = txq->txq_free;
   6437 
   6438 	/*
   6439 	 * Loop through the send queue, setting up transmit descriptors
   6440 	 * until we drain the queue, or use up all available transmit
   6441 	 * descriptors.
   6442 	 */
   6443 	for (;;) {
   6444 		m0 = NULL;
   6445 
   6446 		/* Get a work queue entry. */
   6447 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6448 			wm_txeof(sc, txq);
   6449 			if (txq->txq_sfree == 0) {
   6450 				DPRINTF(WM_DEBUG_TX,
   6451 				    ("%s: TX: no free job descriptors\n",
   6452 					device_xname(sc->sc_dev)));
   6453 				WM_Q_EVCNT_INCR(txq, txsstall);
   6454 				break;
   6455 			}
   6456 		}
   6457 
   6458 		/* Grab a packet off the queue. */
   6459 		if (is_transmit)
   6460 			m0 = pcq_get(txq->txq_interq);
   6461 		else
   6462 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6463 		if (m0 == NULL)
   6464 			break;
   6465 
   6466 		DPRINTF(WM_DEBUG_TX,
   6467 		    ("%s: TX: have packet to transmit: %p\n",
   6468 		    device_xname(sc->sc_dev), m0));
   6469 
   6470 		txs = &txq->txq_soft[txq->txq_snext];
   6471 		dmamap = txs->txs_dmamap;
   6472 
   6473 		use_tso = (m0->m_pkthdr.csum_flags &
   6474 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6475 
   6476 		/*
   6477 		 * So says the Linux driver:
   6478 		 * The controller does a simple calculation to make sure
   6479 		 * there is enough room in the FIFO before initiating the
   6480 		 * DMA for each buffer.  The calc is:
   6481 		 *	4 = ceil(buffer len / MSS)
   6482 		 * To make sure we don't overrun the FIFO, adjust the max
   6483 		 * buffer len if the MSS drops.
   6484 		 */
   6485 		dmamap->dm_maxsegsz =
   6486 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6487 		    ? m0->m_pkthdr.segsz << 2
   6488 		    : WTX_MAX_LEN;
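         		/*
         		 * Illustrative numbers (not from the original source):
         		 * with a typical MSS of 1460, each DMA segment is thus
         		 * capped at 4 * 1460 = 5840 bytes, keeping the
         		 * controller's per-buffer FIFO estimate valid.
         		 */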
   6489 
   6490 		/*
   6491 		 * Load the DMA map.  If this fails, the packet either
   6492 		 * didn't fit in the allotted number of segments, or we
   6493 		 * were short on resources.  For the too-many-segments
   6494 		 * case, we simply report an error and drop the packet,
   6495 		 * since we can't sanely copy a jumbo packet to a single
   6496 		 * buffer.
   6497 		 */
   6498 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6499 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6500 		if (error) {
   6501 			if (error == EFBIG) {
   6502 				WM_Q_EVCNT_INCR(txq, txdrop);
   6503 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6504 				    "DMA segments, dropping...\n",
   6505 				    device_xname(sc->sc_dev));
   6506 				wm_dump_mbuf_chain(sc, m0);
   6507 				m_freem(m0);
   6508 				continue;
   6509 			}
    6510 			/* Short on resources, just stop for now. */
   6511 			DPRINTF(WM_DEBUG_TX,
   6512 			    ("%s: TX: dmamap load failed: %d\n",
   6513 			    device_xname(sc->sc_dev), error));
   6514 			break;
   6515 		}
   6516 
   6517 		segs_needed = dmamap->dm_nsegs;
   6518 		if (use_tso) {
   6519 			/* For sentinel descriptor; see below. */
   6520 			segs_needed++;
   6521 		}
   6522 
   6523 		/*
   6524 		 * Ensure we have enough descriptors free to describe
   6525 		 * the packet.  Note, we always reserve one descriptor
   6526 		 * at the end of the ring due to the semantics of the
   6527 		 * TDT register, plus one more in the event we need
   6528 		 * to load offload context.
   6529 		 */
   6530 		if (segs_needed > txq->txq_free - 2) {
   6531 			/*
   6532 			 * Not enough free descriptors to transmit this
   6533 			 * packet.  We haven't committed anything yet,
   6534 			 * so just unload the DMA map, put the packet
    6535 			 * back on the queue, and punt.  Notify the upper
   6536 			 * layer that there are no more slots left.
   6537 			 */
   6538 			DPRINTF(WM_DEBUG_TX,
   6539 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6540 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6541 			    segs_needed, txq->txq_free - 1));
   6542 			ifp->if_flags |= IFF_OACTIVE;
   6543 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6544 			WM_Q_EVCNT_INCR(txq, txdstall);
   6545 			break;
   6546 		}
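         		/*
         		 * Example (illustrative): a packet mapped to 3 DMA
         		 * segments needs txq_free >= 5 here: 3 data descriptors
         		 * plus the reserved TDT slot plus a possible context
         		 * descriptor.
         		 */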
   6547 
   6548 		/*
   6549 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6550 		 * once we know we can transmit the packet, since we
   6551 		 * do some internal FIFO space accounting here.
   6552 		 */
   6553 		if (sc->sc_type == WM_T_82547 &&
   6554 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6555 			DPRINTF(WM_DEBUG_TX,
   6556 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6557 			    device_xname(sc->sc_dev)));
   6558 			ifp->if_flags |= IFF_OACTIVE;
   6559 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6560 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6561 			break;
   6562 		}
   6563 
   6564 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6565 
   6566 		DPRINTF(WM_DEBUG_TX,
   6567 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6568 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6569 
   6570 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6571 
   6572 		/*
   6573 		 * Store a pointer to the packet so that we can free it
   6574 		 * later.
   6575 		 *
   6576 		 * Initially, we consider the number of descriptors the
    6577 		 * packet uses to be the number of DMA segments.  This may be
   6578 		 * incremented by 1 if we do checksum offload (a descriptor
   6579 		 * is used to set the checksum context).
   6580 		 */
   6581 		txs->txs_mbuf = m0;
   6582 		txs->txs_firstdesc = txq->txq_next;
   6583 		txs->txs_ndesc = segs_needed;
   6584 
   6585 		/* Set up offload parameters for this packet. */
   6586 		if (m0->m_pkthdr.csum_flags &
   6587 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6588 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6589 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6590 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6591 					  &cksumfields) != 0) {
   6592 				/* Error message already displayed. */
   6593 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6594 				continue;
   6595 			}
   6596 		} else {
   6597 			cksumcmd = 0;
   6598 			cksumfields = 0;
   6599 		}
   6600 
   6601 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6602 
   6603 		/* Sync the DMA map. */
   6604 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6605 		    BUS_DMASYNC_PREWRITE);
   6606 
   6607 		/* Initialize the transmit descriptor. */
   6608 		for (nexttx = txq->txq_next, seg = 0;
   6609 		     seg < dmamap->dm_nsegs; seg++) {
   6610 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6611 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6612 			     seglen != 0;
   6613 			     curaddr += curlen, seglen -= curlen,
   6614 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6615 				curlen = seglen;
   6616 
   6617 				/*
   6618 				 * So says the Linux driver:
   6619 				 * Work around for premature descriptor
   6620 				 * write-backs in TSO mode.  Append a
   6621 				 * 4-byte sentinel descriptor.
   6622 				 */
   6623 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6624 				    curlen > 8)
   6625 					curlen -= 4;
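         				/*
         				 * The 4 bytes trimmed here are not
         				 * lost: seglen is reduced by curlen,
         				 * so the next loop iteration emits
         				 * them as a final 4-byte sentinel
         				 * descriptor.
         				 */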
   6626 
   6627 				wm_set_dma_addr(
   6628 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6629 				txq->txq_descs[nexttx].wtx_cmdlen
   6630 				    = htole32(cksumcmd | curlen);
   6631 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6632 				    = 0;
   6633 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6634 				    = cksumfields;
   6635 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6636 				lasttx = nexttx;
   6637 
   6638 				DPRINTF(WM_DEBUG_TX,
   6639 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6640 				     "len %#04zx\n",
   6641 				    device_xname(sc->sc_dev), nexttx,
   6642 				    (uint64_t)curaddr, curlen));
   6643 			}
   6644 		}
   6645 
   6646 		KASSERT(lasttx != -1);
   6647 
   6648 		/*
   6649 		 * Set up the command byte on the last descriptor of
   6650 		 * the packet.  If we're in the interrupt delay window,
   6651 		 * delay the interrupt.
   6652 		 */
   6653 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6654 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6655 
   6656 		/*
   6657 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6658 		 * up the descriptor to encapsulate the packet for us.
   6659 		 *
   6660 		 * This is only valid on the last descriptor of the packet.
   6661 		 */
   6662 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6663 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6664 			    htole32(WTX_CMD_VLE);
   6665 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6666 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6667 		}
   6668 
   6669 		txs->txs_lastdesc = lasttx;
   6670 
   6671 		DPRINTF(WM_DEBUG_TX,
   6672 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6673 		    device_xname(sc->sc_dev),
   6674 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6675 
   6676 		/* Sync the descriptors we're using. */
   6677 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6678 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6679 
   6680 		/* Give the packet to the chip. */
   6681 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6682 
   6683 		DPRINTF(WM_DEBUG_TX,
   6684 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6685 
   6686 		DPRINTF(WM_DEBUG_TX,
   6687 		    ("%s: TX: finished transmitting packet, job %d\n",
   6688 		    device_xname(sc->sc_dev), txq->txq_snext));
   6689 
   6690 		/* Advance the tx pointer. */
   6691 		txq->txq_free -= txs->txs_ndesc;
   6692 		txq->txq_next = nexttx;
   6693 
   6694 		txq->txq_sfree--;
   6695 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6696 
   6697 		/* Pass the packet to any BPF listeners. */
   6698 		bpf_mtap(ifp, m0);
   6699 	}
   6700 
   6701 	if (m0 != NULL) {
   6702 		ifp->if_flags |= IFF_OACTIVE;
   6703 		WM_Q_EVCNT_INCR(txq, txdrop);
   6704 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6705 			__func__));
   6706 		m_freem(m0);
   6707 	}
   6708 
   6709 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6710 		/* No more slots; notify upper layer. */
   6711 		ifp->if_flags |= IFF_OACTIVE;
   6712 	}
   6713 
   6714 	if (txq->txq_free != ofree) {
   6715 		/* Set a watchdog timer in case the chip flakes out. */
   6716 		ifp->if_timer = 5;
   6717 	}
   6718 }
   6719 
   6720 /*
   6721  * wm_nq_tx_offload:
   6722  *
   6723  *	Set up TCP/IP checksumming parameters for the
   6724  *	specified packet, for NEWQUEUE devices
   6725  */
   6726 static int
   6727 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6728     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6729 {
   6730 	struct mbuf *m0 = txs->txs_mbuf;
   6731 	struct m_tag *mtag;
   6732 	uint32_t vl_len, mssidx, cmdc;
   6733 	struct ether_header *eh;
   6734 	int offset, iphl;
   6735 
   6736 	/*
   6737 	 * XXX It would be nice if the mbuf pkthdr had offset
   6738 	 * fields for the protocol headers.
   6739 	 */
   6740 	*cmdlenp = 0;
   6741 	*fieldsp = 0;
   6742 
   6743 	eh = mtod(m0, struct ether_header *);
   6744 	switch (htons(eh->ether_type)) {
   6745 	case ETHERTYPE_IP:
   6746 	case ETHERTYPE_IPV6:
   6747 		offset = ETHER_HDR_LEN;
   6748 		break;
   6749 
   6750 	case ETHERTYPE_VLAN:
   6751 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6752 		break;
   6753 
   6754 	default:
   6755 		/* Don't support this protocol or encapsulation. */
   6756 		*do_csum = false;
   6757 		return 0;
   6758 	}
   6759 	*do_csum = true;
   6760 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6761 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6762 
   6763 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6764 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6765 
   6766 	if ((m0->m_pkthdr.csum_flags &
   6767 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6768 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6769 	} else {
   6770 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6771 	}
   6772 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6773 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6774 
   6775 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6776 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6777 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6778 		*cmdlenp |= NQTX_CMD_VLE;
   6779 	}
   6780 
   6781 	mssidx = 0;
   6782 
   6783 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6784 		int hlen = offset + iphl;
   6785 		int tcp_hlen;
   6786 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6787 
   6788 		if (__predict_false(m0->m_len <
   6789 				    (hlen + sizeof(struct tcphdr)))) {
   6790 			/*
   6791 			 * TCP/IP headers are not in the first mbuf; we need
   6792 			 * to do this the slow and painful way.  Let's just
   6793 			 * hope this doesn't happen very often.
   6794 			 */
   6795 			struct tcphdr th;
   6796 
   6797 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6798 
   6799 			m_copydata(m0, hlen, sizeof(th), &th);
   6800 			if (v4) {
   6801 				struct ip ip;
   6802 
   6803 				m_copydata(m0, offset, sizeof(ip), &ip);
   6804 				ip.ip_len = 0;
   6805 				m_copyback(m0,
   6806 				    offset + offsetof(struct ip, ip_len),
   6807 				    sizeof(ip.ip_len), &ip.ip_len);
   6808 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6809 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6810 			} else {
   6811 				struct ip6_hdr ip6;
   6812 
   6813 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6814 				ip6.ip6_plen = 0;
   6815 				m_copyback(m0,
   6816 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6817 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6818 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6819 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6820 			}
   6821 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6822 			    sizeof(th.th_sum), &th.th_sum);
   6823 
   6824 			tcp_hlen = th.th_off << 2;
   6825 		} else {
   6826 			/*
   6827 			 * TCP/IP headers are in the first mbuf; we can do
   6828 			 * this the easy way.
   6829 			 */
   6830 			struct tcphdr *th;
   6831 
   6832 			if (v4) {
   6833 				struct ip *ip =
   6834 				    (void *)(mtod(m0, char *) + offset);
   6835 				th = (void *)(mtod(m0, char *) + hlen);
   6836 
   6837 				ip->ip_len = 0;
   6838 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6839 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6840 			} else {
   6841 				struct ip6_hdr *ip6 =
   6842 				    (void *)(mtod(m0, char *) + offset);
   6843 				th = (void *)(mtod(m0, char *) + hlen);
   6844 
   6845 				ip6->ip6_plen = 0;
   6846 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6847 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6848 			}
   6849 			tcp_hlen = th->th_off << 2;
   6850 		}
   6851 		hlen += tcp_hlen;
   6852 		*cmdlenp |= NQTX_CMD_TSE;
   6853 
   6854 		if (v4) {
   6855 			WM_Q_EVCNT_INCR(txq, txtso);
   6856 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6857 		} else {
   6858 			WM_Q_EVCNT_INCR(txq, txtso6);
   6859 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6860 		}
   6861 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6862 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6863 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6864 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6865 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6866 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6867 	} else {
   6868 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6869 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6870 	}
   6871 
   6872 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6873 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6874 		cmdc |= NQTXC_CMD_IP4;
   6875 	}
   6876 
   6877 	if (m0->m_pkthdr.csum_flags &
   6878 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6879 		WM_Q_EVCNT_INCR(txq, txtusum);
   6880 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6881 			cmdc |= NQTXC_CMD_TCP;
   6882 		} else {
   6883 			cmdc |= NQTXC_CMD_UDP;
   6884 		}
   6885 		cmdc |= NQTXC_CMD_IP4;
   6886 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6887 	}
   6888 	if (m0->m_pkthdr.csum_flags &
   6889 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6890 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6891 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6892 			cmdc |= NQTXC_CMD_TCP;
   6893 		} else {
   6894 			cmdc |= NQTXC_CMD_UDP;
   6895 		}
   6896 		cmdc |= NQTXC_CMD_IP6;
   6897 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6898 	}
   6899 
   6900 	/* Fill in the context descriptor. */
   6901 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6902 	    htole32(vl_len);
   6903 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6904 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6905 	    htole32(cmdc);
   6906 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6907 	    htole32(mssidx);
   6908 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6909 	DPRINTF(WM_DEBUG_TX,
   6910 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6911 	    txq->txq_next, 0, vl_len));
   6912 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6913 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6914 	txs->txs_ndesc++;
   6915 	return 0;
   6916 }
   6917 
   6918 /*
   6919  * wm_nq_start:		[ifnet interface function]
   6920  *
   6921  *	Start packet transmission on the interface for NEWQUEUE devices
   6922  */
   6923 static void
   6924 wm_nq_start(struct ifnet *ifp)
   6925 {
   6926 	struct wm_softc *sc = ifp->if_softc;
   6927 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6928 
   6929 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6930 
   6931 	/*
   6932 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6933 	 */
   6934 
   6935 	mutex_enter(txq->txq_lock);
   6936 	if (!txq->txq_stopping)
   6937 		wm_nq_start_locked(ifp);
   6938 	mutex_exit(txq->txq_lock);
   6939 }
   6940 
   6941 static void
   6942 wm_nq_start_locked(struct ifnet *ifp)
   6943 {
   6944 	struct wm_softc *sc = ifp->if_softc;
   6945 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6946 
   6947 	wm_nq_send_common_locked(ifp, txq, false);
   6948 }
   6949 
   6950 static int
   6951 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6952 {
   6953 	int qid;
   6954 	struct wm_softc *sc = ifp->if_softc;
   6955 	struct wm_txqueue *txq;
   6956 
   6957 	qid = wm_select_txqueue(ifp, m);
   6958 	txq = &sc->sc_queue[qid].wmq_txq;
   6959 
   6960 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6961 		m_freem(m);
   6962 		WM_Q_EVCNT_INCR(txq, txdrop);
   6963 		return ENOBUFS;
   6964 	}
   6965 
   6966 	/*
   6967 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6968 	 */
   6969 	ifp->if_obytes += m->m_pkthdr.len;
   6970 	if (m->m_flags & M_MCAST)
   6971 		ifp->if_omcasts++;
   6972 
   6973 	if (mutex_tryenter(txq->txq_lock)) {
   6974 		if (!txq->txq_stopping)
   6975 			wm_nq_transmit_locked(ifp, txq);
   6976 		mutex_exit(txq->txq_lock);
   6977 	}
   6978 
   6979 	return 0;
   6980 }
   6981 
   6982 static void
   6983 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6984 {
   6985 
   6986 	wm_nq_send_common_locked(ifp, txq, true);
   6987 }
   6988 
   6989 static void
   6990 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6991     bool is_transmit)
   6992 {
   6993 	struct wm_softc *sc = ifp->if_softc;
   6994 	struct mbuf *m0;
   6995 	struct m_tag *mtag;
   6996 	struct wm_txsoft *txs;
   6997 	bus_dmamap_t dmamap;
   6998 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6999 	bool do_csum, sent;
   7000 
   7001 	KASSERT(mutex_owned(txq->txq_lock));
   7002 
   7003 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   7004 		return;
   7005 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7006 		return;
   7007 
   7008 	sent = false;
   7009 
   7010 	/*
   7011 	 * Loop through the send queue, setting up transmit descriptors
   7012 	 * until we drain the queue, or use up all available transmit
   7013 	 * descriptors.
   7014 	 */
   7015 	for (;;) {
   7016 		m0 = NULL;
   7017 
   7018 		/* Get a work queue entry. */
   7019 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7020 			wm_txeof(sc, txq);
   7021 			if (txq->txq_sfree == 0) {
   7022 				DPRINTF(WM_DEBUG_TX,
   7023 				    ("%s: TX: no free job descriptors\n",
   7024 					device_xname(sc->sc_dev)));
   7025 				WM_Q_EVCNT_INCR(txq, txsstall);
   7026 				break;
   7027 			}
   7028 		}
   7029 
   7030 		/* Grab a packet off the queue. */
   7031 		if (is_transmit)
   7032 			m0 = pcq_get(txq->txq_interq);
   7033 		else
   7034 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7035 		if (m0 == NULL)
   7036 			break;
   7037 
   7038 		DPRINTF(WM_DEBUG_TX,
   7039 		    ("%s: TX: have packet to transmit: %p\n",
   7040 		    device_xname(sc->sc_dev), m0));
   7041 
   7042 		txs = &txq->txq_soft[txq->txq_snext];
   7043 		dmamap = txs->txs_dmamap;
   7044 
   7045 		/*
   7046 		 * Load the DMA map.  If this fails, the packet either
   7047 		 * didn't fit in the allotted number of segments, or we
   7048 		 * were short on resources.  For the too-many-segments
   7049 		 * case, we simply report an error and drop the packet,
   7050 		 * since we can't sanely copy a jumbo packet to a single
   7051 		 * buffer.
   7052 		 */
   7053 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7054 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7055 		if (error) {
   7056 			if (error == EFBIG) {
   7057 				WM_Q_EVCNT_INCR(txq, txdrop);
   7058 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7059 				    "DMA segments, dropping...\n",
   7060 				    device_xname(sc->sc_dev));
   7061 				wm_dump_mbuf_chain(sc, m0);
   7062 				m_freem(m0);
   7063 				continue;
   7064 			}
   7065 			/* Short on resources, just stop for now. */
   7066 			DPRINTF(WM_DEBUG_TX,
   7067 			    ("%s: TX: dmamap load failed: %d\n",
   7068 			    device_xname(sc->sc_dev), error));
   7069 			break;
   7070 		}
   7071 
   7072 		segs_needed = dmamap->dm_nsegs;
   7073 
   7074 		/*
   7075 		 * Ensure we have enough descriptors free to describe
   7076 		 * the packet.  Note, we always reserve one descriptor
   7077 		 * at the end of the ring due to the semantics of the
   7078 		 * TDT register, plus one more in the event we need
   7079 		 * to load offload context.
   7080 		 */
   7081 		if (segs_needed > txq->txq_free - 2) {
   7082 			/*
   7083 			 * Not enough free descriptors to transmit this
   7084 			 * packet.  We haven't committed anything yet,
   7085 			 * so just unload the DMA map, put the packet
    7086 			 * back on the queue, and punt.  Notify the upper
   7087 			 * layer that there are no more slots left.
   7088 			 */
   7089 			DPRINTF(WM_DEBUG_TX,
   7090 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7091 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7092 			    segs_needed, txq->txq_free - 1));
   7093 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7094 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7095 			WM_Q_EVCNT_INCR(txq, txdstall);
   7096 			break;
   7097 		}
   7098 
   7099 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7100 
   7101 		DPRINTF(WM_DEBUG_TX,
   7102 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7103 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7104 
   7105 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7106 
   7107 		/*
   7108 		 * Store a pointer to the packet so that we can free it
   7109 		 * later.
   7110 		 *
   7111 		 * Initially, we consider the number of descriptors the
    7112 		 * packet uses to be the number of DMA segments.  This may be
   7113 		 * incremented by 1 if we do checksum offload (a descriptor
   7114 		 * is used to set the checksum context).
   7115 		 */
   7116 		txs->txs_mbuf = m0;
   7117 		txs->txs_firstdesc = txq->txq_next;
   7118 		txs->txs_ndesc = segs_needed;
   7119 
   7120 		/* Set up offload parameters for this packet. */
   7121 		uint32_t cmdlen, fields, dcmdlen;
   7122 		if (m0->m_pkthdr.csum_flags &
   7123 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7124 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7125 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7126 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7127 			    &do_csum) != 0) {
   7128 				/* Error message already displayed. */
   7129 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7130 				continue;
   7131 			}
   7132 		} else {
   7133 			do_csum = false;
   7134 			cmdlen = 0;
   7135 			fields = 0;
   7136 		}
   7137 
   7138 		/* Sync the DMA map. */
   7139 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7140 		    BUS_DMASYNC_PREWRITE);
   7141 
   7142 		/* Initialize the first transmit descriptor. */
   7143 		nexttx = txq->txq_next;
   7144 		if (!do_csum) {
   7145 			/* setup a legacy descriptor */
   7146 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7147 			    dmamap->dm_segs[0].ds_addr);
   7148 			txq->txq_descs[nexttx].wtx_cmdlen =
   7149 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7150 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7151 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7152 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7153 			    NULL) {
   7154 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7155 				    htole32(WTX_CMD_VLE);
   7156 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7157 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7158 			} else {
   7159 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7160 			}
   7161 			dcmdlen = 0;
   7162 		} else {
   7163 			/* setup an advanced data descriptor */
   7164 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7165 			    htole64(dmamap->dm_segs[0].ds_addr);
   7166 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7167 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7168 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7169 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7170 			    htole32(fields);
   7171 			DPRINTF(WM_DEBUG_TX,
   7172 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7173 			    device_xname(sc->sc_dev), nexttx,
   7174 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7175 			DPRINTF(WM_DEBUG_TX,
   7176 			    ("\t 0x%08x%08x\n", fields,
   7177 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7178 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7179 		}
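         		/*
         		 * Note: without offload the first slot can use a legacy
         		 * descriptor, but with offload it must use the advanced
         		 * data format so the cmdlen/fields values computed by
         		 * wm_nq_tx_offload() take effect.
         		 */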
   7180 
   7181 		lasttx = nexttx;
   7182 		nexttx = WM_NEXTTX(txq, nexttx);
   7183 		/*
    7184 		 * Fill in the next descriptors.  The legacy and advanced
    7185 		 * formats are the same here.
   7186 		 */
   7187 		for (seg = 1; seg < dmamap->dm_nsegs;
   7188 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7189 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7190 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7191 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7192 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7193 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7194 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7195 			lasttx = nexttx;
   7196 
   7197 			DPRINTF(WM_DEBUG_TX,
   7198 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7199 			     "len %#04zx\n",
   7200 			    device_xname(sc->sc_dev), nexttx,
   7201 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7202 			    dmamap->dm_segs[seg].ds_len));
   7203 		}
   7204 
   7205 		KASSERT(lasttx != -1);
   7206 
   7207 		/*
   7208 		 * Set up the command byte on the last descriptor of
   7209 		 * the packet.  If we're in the interrupt delay window,
   7210 		 * delay the interrupt.
   7211 		 */
   7212 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7213 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7214 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7215 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7216 
   7217 		txs->txs_lastdesc = lasttx;
   7218 
   7219 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7220 		    device_xname(sc->sc_dev),
   7221 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7222 
   7223 		/* Sync the descriptors we're using. */
   7224 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7225 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7226 
   7227 		/* Give the packet to the chip. */
   7228 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7229 		sent = true;
   7230 
   7231 		DPRINTF(WM_DEBUG_TX,
   7232 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7233 
   7234 		DPRINTF(WM_DEBUG_TX,
   7235 		    ("%s: TX: finished transmitting packet, job %d\n",
   7236 		    device_xname(sc->sc_dev), txq->txq_snext));
   7237 
   7238 		/* Advance the tx pointer. */
   7239 		txq->txq_free -= txs->txs_ndesc;
   7240 		txq->txq_next = nexttx;
   7241 
   7242 		txq->txq_sfree--;
   7243 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7244 
   7245 		/* Pass the packet to any BPF listeners. */
   7246 		bpf_mtap(ifp, m0);
   7247 	}
   7248 
   7249 	if (m0 != NULL) {
   7250 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7251 		WM_Q_EVCNT_INCR(txq, txdrop);
   7252 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7253 			__func__));
   7254 		m_freem(m0);
   7255 	}
   7256 
   7257 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7258 		/* No more slots; notify upper layer. */
   7259 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7260 	}
   7261 
   7262 	if (sent) {
   7263 		/* Set a watchdog timer in case the chip flakes out. */
   7264 		ifp->if_timer = 5;
   7265 	}
   7266 }
   7267 
   7268 static void
   7269 wm_deferred_start(struct ifnet *ifp)
   7270 {
   7271 	struct wm_softc *sc = ifp->if_softc;
   7272 	int qid = 0;
   7273 
   7274 	/*
    7275 	 * Try to transmit on all Tx queues.  It might be better to pass
    7276 	 * a specific txq here somehow and transmit only on that queue.
   7277 	 */
   7278 restart:
   7279 	WM_CORE_LOCK(sc);
   7280 	if (sc->sc_core_stopping)
   7281 		goto out;
   7282 
   7283 	for (; qid < sc->sc_nqueues; qid++) {
   7284 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7285 
   7286 		if (!mutex_tryenter(txq->txq_lock))
   7287 			continue;
   7288 
   7289 		if (txq->txq_stopping) {
   7290 			mutex_exit(txq->txq_lock);
   7291 			continue;
   7292 		}
   7293 		WM_CORE_UNLOCK(sc);
   7294 
   7295 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7296 			/* XXX needed for ALTQ */
   7297 			if (qid == 0)
   7298 				wm_nq_start_locked(ifp);
   7299 			wm_nq_transmit_locked(ifp, txq);
   7300 		} else {
    7301 			/* XXX needed for ALTQ */
   7302 			if (qid == 0)
   7303 				wm_start_locked(ifp);
   7304 			wm_transmit_locked(ifp, txq);
   7305 		}
   7306 		mutex_exit(txq->txq_lock);
   7307 
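         		/*
         		 * The core lock was dropped above, so re-take it via
         		 * "restart" and continue with the next queue.
         		 */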
   7308 		qid++;
   7309 		goto restart;
   7310 	}
   7311 out:
   7312 	WM_CORE_UNLOCK(sc);
   7313 }
   7314 
   7315 /* Interrupt */
   7316 
   7317 /*
   7318  * wm_txeof:
   7319  *
   7320  *	Helper; handle transmit interrupts.
   7321  */
   7322 static int
   7323 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7324 {
   7325 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7326 	struct wm_txsoft *txs;
   7327 	bool processed = false;
   7328 	int count = 0;
   7329 	int i;
   7330 	uint8_t status;
   7331 
   7332 	KASSERT(mutex_owned(txq->txq_lock));
   7333 
   7334 	if (txq->txq_stopping)
   7335 		return 0;
   7336 
   7337 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7338 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7339 	else
   7340 		ifp->if_flags &= ~IFF_OACTIVE;
   7341 
   7342 	/*
   7343 	 * Go through the Tx list and free mbufs for those
   7344 	 * frames which have been transmitted.
   7345 	 */
   7346 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7347 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7348 		txs = &txq->txq_soft[i];
   7349 
   7350 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7351 			device_xname(sc->sc_dev), i));
   7352 
   7353 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7354 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7355 
   7356 		status =
   7357 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7358 		if ((status & WTX_ST_DD) == 0) {
   7359 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7360 			    BUS_DMASYNC_PREREAD);
   7361 			break;
   7362 		}
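         		/* DD is set: the chip has completed this job. */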
   7363 
   7364 		processed = true;
   7365 		count++;
   7366 		DPRINTF(WM_DEBUG_TX,
   7367 		    ("%s: TX: job %d done: descs %d..%d\n",
   7368 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7369 		    txs->txs_lastdesc));
   7370 
   7371 		/*
   7372 		 * XXX We should probably be using the statistics
   7373 		 * XXX registers, but I don't know if they exist
   7374 		 * XXX on chips before the i82544.
   7375 		 */
   7376 
   7377 #ifdef WM_EVENT_COUNTERS
   7378 		if (status & WTX_ST_TU)
   7379 			WM_Q_EVCNT_INCR(txq, tu);
   7380 #endif /* WM_EVENT_COUNTERS */
   7381 
   7382 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7383 			ifp->if_oerrors++;
   7384 			if (status & WTX_ST_LC)
   7385 				log(LOG_WARNING, "%s: late collision\n",
   7386 				    device_xname(sc->sc_dev));
   7387 			else if (status & WTX_ST_EC) {
   7388 				ifp->if_collisions += 16;
   7389 				log(LOG_WARNING, "%s: excessive collisions\n",
   7390 				    device_xname(sc->sc_dev));
   7391 			}
   7392 		} else
   7393 			ifp->if_opackets++;
   7394 
   7395 		txq->txq_free += txs->txs_ndesc;
   7396 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7397 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7398 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7399 		m_freem(txs->txs_mbuf);
   7400 		txs->txs_mbuf = NULL;
   7401 	}
   7402 
   7403 	/* Update the dirty transmit buffer pointer. */
   7404 	txq->txq_sdirty = i;
   7405 	DPRINTF(WM_DEBUG_TX,
   7406 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7407 
   7408 	if (count != 0)
   7409 		rnd_add_uint32(&sc->rnd_source, count);
   7410 
   7411 	/*
   7412 	 * If there are no more pending transmissions, cancel the watchdog
   7413 	 * timer.
   7414 	 */
   7415 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7416 		ifp->if_timer = 0;
   7417 
   7418 	return processed;
   7419 }
   7420 
   7421 /*
   7422  * wm_rxeof:
   7423  *
   7424  *	Helper; handle receive interrupts.
   7425  */
   7426 static void
   7427 wm_rxeof(struct wm_rxqueue *rxq)
   7428 {
   7429 	struct wm_softc *sc = rxq->rxq_sc;
   7430 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7431 	struct wm_rxsoft *rxs;
   7432 	struct mbuf *m;
   7433 	int i, len;
   7434 	int count = 0;
   7435 	uint8_t status, errors;
   7436 	uint16_t vlantag;
   7437 
   7438 	KASSERT(mutex_owned(rxq->rxq_lock));
   7439 
   7440 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7441 		rxs = &rxq->rxq_soft[i];
   7442 
   7443 		DPRINTF(WM_DEBUG_RX,
   7444 		    ("%s: RX: checking descriptor %d\n",
   7445 		    device_xname(sc->sc_dev), i));
   7446 
   7447 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7448 
   7449 		status = rxq->rxq_descs[i].wrx_status;
   7450 		errors = rxq->rxq_descs[i].wrx_errors;
   7451 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7452 		vlantag = rxq->rxq_descs[i].wrx_special;
   7453 
   7454 		if ((status & WRX_ST_DD) == 0) {
   7455 			/* We have processed all of the receive descriptors. */
   7456 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7457 			break;
   7458 		}
   7459 
   7460 		count++;
   7461 		if (__predict_false(rxq->rxq_discard)) {
   7462 			DPRINTF(WM_DEBUG_RX,
   7463 			    ("%s: RX: discarding contents of descriptor %d\n",
   7464 			    device_xname(sc->sc_dev), i));
   7465 			wm_init_rxdesc(rxq, i);
   7466 			if (status & WRX_ST_EOP) {
   7467 				/* Reset our state. */
   7468 				DPRINTF(WM_DEBUG_RX,
   7469 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7470 				    device_xname(sc->sc_dev)));
   7471 				rxq->rxq_discard = 0;
   7472 			}
   7473 			continue;
   7474 		}
   7475 
   7476 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7477 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7478 
   7479 		m = rxs->rxs_mbuf;
   7480 
   7481 		/*
   7482 		 * Add a new receive buffer to the ring, unless of
   7483 		 * course the length is zero. Treat the latter as a
   7484 		 * failed mapping.
   7485 		 */
   7486 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7487 			/*
   7488 			 * Failed, throw away what we've done so
   7489 			 * far, and discard the rest of the packet.
   7490 			 */
   7491 			ifp->if_ierrors++;
   7492 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7493 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7494 			wm_init_rxdesc(rxq, i);
   7495 			if ((status & WRX_ST_EOP) == 0)
   7496 				rxq->rxq_discard = 1;
   7497 			if (rxq->rxq_head != NULL)
   7498 				m_freem(rxq->rxq_head);
   7499 			WM_RXCHAIN_RESET(rxq);
   7500 			DPRINTF(WM_DEBUG_RX,
   7501 			    ("%s: RX: Rx buffer allocation failed, "
   7502 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7503 			    rxq->rxq_discard ? " (discard)" : ""));
   7504 			continue;
   7505 		}
   7506 
   7507 		m->m_len = len;
   7508 		rxq->rxq_len += len;
   7509 		DPRINTF(WM_DEBUG_RX,
   7510 		    ("%s: RX: buffer at %p len %d\n",
   7511 		    device_xname(sc->sc_dev), m->m_data, len));
   7512 
   7513 		/* If this is not the end of the packet, keep looking. */
   7514 		if ((status & WRX_ST_EOP) == 0) {
   7515 			WM_RXCHAIN_LINK(rxq, m);
   7516 			DPRINTF(WM_DEBUG_RX,
   7517 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7518 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7519 			continue;
   7520 		}
   7521 
   7522 		/*
    7523 		 * Okay, we have the entire packet now.  The chip is
    7524 		 * configured to include the FCS except on I350, I354 and
    7525 		 * I21[01] (not all chips can be configured to strip it),
    7526 		 * so we need to trim it ourselves.  We may need to adjust
    7527 		 * the length of the previous mbuf in the chain if the
    7528 		 * current mbuf is too short.
    7529 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    7530 		 * is always set on I350, so we don't trim there.
   7531 		 */
   7532 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7533 		    && (sc->sc_type != WM_T_I210)
   7534 		    && (sc->sc_type != WM_T_I211)) {
   7535 			if (m->m_len < ETHER_CRC_LEN) {
   7536 				rxq->rxq_tail->m_len
   7537 				    -= (ETHER_CRC_LEN - m->m_len);
   7538 				m->m_len = 0;
   7539 			} else
   7540 				m->m_len -= ETHER_CRC_LEN;
   7541 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7542 		} else
   7543 			len = rxq->rxq_len;
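         		/*
         		 * Illustrative example: if the final mbuf holds only 2
         		 * bytes, those 2 bytes plus the last 2 bytes of the
         		 * previous mbuf are the FCS, so the previous mbuf is
         		 * shortened by 2 and this mbuf's length is set to 0.
         		 */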
   7544 
   7545 		WM_RXCHAIN_LINK(rxq, m);
   7546 
   7547 		*rxq->rxq_tailp = NULL;
   7548 		m = rxq->rxq_head;
   7549 
   7550 		WM_RXCHAIN_RESET(rxq);
   7551 
   7552 		DPRINTF(WM_DEBUG_RX,
   7553 		    ("%s: RX: have entire packet, len -> %d\n",
   7554 		    device_xname(sc->sc_dev), len));
   7555 
   7556 		/* If an error occurred, update stats and drop the packet. */
   7557 		if (errors &
   7558 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7559 			if (errors & WRX_ER_SE)
   7560 				log(LOG_WARNING, "%s: symbol error\n",
   7561 				    device_xname(sc->sc_dev));
   7562 			else if (errors & WRX_ER_SEQ)
   7563 				log(LOG_WARNING, "%s: receive sequence error\n",
   7564 				    device_xname(sc->sc_dev));
   7565 			else if (errors & WRX_ER_CE)
   7566 				log(LOG_WARNING, "%s: CRC error\n",
   7567 				    device_xname(sc->sc_dev));
   7568 			m_freem(m);
   7569 			continue;
   7570 		}
   7571 
   7572 		/* No errors.  Receive the packet. */
   7573 		m_set_rcvif(m, ifp);
   7574 		m->m_pkthdr.len = len;
   7575 
   7576 		/*
   7577 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7578 		 * for us.  Associate the tag with the packet.
   7579 		 */
   7580 		/* XXXX should check for i350 and i354 */
   7581 		if ((status & WRX_ST_VP) != 0) {
   7582 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7583 		}
   7584 
   7585 		/* Set up checksum info for this packet. */
   7586 		if ((status & WRX_ST_IXSM) == 0) {
   7587 			if (status & WRX_ST_IPCS) {
   7588 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7589 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7590 				if (errors & WRX_ER_IPE)
   7591 					m->m_pkthdr.csum_flags |=
   7592 					    M_CSUM_IPv4_BAD;
   7593 			}
   7594 			if (status & WRX_ST_TCPCS) {
   7595 				/*
   7596 				 * Note: we don't know if this was TCP or UDP,
   7597 				 * so we just set both bits, and expect the
   7598 				 * upper layers to deal.
   7599 				 */
   7600 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7601 				m->m_pkthdr.csum_flags |=
   7602 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7603 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7604 				if (errors & WRX_ER_TCPE)
   7605 					m->m_pkthdr.csum_flags |=
   7606 					    M_CSUM_TCP_UDP_BAD;
   7607 			}
   7608 		}
   7609 
   7610 		mutex_exit(rxq->rxq_lock);
   7611 
   7612 		/* Pass it on. */
   7613 		if_percpuq_enqueue(sc->sc_ipq, m);
   7614 
   7615 		mutex_enter(rxq->rxq_lock);
   7616 
   7617 		if (rxq->rxq_stopping)
   7618 			break;
   7619 	}
   7620 
   7621 	/* Update the receive pointer. */
   7622 	rxq->rxq_ptr = i;
   7623 	if (count != 0)
   7624 		rnd_add_uint32(&sc->rnd_source, count);
   7625 
   7626 	DPRINTF(WM_DEBUG_RX,
   7627 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7628 }
   7629 
   7630 /*
   7631  * wm_linkintr_gmii:
   7632  *
   7633  *	Helper; handle link interrupts for GMII.
   7634  */
   7635 static void
   7636 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7637 {
   7638 
   7639 	KASSERT(WM_CORE_LOCKED(sc));
   7640 
   7641 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7642 		__func__));
   7643 
   7644 	if (icr & ICR_LSC) {
   7645 		uint32_t reg;
   7646 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7647 
   7648 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7649 			wm_gig_downshift_workaround_ich8lan(sc);
   7650 
   7651 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7652 			device_xname(sc->sc_dev)));
   7653 		mii_pollstat(&sc->sc_mii);
   7654 		if (sc->sc_type == WM_T_82543) {
   7655 			int miistatus, active;
   7656 
   7657 			/*
    7658 			 * With the 82543, we need to force the MAC's
    7659 			 * speed and duplex to match the PHY's speed
    7660 			 * and duplex configuration.
   7661 			 */
   7662 			miistatus = sc->sc_mii.mii_media_status;
   7663 
   7664 			if (miistatus & IFM_ACTIVE) {
   7665 				active = sc->sc_mii.mii_media_active;
   7666 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7667 				switch (IFM_SUBTYPE(active)) {
   7668 				case IFM_10_T:
   7669 					sc->sc_ctrl |= CTRL_SPEED_10;
   7670 					break;
   7671 				case IFM_100_TX:
   7672 					sc->sc_ctrl |= CTRL_SPEED_100;
   7673 					break;
   7674 				case IFM_1000_T:
   7675 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7676 					break;
   7677 				default:
   7678 					/*
    7679 					 * Fiber?
    7680 					 * Should not enter here.
   7681 					 */
   7682 					printf("unknown media (%x)\n", active);
   7683 					break;
   7684 				}
   7685 				if (active & IFM_FDX)
   7686 					sc->sc_ctrl |= CTRL_FD;
   7687 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7688 			}
   7689 		} else if ((sc->sc_type == WM_T_ICH8)
   7690 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7691 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7692 		} else if (sc->sc_type == WM_T_PCH) {
   7693 			wm_k1_gig_workaround_hv(sc,
   7694 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7695 		}
   7696 
   7697 		if ((sc->sc_phytype == WMPHY_82578)
   7698 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7699 			== IFM_1000_T)) {
   7700 
   7701 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
    7702 				delay(200 * 1000); /* XXX too big */
   7703 
   7704 				/* Link stall fix for link up */
   7705 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7706 				    HV_MUX_DATA_CTRL,
   7707 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7708 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7709 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7710 				    HV_MUX_DATA_CTRL,
   7711 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7712 			}
   7713 		}
   7714 		/*
   7715 		 * I217 Packet Loss issue:
    7716 		 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
    7717 		 * on power up.
    7718 		 * Set the Beacon Duration for I217 to 8 usec.
   7719 		 */
   7720 		if ((sc->sc_type == WM_T_PCH_LPT)
   7721 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7722 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7723 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7724 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7725 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7726 		}
   7727 
   7728 		/* XXX Work-around I218 hang issue */
   7729 		/* e1000_k1_workaround_lpt_lp() */
   7730 
   7731 		if ((sc->sc_type == WM_T_PCH_LPT)
   7732 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7733 			/*
   7734 			 * Set platform power management values for Latency
   7735 			 * Tolerance Reporting (LTR)
   7736 			 */
   7737 			wm_platform_pm_pch_lpt(sc,
   7738 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7739 				    != 0));
   7740 		}
   7741 
   7742 		/* FEXTNVM6 K1-off workaround */
   7743 		if (sc->sc_type == WM_T_PCH_SPT) {
   7744 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7745 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7746 			    & FEXTNVM6_K1_OFF_ENABLE)
   7747 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7748 			else
   7749 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7750 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7751 		}
   7752 	} else if (icr & ICR_RXSEQ) {
   7753 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7754 			device_xname(sc->sc_dev)));
   7755 	}
   7756 }
   7757 
   7758 /*
   7759  * wm_linkintr_tbi:
   7760  *
   7761  *	Helper; handle link interrupts for TBI mode.
   7762  */
   7763 static void
   7764 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7765 {
   7766 	uint32_t status;
   7767 
   7768 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7769 		__func__));
   7770 
   7771 	status = CSR_READ(sc, WMREG_STATUS);
   7772 	if (icr & ICR_LSC) {
   7773 		if (status & STATUS_LU) {
   7774 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7775 			    device_xname(sc->sc_dev),
   7776 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7777 			/*
   7778 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7779 			 * so we should update sc->sc_ctrl
   7780 			 */
   7781 
   7782 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7783 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7784 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7785 			if (status & STATUS_FD)
   7786 				sc->sc_tctl |=
   7787 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7788 			else
   7789 				sc->sc_tctl |=
   7790 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7791 			if (sc->sc_ctrl & CTRL_TFCE)
   7792 				sc->sc_fcrtl |= FCRTL_XONE;
   7793 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7794 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7795 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7796 				      sc->sc_fcrtl);
   7797 			sc->sc_tbi_linkup = 1;
   7798 		} else {
   7799 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7800 			    device_xname(sc->sc_dev)));
   7801 			sc->sc_tbi_linkup = 0;
   7802 		}
   7803 		/* Update LED */
   7804 		wm_tbi_serdes_set_linkled(sc);
   7805 	} else if (icr & ICR_RXSEQ) {
   7806 		DPRINTF(WM_DEBUG_LINK,
   7807 		    ("%s: LINK: Receive sequence error\n",
   7808 		    device_xname(sc->sc_dev)));
   7809 	}
   7810 }
   7811 
   7812 /*
   7813  * wm_linkintr_serdes:
   7814  *
    7815  *	Helper; handle link interrupts for SERDES mode.
   7816  */
   7817 static void
   7818 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7819 {
   7820 	struct mii_data *mii = &sc->sc_mii;
   7821 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7822 	uint32_t pcs_adv, pcs_lpab, reg;
   7823 
   7824 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7825 		__func__));
   7826 
   7827 	if (icr & ICR_LSC) {
   7828 		/* Check PCS */
   7829 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7830 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7831 			mii->mii_media_status |= IFM_ACTIVE;
   7832 			sc->sc_tbi_linkup = 1;
   7833 		} else {
   7834 			mii->mii_media_status |= IFM_NONE;
   7835 			sc->sc_tbi_linkup = 0;
   7836 			wm_tbi_serdes_set_linkled(sc);
   7837 			return;
   7838 		}
   7839 		mii->mii_media_active |= IFM_1000_SX;
   7840 		if ((reg & PCS_LSTS_FDX) != 0)
   7841 			mii->mii_media_active |= IFM_FDX;
   7842 		else
   7843 			mii->mii_media_active |= IFM_HDX;
   7844 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7845 			/* Check flow */
   7846 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7847 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7848 				DPRINTF(WM_DEBUG_LINK,
   7849 				    ("XXX LINKOK but not ACOMP\n"));
   7850 				return;
   7851 			}
   7852 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7853 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7854 			DPRINTF(WM_DEBUG_LINK,
   7855 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7856 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7857 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7858 				mii->mii_media_active |= IFM_FLOW
   7859 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7860 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7861 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7862 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7863 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7864 				mii->mii_media_active |= IFM_FLOW
   7865 				    | IFM_ETH_TXPAUSE;
   7866 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7867 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7868 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7869 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7870 				mii->mii_media_active |= IFM_FLOW
   7871 				    | IFM_ETH_RXPAUSE;
   7872 		}
   7873 		/* Update LED */
   7874 		wm_tbi_serdes_set_linkled(sc);
   7875 	} else {
   7876 		DPRINTF(WM_DEBUG_LINK,
   7877 		    ("%s: LINK: Receive sequence error\n",
   7878 		    device_xname(sc->sc_dev)));
   7879 	}
   7880 }
   7881 
   7882 /*
   7883  * wm_linkintr:
   7884  *
   7885  *	Helper; handle link interrupts.
   7886  */
   7887 static void
   7888 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7889 {
   7890 
   7891 	KASSERT(WM_CORE_LOCKED(sc));
   7892 
   7893 	if (sc->sc_flags & WM_F_HAS_MII)
   7894 		wm_linkintr_gmii(sc, icr);
   7895 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7896 	    && (sc->sc_type >= WM_T_82575))
   7897 		wm_linkintr_serdes(sc, icr);
   7898 	else
   7899 		wm_linkintr_tbi(sc, icr);
   7900 }
   7901 
   7902 /*
   7903  * wm_intr_legacy:
   7904  *
   7905  *	Interrupt service routine for INTx and MSI.
   7906  */
   7907 static int
   7908 wm_intr_legacy(void *arg)
   7909 {
   7910 	struct wm_softc *sc = arg;
   7911 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7912 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7913 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7914 	uint32_t icr, rndval = 0;
   7915 	int handled = 0;
   7916 
   7917 	DPRINTF(WM_DEBUG_TX,
   7918 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7919 	while (1 /* CONSTCOND */) {
   7920 		icr = CSR_READ(sc, WMREG_ICR);
   7921 		if ((icr & sc->sc_icr) == 0)
   7922 			break;
   7923 		if (rndval == 0)
   7924 			rndval = icr;
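         		/* The first ICR value read is used as entropy below. */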
   7925 
   7926 		mutex_enter(rxq->rxq_lock);
   7927 
   7928 		if (rxq->rxq_stopping) {
   7929 			mutex_exit(rxq->rxq_lock);
   7930 			break;
   7931 		}
   7932 
   7933 		handled = 1;
   7934 
   7935 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7936 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7937 			DPRINTF(WM_DEBUG_RX,
   7938 			    ("%s: RX: got Rx intr 0x%08x\n",
   7939 			    device_xname(sc->sc_dev),
   7940 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7941 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7942 		}
   7943 #endif
   7944 		wm_rxeof(rxq);
   7945 
   7946 		mutex_exit(rxq->rxq_lock);
   7947 		mutex_enter(txq->txq_lock);
   7948 
   7949 		if (txq->txq_stopping) {
   7950 			mutex_exit(txq->txq_lock);
   7951 			break;
   7952 		}
   7953 
   7954 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7955 		if (icr & ICR_TXDW) {
   7956 			DPRINTF(WM_DEBUG_TX,
   7957 			    ("%s: TX: got TXDW interrupt\n",
   7958 			    device_xname(sc->sc_dev)));
   7959 			WM_Q_EVCNT_INCR(txq, txdw);
   7960 		}
   7961 #endif
   7962 		wm_txeof(sc, txq);
   7963 
   7964 		mutex_exit(txq->txq_lock);
   7965 		WM_CORE_LOCK(sc);
   7966 
   7967 		if (sc->sc_core_stopping) {
   7968 			WM_CORE_UNLOCK(sc);
   7969 			break;
   7970 		}
   7971 
   7972 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7973 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7974 			wm_linkintr(sc, icr);
   7975 		}
   7976 
   7977 		WM_CORE_UNLOCK(sc);
   7978 
   7979 		if (icr & ICR_RXO) {
   7980 #if defined(WM_DEBUG)
   7981 			log(LOG_WARNING, "%s: Receive overrun\n",
   7982 			    device_xname(sc->sc_dev));
   7983 #endif /* defined(WM_DEBUG) */
   7984 		}
   7985 	}
   7986 
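         	/* Feed the first ICR value observed into the entropy pool. */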
   7987 	rnd_add_uint32(&sc->rnd_source, rndval);
   7988 
   7989 	if (handled) {
   7990 		/* Try to get more packets going. */
   7991 		if_schedule_deferred_start(ifp);
   7992 	}
   7993 
   7994 	return handled;
   7995 }
   7996 
   7997 static int
   7998 wm_txrxintr_msix(void *arg)
   7999 {
   8000 	struct wm_queue *wmq = arg;
   8001 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8002 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8003 	struct wm_softc *sc = txq->txq_sc;
   8004 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8005 
   8006 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8007 
   8008 	DPRINTF(WM_DEBUG_TX,
   8009 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8010 
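         	/*
         	 * Mask this queue's interrupts while they are serviced; they
         	 * are unmasked again (via IMS/EIMS) at the end of this
         	 * function.
         	 */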
   8011 	if (sc->sc_type == WM_T_82574)
   8012 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8013 	else if (sc->sc_type == WM_T_82575)
   8014 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8015 	else
   8016 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8017 
   8018 	mutex_enter(txq->txq_lock);
   8019 
   8020 	if (txq->txq_stopping) {
   8021 		mutex_exit(txq->txq_lock);
   8022 		return 0;
   8023 	}
   8024 
   8025 	WM_Q_EVCNT_INCR(txq, txdw);
   8026 	wm_txeof(sc, txq);
   8027 
   8028 	/* Try to get more packets going. */
   8029 	if (pcq_peek(txq->txq_interq) != NULL)
   8030 		if_schedule_deferred_start(ifp);
    8031 	/*
    8032 	 * Some upper layer processing, e.g. ALTQ, still calls
    8033 	 * ifp->if_start() directly.
    8034 	 */
   8035 	if (wmq->wmq_id == 0)
   8036 		if_schedule_deferred_start(ifp);
   8037 
   8038 	mutex_exit(txq->txq_lock);
   8039 
   8040 	DPRINTF(WM_DEBUG_RX,
   8041 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8042 	mutex_enter(rxq->rxq_lock);
   8043 
   8044 	if (rxq->rxq_stopping) {
   8045 		mutex_exit(rxq->rxq_lock);
   8046 		return 0;
   8047 	}
   8048 
   8049 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8050 	wm_rxeof(rxq);
   8051 	mutex_exit(rxq->rxq_lock);
   8052 
   8053 	if (sc->sc_type == WM_T_82574)
   8054 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8055 	else if (sc->sc_type == WM_T_82575)
   8056 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8057 	else
   8058 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8059 
   8060 	return 1;
   8061 }
   8062 
   8063 /*
   8064  * wm_linkintr_msix:
   8065  *
   8066  *	Interrupt service routine for link status change for MSI-X.
   8067  */
   8068 static int
   8069 wm_linkintr_msix(void *arg)
   8070 {
   8071 	struct wm_softc *sc = arg;
   8072 	uint32_t reg;
   8073 
   8074 	DPRINTF(WM_DEBUG_LINK,
   8075 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8076 
   8077 	reg = CSR_READ(sc, WMREG_ICR);
   8078 	WM_CORE_LOCK(sc);
   8079 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8080 		goto out;
   8081 
   8082 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8083 	wm_linkintr(sc, ICR_LSC);
   8084 
   8085 out:
   8086 	WM_CORE_UNLOCK(sc);
   8087 
   8088 	if (sc->sc_type == WM_T_82574)
   8089 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8090 	else if (sc->sc_type == WM_T_82575)
   8091 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8092 	else
   8093 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8094 
   8095 	return 1;
   8096 }
   8097 
   8098 /*
   8099  * Media related.
   8100  * GMII, SGMII, TBI (and SERDES)
   8101  */
   8102 
   8103 /* Common */
   8104 
   8105 /*
   8106  * wm_tbi_serdes_set_linkled:
   8107  *
   8108  *	Update the link LED on TBI and SERDES devices.
   8109  */
   8110 static void
   8111 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8112 {
   8113 
   8114 	if (sc->sc_tbi_linkup)
   8115 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8116 	else
   8117 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8118 
   8119 	/* 82540 or newer devices are active low */
   8120 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8121 
   8122 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8123 }
   8124 
   8125 /* GMII related */
   8126 
   8127 /*
   8128  * wm_gmii_reset:
   8129  *
   8130  *	Reset the PHY.
   8131  */
   8132 static void
   8133 wm_gmii_reset(struct wm_softc *sc)
   8134 {
   8135 	uint32_t reg;
   8136 	int rv;
   8137 
   8138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8139 		device_xname(sc->sc_dev), __func__));
   8140 
   8141 	rv = sc->phy.acquire(sc);
   8142 	if (rv != 0) {
   8143 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8144 		    __func__);
   8145 		return;
   8146 	}
   8147 
   8148 	switch (sc->sc_type) {
   8149 	case WM_T_82542_2_0:
   8150 	case WM_T_82542_2_1:
   8151 		/* null */
   8152 		break;
   8153 	case WM_T_82543:
    8154 		/*
    8155 		 * With 82543, we need to force the MAC's speed and duplex
    8156 		 * to match the PHY's speed and duplex configuration.
    8157 		 * In addition, we need to toggle the PHY's hardware reset
    8158 		 * pin to bring it out of reset.
    8159 		 */
   8160 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8161 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8162 
   8163 		/* The PHY reset pin is active-low. */
   8164 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8165 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8166 		    CTRL_EXT_SWDPIN(4));
   8167 		reg |= CTRL_EXT_SWDPIO(4);
   8168 
   8169 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8170 		CSR_WRITE_FLUSH(sc);
   8171 		delay(10*1000);
   8172 
   8173 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8174 		CSR_WRITE_FLUSH(sc);
   8175 		delay(150);
   8176 #if 0
   8177 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8178 #endif
   8179 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8180 		break;
   8181 	case WM_T_82544:	/* reset 10000us */
   8182 	case WM_T_82540:
   8183 	case WM_T_82545:
   8184 	case WM_T_82545_3:
   8185 	case WM_T_82546:
   8186 	case WM_T_82546_3:
   8187 	case WM_T_82541:
   8188 	case WM_T_82541_2:
   8189 	case WM_T_82547:
   8190 	case WM_T_82547_2:
   8191 	case WM_T_82571:	/* reset 100us */
   8192 	case WM_T_82572:
   8193 	case WM_T_82573:
   8194 	case WM_T_82574:
   8195 	case WM_T_82575:
   8196 	case WM_T_82576:
   8197 	case WM_T_82580:
   8198 	case WM_T_I350:
   8199 	case WM_T_I354:
   8200 	case WM_T_I210:
   8201 	case WM_T_I211:
   8202 	case WM_T_82583:
   8203 	case WM_T_80003:
   8204 		/* generic reset */
   8205 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8206 		CSR_WRITE_FLUSH(sc);
   8207 		delay(20000);
   8208 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8209 		CSR_WRITE_FLUSH(sc);
   8210 		delay(20000);
   8211 
   8212 		if ((sc->sc_type == WM_T_82541)
   8213 		    || (sc->sc_type == WM_T_82541_2)
   8214 		    || (sc->sc_type == WM_T_82547)
   8215 		    || (sc->sc_type == WM_T_82547_2)) {
    8216 			/* workarounds for IGP are done in igp_reset() */
   8217 			/* XXX add code to set LED after phy reset */
   8218 		}
   8219 		break;
   8220 	case WM_T_ICH8:
   8221 	case WM_T_ICH9:
   8222 	case WM_T_ICH10:
   8223 	case WM_T_PCH:
   8224 	case WM_T_PCH2:
   8225 	case WM_T_PCH_LPT:
   8226 	case WM_T_PCH_SPT:
   8227 		/* generic reset */
   8228 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8229 		CSR_WRITE_FLUSH(sc);
   8230 		delay(100);
   8231 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8232 		CSR_WRITE_FLUSH(sc);
   8233 		delay(150);
   8234 		break;
   8235 	default:
   8236 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8237 		    __func__);
   8238 		break;
   8239 	}
   8240 
   8241 	sc->phy.release(sc);
   8242 
   8243 	/* get_cfg_done */
   8244 	wm_get_cfg_done(sc);
   8245 
   8246 	/* extra setup */
   8247 	switch (sc->sc_type) {
   8248 	case WM_T_82542_2_0:
   8249 	case WM_T_82542_2_1:
   8250 	case WM_T_82543:
   8251 	case WM_T_82544:
   8252 	case WM_T_82540:
   8253 	case WM_T_82545:
   8254 	case WM_T_82545_3:
   8255 	case WM_T_82546:
   8256 	case WM_T_82546_3:
   8257 	case WM_T_82541_2:
   8258 	case WM_T_82547_2:
   8259 	case WM_T_82571:
   8260 	case WM_T_82572:
   8261 	case WM_T_82573:
   8262 	case WM_T_82575:
   8263 	case WM_T_82576:
   8264 	case WM_T_82580:
   8265 	case WM_T_I350:
   8266 	case WM_T_I354:
   8267 	case WM_T_I210:
   8268 	case WM_T_I211:
   8269 	case WM_T_80003:
   8270 		/* null */
   8271 		break;
   8272 	case WM_T_82574:
   8273 	case WM_T_82583:
   8274 		wm_lplu_d0_disable(sc);
   8275 		break;
   8276 	case WM_T_82541:
   8277 	case WM_T_82547:
    8278 		/* XXX Actively configure the LED after PHY reset */
   8279 		break;
   8280 	case WM_T_ICH8:
   8281 	case WM_T_ICH9:
   8282 	case WM_T_ICH10:
   8283 	case WM_T_PCH:
   8284 	case WM_T_PCH2:
   8285 	case WM_T_PCH_LPT:
   8286 	case WM_T_PCH_SPT:
    8287 		/* Allow time for h/w to get to a quiescent state after reset */
   8288 		delay(10*1000);
   8289 
   8290 		if (sc->sc_type == WM_T_PCH)
   8291 			wm_hv_phy_workaround_ich8lan(sc);
   8292 
   8293 		if (sc->sc_type == WM_T_PCH2)
   8294 			wm_lv_phy_workaround_ich8lan(sc);
   8295 
   8296 		/* Clear the host wakeup bit after lcd reset */
   8297 		if (sc->sc_type >= WM_T_PCH) {
   8298 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8299 			    BM_PORT_GEN_CFG);
   8300 			reg &= ~BM_WUC_HOST_WU_BIT;
   8301 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8302 			    BM_PORT_GEN_CFG, reg);
   8303 		}
   8304 
    8305 		/*
    8306 		 * XXX Configure the LCD with the extended configuration
    8307 		 * region in NVM
    8308 		 */
   8309 
   8310 		/* Disable D0 LPLU. */
   8311 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8312 			wm_lplu_d0_disable_pch(sc);
   8313 		else
   8314 			wm_lplu_d0_disable(sc);	/* ICH* */
   8315 		break;
   8316 	default:
   8317 		panic("%s: unknown type\n", __func__);
   8318 		break;
   8319 	}
   8320 }
   8321 
   8322 /*
   8323  * wm_get_phy_id_82575:
   8324  *
    8325  * Return the PHY ID, or -1 on failure.
   8326  */
   8327 static int
   8328 wm_get_phy_id_82575(struct wm_softc *sc)
   8329 {
   8330 	uint32_t reg;
   8331 	int phyid = -1;
   8332 
   8333 	/* XXX */
   8334 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8335 		return -1;
   8336 
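         	/*
         	 * When SGMII uses MDIO, the PHY address is taken from the
         	 * MDIC register (82575/82576) or the MDICNFG register
         	 * (82580 and later).
         	 */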
   8337 	if (wm_sgmii_uses_mdio(sc)) {
   8338 		switch (sc->sc_type) {
   8339 		case WM_T_82575:
   8340 		case WM_T_82576:
   8341 			reg = CSR_READ(sc, WMREG_MDIC);
   8342 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8343 			break;
   8344 		case WM_T_82580:
   8345 		case WM_T_I350:
   8346 		case WM_T_I354:
   8347 		case WM_T_I210:
   8348 		case WM_T_I211:
   8349 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8350 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8351 			break;
   8352 		default:
   8353 			return -1;
   8354 		}
   8355 	}
   8356 
   8357 	return phyid;
   8358 }
   8359 
   8360 
   8361 /*
   8362  * wm_gmii_mediainit:
   8363  *
   8364  *	Initialize media for use on 1000BASE-T devices.
   8365  */
   8366 static void
   8367 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8368 {
   8369 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8370 	struct mii_data *mii = &sc->sc_mii;
   8371 	uint32_t reg;
   8372 
   8373 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8374 		device_xname(sc->sc_dev), __func__));
   8375 
   8376 	/* We have GMII. */
   8377 	sc->sc_flags |= WM_F_HAS_MII;
   8378 
   8379 	if (sc->sc_type == WM_T_80003)
    8380 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   8381 	else
   8382 		sc->sc_tipg = TIPG_1000T_DFLT;
   8383 
   8384 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8385 	if ((sc->sc_type == WM_T_82580)
   8386 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8387 	    || (sc->sc_type == WM_T_I211)) {
   8388 		reg = CSR_READ(sc, WMREG_PHPM);
   8389 		reg &= ~PHPM_GO_LINK_D;
   8390 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8391 	}
   8392 
   8393 	/*
   8394 	 * Let the chip set speed/duplex on its own based on
   8395 	 * signals from the PHY.
   8396 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8397 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8398 	 */
   8399 	sc->sc_ctrl |= CTRL_SLU;
   8400 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8401 
   8402 	/* Initialize our media structures and probe the GMII. */
   8403 	mii->mii_ifp = ifp;
   8404 
    8405 	/*
    8406 	 * Determine the PHY access method.
    8407 	 *
    8408 	 *  For SGMII, use the SGMII-specific method.
    8409 	 *
    8410 	 *  For some devices, the PHY access method can be determined
    8411 	 * from sc_type.
    8412 	 *
    8413 	 *  For ICH and PCH variants, it's difficult to determine the
    8414 	 * PHY access method from sc_type alone, so use the PCI product
    8415 	 * ID for some devices.
    8416 	 *  For other ICH8 variants, try igp's method first; if the PHY
    8417 	 * can't be detected that way, fall back to bm's method.
    8418 	 */
   8419 	switch (prodid) {
   8420 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8421 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8422 		/* 82577 */
   8423 		sc->sc_phytype = WMPHY_82577;
   8424 		break;
   8425 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8426 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8427 		/* 82578 */
   8428 		sc->sc_phytype = WMPHY_82578;
   8429 		break;
   8430 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8431 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8432 		/* 82579 */
   8433 		sc->sc_phytype = WMPHY_82579;
   8434 		break;
   8435 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8436 	case PCI_PRODUCT_INTEL_82801I_BM:
   8437 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8438 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8439 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8440 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8441 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8442 		/* ICH8, 9, 10 with 82567 */
   8443 		sc->sc_phytype = WMPHY_BM;
   8444 		mii->mii_readreg = wm_gmii_bm_readreg;
   8445 		mii->mii_writereg = wm_gmii_bm_writereg;
   8446 		break;
   8447 	default:
   8448 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8449 		    && !wm_sgmii_uses_mdio(sc)){
   8450 			/* SGMII */
   8451 			mii->mii_readreg = wm_sgmii_readreg;
   8452 			mii->mii_writereg = wm_sgmii_writereg;
   8453 		} else if ((sc->sc_type == WM_T_82574)
   8454 		    || (sc->sc_type == WM_T_82583)) {
   8455 			/* BM2 (phyaddr == 1) */
   8456 			sc->sc_phytype = WMPHY_BM;
   8457 			mii->mii_readreg = wm_gmii_bm_readreg;
   8458 			mii->mii_writereg = wm_gmii_bm_writereg;
   8459 		} else if (sc->sc_type >= WM_T_ICH8) {
   8460 			/* non-82567 ICH8, 9 and 10 */
   8461 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8462 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8463 		} else if (sc->sc_type >= WM_T_80003) {
   8464 			/* 80003 */
   8465 			sc->sc_phytype = WMPHY_GG82563;
   8466 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8467 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8468 		} else if (sc->sc_type >= WM_T_I210) {
   8469 			/* I210 and I211 */
   8470 			sc->sc_phytype = WMPHY_210;
   8471 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8472 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8473 		} else if (sc->sc_type >= WM_T_82580) {
   8474 			/* 82580, I350 and I354 */
   8475 			sc->sc_phytype = WMPHY_82580;
   8476 			mii->mii_readreg = wm_gmii_82580_readreg;
   8477 			mii->mii_writereg = wm_gmii_82580_writereg;
   8478 		} else if (sc->sc_type >= WM_T_82544) {
    8479 			/* 82544, 8254[01567], 8257[1234] and 82583 */
   8480 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8481 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8482 		} else {
   8483 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8484 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8485 		}
   8486 		break;
   8487 	}
   8488 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8489 		/* All PCH* use _hv_ */
   8490 		mii->mii_readreg = wm_gmii_hv_readreg;
   8491 		mii->mii_writereg = wm_gmii_hv_writereg;
   8492 	}
   8493 	mii->mii_statchg = wm_gmii_statchg;
   8494 
    8495 	/* Switch PHY control from SMBus to PCIe */
   8496 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8497 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8498 		wm_smbustopci(sc);
   8499 
   8500 	wm_gmii_reset(sc);
   8501 
   8502 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8503 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8504 	    wm_gmii_mediastatus);
   8505 
   8506 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8507 	    || (sc->sc_type == WM_T_82580)
   8508 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8509 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8510 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8511 			/* Attach only one port */
   8512 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8513 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8514 		} else {
   8515 			int i, id;
   8516 			uint32_t ctrl_ext;
   8517 
   8518 			id = wm_get_phy_id_82575(sc);
   8519 			if (id != -1) {
   8520 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8521 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8522 			}
   8523 			if ((id == -1)
   8524 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    8525 				/* Power on the SGMII PHY if it is disabled */
   8526 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8527 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    8528 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   8529 				CSR_WRITE_FLUSH(sc);
   8530 				delay(300*1000); /* XXX too long */
   8531 
    8532 				/* try PHY addresses 1 through 7 */
   8533 				for (i = 1; i < 8; i++)
   8534 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8535 					    0xffffffff, i, MII_OFFSET_ANY,
   8536 					    MIIF_DOPAUSE);
   8537 
   8538 				/* restore previous sfp cage power state */
   8539 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8540 			}
   8541 		}
   8542 	} else {
   8543 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8544 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8545 	}
   8546 
    8547 	/*
    8548 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected,
    8549 	 * apply the wm_set_mdio_slow_mode_hv() workaround and retry.
    8550 	 */
   8551 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8552 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8553 		wm_set_mdio_slow_mode_hv(sc);
   8554 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8555 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8556 	}
   8557 
   8558 	/*
   8559 	 * (For ICH8 variants)
   8560 	 * If PHY detection failed, use BM's r/w function and retry.
   8561 	 */
   8562 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8563 		/* if failed, retry with *_bm_* */
   8564 		mii->mii_readreg = wm_gmii_bm_readreg;
   8565 		mii->mii_writereg = wm_gmii_bm_writereg;
   8566 
   8567 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8568 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8569 	}
   8570 
   8571 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8572 		/* No PHY was found */
   8573 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8574 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8575 		sc->sc_phytype = WMPHY_NONE;
   8576 	} else {
   8577 		/*
   8578 		 * PHY Found!
   8579 		 * Check PHY type.
   8580 		 */
   8581 		uint32_t model;
   8582 		struct mii_softc *child;
   8583 
   8584 		child = LIST_FIRST(&mii->mii_phys);
   8585 		model = child->mii_mpd_model;
   8586 		if (model == MII_MODEL_yyINTEL_I82566)
   8587 			sc->sc_phytype = WMPHY_IGP_3;
   8588 
   8589 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8590 	}
   8591 }
   8592 
   8593 /*
   8594  * wm_gmii_mediachange:	[ifmedia interface function]
   8595  *
   8596  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8597  */
   8598 static int
   8599 wm_gmii_mediachange(struct ifnet *ifp)
   8600 {
   8601 	struct wm_softc *sc = ifp->if_softc;
   8602 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8603 	int rc;
   8604 
   8605 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8606 		device_xname(sc->sc_dev), __func__));
   8607 	if ((ifp->if_flags & IFF_UP) == 0)
   8608 		return 0;
   8609 
   8610 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8611 	sc->sc_ctrl |= CTRL_SLU;
   8612 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8613 	    || (sc->sc_type > WM_T_82543)) {
   8614 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8615 	} else {
   8616 		sc->sc_ctrl &= ~CTRL_ASDE;
   8617 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8618 		if (ife->ifm_media & IFM_FDX)
   8619 			sc->sc_ctrl |= CTRL_FD;
   8620 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8621 		case IFM_10_T:
   8622 			sc->sc_ctrl |= CTRL_SPEED_10;
   8623 			break;
   8624 		case IFM_100_TX:
   8625 			sc->sc_ctrl |= CTRL_SPEED_100;
   8626 			break;
   8627 		case IFM_1000_T:
   8628 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8629 			break;
   8630 		default:
   8631 			panic("wm_gmii_mediachange: bad media 0x%x",
   8632 			    ife->ifm_media);
   8633 		}
   8634 	}
   8635 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8636 	if (sc->sc_type <= WM_T_82543)
   8637 		wm_gmii_reset(sc);
   8638 
   8639 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8640 		return 0;
   8641 	return rc;
   8642 }
   8643 
   8644 /*
   8645  * wm_gmii_mediastatus:	[ifmedia interface function]
   8646  *
   8647  *	Get the current interface media status on a 1000BASE-T device.
   8648  */
   8649 static void
   8650 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8651 {
   8652 	struct wm_softc *sc = ifp->if_softc;
   8653 
   8654 	ether_mediastatus(ifp, ifmr);
   8655 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8656 	    | sc->sc_flowflags;
   8657 }
   8658 
   8659 #define	MDI_IO		CTRL_SWDPIN(2)
   8660 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8661 #define	MDI_CLK		CTRL_SWDPIN(3)
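         /*
          * On the 82543 the MII management interface is bit-banged through
          * software-controlled pins in the CTRL register: MDI_IO carries the
          * data bit, MDI_DIR sets its direction and MDI_CLK supplies the clock.
          */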
   8662 
   8663 static void
   8664 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8665 {
   8666 	uint32_t i, v;
   8667 
   8668 	v = CSR_READ(sc, WMREG_CTRL);
   8669 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8670 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8671 
   8672 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8673 		if (data & i)
   8674 			v |= MDI_IO;
   8675 		else
   8676 			v &= ~MDI_IO;
   8677 		CSR_WRITE(sc, WMREG_CTRL, v);
   8678 		CSR_WRITE_FLUSH(sc);
   8679 		delay(10);
   8680 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8681 		CSR_WRITE_FLUSH(sc);
   8682 		delay(10);
   8683 		CSR_WRITE(sc, WMREG_CTRL, v);
   8684 		CSR_WRITE_FLUSH(sc);
   8685 		delay(10);
   8686 	}
   8687 }
   8688 
   8689 static uint32_t
   8690 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8691 {
   8692 	uint32_t v, i, data = 0;
   8693 
   8694 	v = CSR_READ(sc, WMREG_CTRL);
   8695 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8696 	v |= CTRL_SWDPIO(3);
   8697 
   8698 	CSR_WRITE(sc, WMREG_CTRL, v);
   8699 	CSR_WRITE_FLUSH(sc);
   8700 	delay(10);
   8701 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8702 	CSR_WRITE_FLUSH(sc);
   8703 	delay(10);
   8704 	CSR_WRITE(sc, WMREG_CTRL, v);
   8705 	CSR_WRITE_FLUSH(sc);
   8706 	delay(10);
   8707 
   8708 	for (i = 0; i < 16; i++) {
   8709 		data <<= 1;
   8710 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8711 		CSR_WRITE_FLUSH(sc);
   8712 		delay(10);
   8713 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8714 			data |= 1;
   8715 		CSR_WRITE(sc, WMREG_CTRL, v);
   8716 		CSR_WRITE_FLUSH(sc);
   8717 		delay(10);
   8718 	}
   8719 
   8720 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8721 	CSR_WRITE_FLUSH(sc);
   8722 	delay(10);
   8723 	CSR_WRITE(sc, WMREG_CTRL, v);
   8724 	CSR_WRITE_FLUSH(sc);
   8725 	delay(10);
   8726 
   8727 	return data;
   8728 }
   8729 
   8730 #undef MDI_IO
   8731 #undef MDI_DIR
   8732 #undef MDI_CLK
   8733 
   8734 /*
   8735  * wm_gmii_i82543_readreg:	[mii interface function]
   8736  *
   8737  *	Read a PHY register on the GMII (i82543 version).
   8738  */
   8739 static int
   8740 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8741 {
   8742 	struct wm_softc *sc = device_private(self);
   8743 	int rv;
   8744 
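         	/*
         	 * Clock out the 32-bit preamble (all ones) required by IEEE
         	 * 802.3 clause 22, then the start/opcode/address fields, and
         	 * read back the 16-bit result.
         	 */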
   8745 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8746 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8747 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8748 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8749 
   8750 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8751 	    device_xname(sc->sc_dev), phy, reg, rv));
   8752 
   8753 	return rv;
   8754 }
   8755 
   8756 /*
   8757  * wm_gmii_i82543_writereg:	[mii interface function]
   8758  *
   8759  *	Write a PHY register on the GMII (i82543 version).
   8760  */
   8761 static void
   8762 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8763 {
   8764 	struct wm_softc *sc = device_private(self);
   8765 
   8766 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8767 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8768 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8769 	    (MII_COMMAND_START << 30), 32);
   8770 }
   8771 
   8772 /*
   8773  * wm_gmii_mdic_readreg:	[mii interface function]
   8774  *
   8775  *	Read a PHY register on the GMII.
   8776  */
   8777 static int
   8778 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8779 {
   8780 	struct wm_softc *sc = device_private(self);
   8781 	uint32_t mdic = 0;
   8782 	int i, rv;
   8783 
   8784 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8785 	    MDIC_REGADD(reg));
   8786 
   8787 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8788 		mdic = CSR_READ(sc, WMREG_MDIC);
   8789 		if (mdic & MDIC_READY)
   8790 			break;
   8791 		delay(50);
   8792 	}
   8793 
   8794 	if ((mdic & MDIC_READY) == 0) {
   8795 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8796 		    device_xname(sc->sc_dev), phy, reg);
   8797 		rv = 0;
   8798 	} else if (mdic & MDIC_E) {
   8799 #if 0 /* This is normal if no PHY is present. */
   8800 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8801 		    device_xname(sc->sc_dev), phy, reg);
   8802 #endif
   8803 		rv = 0;
   8804 	} else {
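         		/* A result of all ones usually means no PHY answered. */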
   8805 		rv = MDIC_DATA(mdic);
   8806 		if (rv == 0xffff)
   8807 			rv = 0;
   8808 	}
   8809 
   8810 	return rv;
   8811 }
   8812 
   8813 /*
   8814  * wm_gmii_mdic_writereg:	[mii interface function]
   8815  *
   8816  *	Write a PHY register on the GMII.
   8817  */
   8818 static void
   8819 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8820 {
   8821 	struct wm_softc *sc = device_private(self);
   8822 	uint32_t mdic = 0;
   8823 	int i;
   8824 
   8825 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8826 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8827 
   8828 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8829 		mdic = CSR_READ(sc, WMREG_MDIC);
   8830 		if (mdic & MDIC_READY)
   8831 			break;
   8832 		delay(50);
   8833 	}
   8834 
   8835 	if ((mdic & MDIC_READY) == 0)
   8836 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8837 		    device_xname(sc->sc_dev), phy, reg);
   8838 	else if (mdic & MDIC_E)
   8839 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8840 		    device_xname(sc->sc_dev), phy, reg);
   8841 }
   8842 
   8843 /*
   8844  * wm_gmii_i82544_readreg:	[mii interface function]
   8845  *
   8846  *	Read a PHY register on the GMII.
   8847  */
   8848 static int
   8849 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8850 {
   8851 	struct wm_softc *sc = device_private(self);
   8852 	int rv;
   8853 
   8854 	if (sc->phy.acquire(sc)) {
   8855 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8856 		    __func__);
   8857 		return 0;
   8858 	}
   8859 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8860 	sc->phy.release(sc);
   8861 
   8862 	return rv;
   8863 }
   8864 
   8865 /*
   8866  * wm_gmii_i82544_writereg:	[mii interface function]
   8867  *
   8868  *	Write a PHY register on the GMII.
   8869  */
   8870 static void
   8871 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8872 {
   8873 	struct wm_softc *sc = device_private(self);
   8874 
   8875 	if (sc->phy.acquire(sc)) {
   8876 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8877 		    __func__);
         		return;
    8878 	}
   8879 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8880 	sc->phy.release(sc);
   8881 }
   8882 
   8883 /*
   8884  * wm_gmii_i80003_readreg:	[mii interface function]
   8885  *
    8886  *	Read a PHY register on the kumeran interface.
    8887  * This could be handled by the PHY layer if we didn't have to lock the
    8888  * resource ...
   8889  */
   8890 static int
   8891 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8892 {
   8893 	struct wm_softc *sc = device_private(self);
   8894 	int rv;
   8895 
   8896 	if (phy != 1) /* only one PHY on kumeran bus */
   8897 		return 0;
   8898 
   8899 	if (sc->phy.acquire(sc)) {
   8900 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8901 		    __func__);
   8902 		return 0;
   8903 	}
   8904 
   8905 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8906 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8907 		    reg >> GG82563_PAGE_SHIFT);
   8908 	} else {
   8909 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8910 		    reg >> GG82563_PAGE_SHIFT);
   8911 	}
    8912 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   8913 	delay(200);
   8914 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8915 	delay(200);
   8916 	sc->phy.release(sc);
   8917 
   8918 	return rv;
   8919 }
   8920 
   8921 /*
   8922  * wm_gmii_i80003_writereg:	[mii interface function]
   8923  *
   8924  *	Write a PHY register on the kumeran.
   8925  * This could be handled by the PHY layer if we didn't have to lock the
    8926  * resource ...
   8927  */
   8928 static void
   8929 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8930 {
   8931 	struct wm_softc *sc = device_private(self);
   8932 
   8933 	if (phy != 1) /* only one PHY on kumeran bus */
   8934 		return;
   8935 
   8936 	if (sc->phy.acquire(sc)) {
   8937 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8938 		    __func__);
   8939 		return;
   8940 	}
   8941 
   8942 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8943 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8944 		    reg >> GG82563_PAGE_SHIFT);
   8945 	} else {
   8946 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8947 		    reg >> GG82563_PAGE_SHIFT);
   8948 	}
    8949 	/* Wait 200us more to work around a bug in the MDIC ready bit */
   8950 	delay(200);
   8951 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8952 	delay(200);
   8953 
   8954 	sc->phy.release(sc);
   8955 }
   8956 
   8957 /*
   8958  * wm_gmii_bm_readreg:	[mii interface function]
   8959  *
    8960  *	Read a PHY register on the BM PHY.
    8961  * This could be handled by the PHY layer if we didn't have to lock the
    8962  * resource ...
   8963  */
   8964 static int
   8965 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8966 {
   8967 	struct wm_softc *sc = device_private(self);
   8968 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8969 	uint16_t val;
   8970 	int rv;
   8971 
   8972 	if (sc->phy.acquire(sc)) {
   8973 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8974 		    __func__);
   8975 		return 0;
   8976 	}
   8977 
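         	/*
         	 * On BM PHYs, registers on pages >= 768 and the special
         	 * registers 25 (page 0) and 31 are only reachable at PHY
         	 * address 1, so redirect the access there.
         	 */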
   8978 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8979 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8980 		    || (reg == 31)) ? 1 : phy;
   8981 	/* Page 800 works differently than the rest so it has its own func */
   8982 	if (page == BM_WUC_PAGE) {
   8983 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8984 		rv = val;
   8985 		goto release;
   8986 	}
   8987 
   8988 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8989 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8990 		    && (sc->sc_type != WM_T_82583))
   8991 			wm_gmii_mdic_writereg(self, phy,
   8992 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8993 		else
   8994 			wm_gmii_mdic_writereg(self, phy,
   8995 			    BME1000_PHY_PAGE_SELECT, page);
   8996 	}
   8997 
   8998 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8999 
   9000 release:
   9001 	sc->phy.release(sc);
   9002 	return rv;
   9003 }
   9004 
   9005 /*
   9006  * wm_gmii_bm_writereg:	[mii interface function]
   9007  *
    9008  *	Write a PHY register on the BM PHY.
    9009  * This could be handled by the PHY layer if we didn't have to lock the
    9010  * resource ...
   9011  */
   9012 static void
   9013 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9014 {
   9015 	struct wm_softc *sc = device_private(self);
   9016 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9017 
   9018 	if (sc->phy.acquire(sc)) {
   9019 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9020 		    __func__);
   9021 		return;
   9022 	}
   9023 
   9024 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9025 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9026 		    || (reg == 31)) ? 1 : phy;
   9027 	/* Page 800 works differently than the rest so it has its own func */
   9028 	if (page == BM_WUC_PAGE) {
   9029 		uint16_t tmp;
   9030 
   9031 		tmp = val;
   9032 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9033 		goto release;
   9034 	}
   9035 
   9036 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9037 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9038 		    && (sc->sc_type != WM_T_82583))
   9039 			wm_gmii_mdic_writereg(self, phy,
   9040 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9041 		else
   9042 			wm_gmii_mdic_writereg(self, phy,
   9043 			    BME1000_PHY_PAGE_SELECT, page);
   9044 	}
   9045 
   9046 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9047 
   9048 release:
   9049 	sc->phy.release(sc);
   9050 }
   9051 
   9052 static void
   9053 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9054 {
   9055 	struct wm_softc *sc = device_private(self);
   9056 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9057 	uint16_t wuce, reg;
   9058 
   9059 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9060 		device_xname(sc->sc_dev), __func__));
   9061 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9062 	if (sc->sc_type == WM_T_PCH) {
    9063 		/* XXX the e1000 driver does nothing here... why? */
   9064 	}
   9065 
   9066 	/*
   9067 	 * 1) Enable PHY wakeup register first.
   9068 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9069 	 */
   9070 
   9071 	/* Set page 769 */
   9072 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9073 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9074 
   9075 	/* Read WUCE and save it */
   9076 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9077 
   9078 	reg = wuce | BM_WUC_ENABLE_BIT;
   9079 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9080 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9081 
   9082 	/* Select page 800 */
   9083 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9084 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9085 
   9086 	/*
   9087 	 * 2) Access PHY wakeup register.
   9088 	 * See e1000_access_phy_wakeup_reg_bm.
   9089 	 */
   9090 
    9091 	/* Write the wakeup register's address */
   9092 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9093 
   9094 	if (rd)
   9095 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9096 	else
   9097 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9098 
   9099 	/*
   9100 	 * 3) Disable PHY wakeup register.
   9101 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9102 	 */
   9103 	/* Set page 769 */
   9104 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9105 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9106 
   9107 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9108 }
   9109 
   9110 /*
   9111  * wm_gmii_hv_readreg:	[mii interface function]
   9112  *
    9113  *	Read a PHY register on the HV PHY (PCH and newer).
    9114  * This could be handled by the PHY layer if we didn't have to lock the
    9115  * resource ...
   9116  */
   9117 static int
   9118 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9119 {
   9120 	struct wm_softc *sc = device_private(self);
   9121 	int rv;
   9122 
   9123 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9124 		device_xname(sc->sc_dev), __func__));
   9125 	if (sc->phy.acquire(sc)) {
   9126 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9127 		    __func__);
   9128 		return 0;
   9129 	}
   9130 
   9131 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9132 	sc->phy.release(sc);
   9133 	return rv;
   9134 }
   9135 
   9136 static int
   9137 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9138 {
   9139 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9140 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9141 	uint16_t val;
   9142 	int rv;
   9143 
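         	/*
         	 * Registers on pages at or above HV_INTC_FC_PAGE_START are
         	 * only reachable at PHY address 1.
         	 */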
   9144 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9145 
   9146 	/* Page 800 works differently than the rest so it has its own func */
   9147 	if (page == BM_WUC_PAGE) {
   9148 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9149 		return val;
   9150 	}
   9151 
    9152 	/*
    9153 	 * Pages below 768 work differently from the rest, so they
    9154 	 * would need their own function (not implemented).
    9155 	 */
   9156 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9157 		printf("gmii_hv_readreg!!!\n");
   9158 		return 0;
   9159 	}
   9160 
   9161 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9162 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9163 		    page << BME1000_PAGE_SHIFT);
   9164 	}
   9165 
   9166 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9167 	return rv;
   9168 }
   9169 
   9170 /*
   9171  * wm_gmii_hv_writereg:	[mii interface function]
   9172  *
    9173  *	Write a PHY register on the HV PHY (PCH and newer).
    9174  * This could be handled by the PHY layer if we didn't have to lock the
    9175  * resource ...
   9176  */
   9177 static void
   9178 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9179 {
   9180 	struct wm_softc *sc = device_private(self);
   9181 
   9182 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9183 		device_xname(sc->sc_dev), __func__));
   9184 
   9185 	if (sc->phy.acquire(sc)) {
   9186 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9187 		    __func__);
   9188 		return;
   9189 	}
   9190 
   9191 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9192 	sc->phy.release(sc);
   9193 }
   9194 
   9195 static void
   9196 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9197 {
   9198 	struct wm_softc *sc = device_private(self);
   9199 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9200 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9201 
   9202 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9203 
   9204 	/* Page 800 works differently than the rest so it has its own func */
   9205 	if (page == BM_WUC_PAGE) {
   9206 		uint16_t tmp;
   9207 
   9208 		tmp = val;
   9209 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9210 		return;
   9211 	}
   9212 
    9213 	/*
    9214 	 * Pages below 768 work differently from the rest, so they
    9215 	 * would need their own function (not implemented).
    9216 	 */
   9217 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9218 		printf("gmii_hv_writereg!!!\n");
   9219 		return;
   9220 	}
   9221 
   9222 	{
   9223 		/*
   9224 		 * XXX Workaround MDIO accesses being disabled after entering
   9225 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9226 		 * register is set)
   9227 		 */
   9228 		if (sc->sc_phytype == WMPHY_82578) {
   9229 			struct mii_softc *child;
   9230 
   9231 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9232 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9233 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9234 			    && ((val & (1 << 11)) != 0)) {
   9235 				printf("XXX need workaround\n");
   9236 			}
   9237 		}
   9238 
   9239 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9240 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9241 			    page << BME1000_PAGE_SHIFT);
   9242 		}
   9243 	}
   9244 
   9245 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9246 }
   9247 
   9248 /*
   9249  * wm_gmii_82580_readreg:	[mii interface function]
   9250  *
   9251  *	Read a PHY register on the 82580 and I350.
   9252  * This could be handled by the PHY layer if we didn't have to lock the
    9253  * resource ...
   9254  */
   9255 static int
   9256 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9257 {
   9258 	struct wm_softc *sc = device_private(self);
   9259 	int rv;
   9260 
   9261 	if (sc->phy.acquire(sc) != 0) {
   9262 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9263 		    __func__);
   9264 		return 0;
   9265 	}
   9266 
   9267 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9268 
   9269 	sc->phy.release(sc);
   9270 	return rv;
   9271 }
   9272 
   9273 /*
   9274  * wm_gmii_82580_writereg:	[mii interface function]
   9275  *
   9276  *	Write a PHY register on the 82580 and I350.
   9277  * This could be handled by the PHY layer if we didn't have to lock the
    9278  * resource ...
   9279  */
   9280 static void
   9281 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9282 {
   9283 	struct wm_softc *sc = device_private(self);
   9284 
   9285 	if (sc->phy.acquire(sc) != 0) {
   9286 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9287 		    __func__);
   9288 		return;
   9289 	}
   9290 
   9291 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9292 
   9293 	sc->phy.release(sc);
   9294 }
   9295 
   9296 /*
   9297  * wm_gmii_gs40g_readreg:	[mii interface function]
   9298  *
    9299  *	Read a PHY register on the I210 and I211.
    9300  * This could be handled by the PHY layer if we didn't have to lock the
    9301  * resource ...
   9302  */
   9303 static int
   9304 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9305 {
   9306 	struct wm_softc *sc = device_private(self);
   9307 	int page, offset;
   9308 	int rv;
   9309 
   9310 	/* Acquire semaphore */
   9311 	if (sc->phy.acquire(sc)) {
   9312 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9313 		    __func__);
   9314 		return 0;
   9315 	}
   9316 
   9317 	/* Page select */
   9318 	page = reg >> GS40G_PAGE_SHIFT;
   9319 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9320 
   9321 	/* Read reg */
   9322 	offset = reg & GS40G_OFFSET_MASK;
   9323 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9324 
   9325 	sc->phy.release(sc);
   9326 	return rv;
   9327 }
   9328 
   9329 /*
   9330  * wm_gmii_gs40g_writereg:	[mii interface function]
   9331  *
   9332  *	Write a PHY register on the I210 and I211.
   9333  * This could be handled by the PHY layer if we didn't have to lock the
    9334  * resource ...
   9335  */
   9336 static void
   9337 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9338 {
   9339 	struct wm_softc *sc = device_private(self);
   9340 	int page, offset;
   9341 
   9342 	/* Acquire semaphore */
   9343 	if (sc->phy.acquire(sc)) {
   9344 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9345 		    __func__);
   9346 		return;
   9347 	}
   9348 
   9349 	/* Page select */
   9350 	page = reg >> GS40G_PAGE_SHIFT;
   9351 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9352 
   9353 	/* Write reg */
   9354 	offset = reg & GS40G_OFFSET_MASK;
   9355 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9356 
   9357 	/* Release semaphore */
   9358 	sc->phy.release(sc);
   9359 }
   9360 
   9361 /*
   9362  * wm_gmii_statchg:	[mii interface function]
   9363  *
   9364  *	Callback from MII layer when media changes.
   9365  */
   9366 static void
   9367 wm_gmii_statchg(struct ifnet *ifp)
   9368 {
   9369 	struct wm_softc *sc = ifp->if_softc;
   9370 	struct mii_data *mii = &sc->sc_mii;
   9371 
   9372 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9373 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9374 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9375 
   9376 	/*
   9377 	 * Get flow control negotiation result.
   9378 	 */
   9379 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9380 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9381 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9382 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9383 	}
   9384 
   9385 	if (sc->sc_flowflags & IFM_FLOW) {
   9386 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9387 			sc->sc_ctrl |= CTRL_TFCE;
   9388 			sc->sc_fcrtl |= FCRTL_XONE;
   9389 		}
   9390 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9391 			sc->sc_ctrl |= CTRL_RFCE;
   9392 	}
   9393 
   9394 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9395 		DPRINTF(WM_DEBUG_LINK,
   9396 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9397 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9398 	} else {
   9399 		DPRINTF(WM_DEBUG_LINK,
   9400 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9401 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9402 	}
   9403 
   9404 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9405 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9406 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9407 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9408 	if (sc->sc_type == WM_T_80003) {
   9409 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9410 		case IFM_1000_T:
   9411 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9412 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    9413 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9414 			break;
   9415 		default:
   9416 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9417 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    9418 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   9419 			break;
   9420 		}
   9421 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9422 	}
   9423 }
   9424 
   9425 /* kumeran related (80003, ICH* and PCH*) */
   9426 
   9427 /*
   9428  * wm_kmrn_readreg:
   9429  *
   9430  *	Read a kumeran register
   9431  */
   9432 static int
   9433 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9434 {
   9435 	int rv;
   9436 
   9437 	if (sc->sc_type == WM_T_80003)
   9438 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9439 	else
   9440 		rv = sc->phy.acquire(sc);
   9441 	if (rv != 0) {
   9442 		aprint_error_dev(sc->sc_dev,
   9443 		    "%s: failed to get semaphore\n", __func__);
   9444 		return 0;
   9445 	}
   9446 
   9447 	rv = wm_kmrn_readreg_locked(sc, reg);
   9448 
   9449 	if (sc->sc_type == WM_T_80003)
   9450 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9451 	else
   9452 		sc->phy.release(sc);
   9453 
   9454 	return rv;
   9455 }
   9456 
   9457 static int
   9458 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9459 {
   9460 	int rv;
   9461 
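         	/*
         	 * Kumeran registers are reached through the KUMCTRLSTA
         	 * window: write the register offset with the read-enable
         	 * (REN) bit set, wait briefly, then read the data back
         	 * through the same register.
         	 */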
   9462 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9463 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9464 	    KUMCTRLSTA_REN);
   9465 	CSR_WRITE_FLUSH(sc);
   9466 	delay(2);
   9467 
   9468 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9469 
   9470 	return rv;
   9471 }
   9472 
   9473 /*
   9474  * wm_kmrn_writereg:
   9475  *
   9476  *	Write a kumeran register
   9477  */
   9478 static void
   9479 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9480 {
   9481 	int rv;
   9482 
   9483 	if (sc->sc_type == WM_T_80003)
   9484 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9485 	else
   9486 		rv = sc->phy.acquire(sc);
   9487 	if (rv != 0) {
   9488 		aprint_error_dev(sc->sc_dev,
   9489 		    "%s: failed to get semaphore\n", __func__);
   9490 		return;
   9491 	}
   9492 
   9493 	wm_kmrn_writereg_locked(sc, reg, val);
   9494 
   9495 	if (sc->sc_type == WM_T_80003)
   9496 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9497 	else
   9498 		sc->phy.release(sc);
   9499 }
   9500 
   9501 static void
   9502 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9503 {
   9504 
   9505 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9506 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9507 	    (val & KUMCTRLSTA_MASK));
   9508 }
   9509 
   9510 /* SGMII related */
   9511 
   9512 /*
   9513  * wm_sgmii_uses_mdio
   9514  *
   9515  * Check whether the transaction is to the internal PHY or the external
   9516  * MDIO interface. Return true if it's MDIO.
   9517  */
   9518 static bool
   9519 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9520 {
   9521 	uint32_t reg;
   9522 	bool ismdio = false;
   9523 
   9524 	switch (sc->sc_type) {
   9525 	case WM_T_82575:
   9526 	case WM_T_82576:
   9527 		reg = CSR_READ(sc, WMREG_MDIC);
   9528 		ismdio = ((reg & MDIC_DEST) != 0);
   9529 		break;
   9530 	case WM_T_82580:
   9531 	case WM_T_I350:
   9532 	case WM_T_I354:
   9533 	case WM_T_I210:
   9534 	case WM_T_I211:
   9535 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9536 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9537 		break;
   9538 	default:
   9539 		break;
   9540 	}
   9541 
   9542 	return ismdio;
   9543 }
   9544 
   9545 /*
   9546  * wm_sgmii_readreg:	[mii interface function]
   9547  *
    9548  *	Read a PHY register on the SGMII.
    9549  * This could be handled by the PHY layer if we didn't have to lock the
    9550  * resource ...
   9551  */
   9552 static int
   9553 wm_sgmii_readreg(device_t self, int phy, int reg)
   9554 {
   9555 	struct wm_softc *sc = device_private(self);
   9556 	uint32_t i2ccmd;
   9557 	int i, rv;
   9558 
   9559 	if (sc->phy.acquire(sc)) {
   9560 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9561 		    __func__);
   9562 		return 0;
   9563 	}
   9564 
   9565 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9566 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9567 	    | I2CCMD_OPCODE_READ;
   9568 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9569 
   9570 	/* Poll the ready bit */
   9571 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9572 		delay(50);
   9573 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9574 		if (i2ccmd & I2CCMD_READY)
   9575 			break;
   9576 	}
   9577 	if ((i2ccmd & I2CCMD_READY) == 0)
   9578 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9579 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9580 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9581 
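         	/* The data comes back byte-swapped over the I2C interface. */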
   9582 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9583 
   9584 	sc->phy.release(sc);
   9585 	return rv;
   9586 }
   9587 
   9588 /*
   9589  * wm_sgmii_writereg:	[mii interface function]
   9590  *
   9591  *	Write a PHY register on the SGMII.
   9592  * This could be handled by the PHY layer if we didn't have to lock the
    9593  * resource ...
   9594  */
   9595 static void
   9596 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9597 {
   9598 	struct wm_softc *sc = device_private(self);
   9599 	uint32_t i2ccmd;
   9600 	int i;
   9601 	int val_swapped;
   9602 
   9603 	if (sc->phy.acquire(sc) != 0) {
   9604 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9605 		    __func__);
   9606 		return;
   9607 	}
   9608 	/* Swap the data bytes for the I2C interface */
   9609 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9610 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9611 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9612 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9613 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9614 
   9615 	/* Poll the ready bit */
   9616 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9617 		delay(50);
   9618 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9619 		if (i2ccmd & I2CCMD_READY)
   9620 			break;
   9621 	}
   9622 	if ((i2ccmd & I2CCMD_READY) == 0)
   9623 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9624 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9625 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9626 
   9627 	sc->phy.release(sc);
   9628 }
   9629 
   9630 /* TBI related */
   9631 
   9632 /*
   9633  * wm_tbi_mediainit:
   9634  *
   9635  *	Initialize media for use on 1000BASE-X devices.
   9636  */
   9637 static void
   9638 wm_tbi_mediainit(struct wm_softc *sc)
   9639 {
   9640 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9641 	const char *sep = "";
   9642 
   9643 	if (sc->sc_type < WM_T_82543)
   9644 		sc->sc_tipg = TIPG_WM_DFLT;
   9645 	else
   9646 		sc->sc_tipg = TIPG_LG_DFLT;
   9647 
   9648 	sc->sc_tbi_serdes_anegticks = 5;
   9649 
   9650 	/* Initialize our media structures */
   9651 	sc->sc_mii.mii_ifp = ifp;
   9652 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9653 
   9654 	if ((sc->sc_type >= WM_T_82575)
   9655 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9656 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9657 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9658 	else
   9659 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9660 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9661 
   9662 	/*
   9663 	 * SWD Pins:
   9664 	 *
   9665 	 *	0 = Link LED (output)
   9666 	 *	1 = Loss Of Signal (input)
   9667 	 */
   9668 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9669 
   9670 	/* XXX Perhaps this is only for TBI */
   9671 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9672 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9673 
   9674 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9675 		sc->sc_ctrl &= ~CTRL_LRST;
   9676 
   9677 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9678 
   9679 #define	ADD(ss, mm, dd)							\
   9680 do {									\
   9681 	aprint_normal("%s%s", sep, ss);					\
   9682 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9683 	sep = ", ";							\
   9684 } while (/*CONSTCOND*/0)
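         /*
          * ADD() both registers an ifmedia entry and prints its name on the
          * attach line; "sep" becomes ", " after the first entry.
          */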
   9685 
   9686 	aprint_normal_dev(sc->sc_dev, "");
   9687 
   9688 	if (sc->sc_type == WM_T_I354) {
   9689 		uint32_t status;
   9690 
   9691 		status = CSR_READ(sc, WMREG_STATUS);
   9692 		if (((status & STATUS_2P5_SKU) != 0)
   9693 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   9694 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   9695 		} else
   9696 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   9697 	} else if (sc->sc_type == WM_T_82545) {
   9698 		/* Only 82545 is LX (XXX except SFP) */
   9699 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9700 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9701 	} else {
   9702 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9703 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9704 	}
   9705 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9706 	aprint_normal("\n");
   9707 
   9708 #undef ADD
   9709 
   9710 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9711 }
   9712 
   9713 /*
   9714  * wm_tbi_mediachange:	[ifmedia interface function]
   9715  *
   9716  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9717  */
   9718 static int
   9719 wm_tbi_mediachange(struct ifnet *ifp)
   9720 {
   9721 	struct wm_softc *sc = ifp->if_softc;
   9722 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9723 	uint32_t status;
   9724 	int i;
   9725 
   9726 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9727 		/* XXX need some work for >= 82571 and < 82575 */
   9728 		if (sc->sc_type < WM_T_82575)
   9729 			return 0;
   9730 	}
   9731 
   9732 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9733 	    || (sc->sc_type >= WM_T_82575))
   9734 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9735 
   9736 	sc->sc_ctrl &= ~CTRL_LRST;
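	/*
	 * Build the TX config word: enable autonegotiation and
	 * advertise the duplex mode(s) implied by the selected media.
	 */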
   9737 	sc->sc_txcw = TXCW_ANE;
   9738 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9739 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9740 	else if (ife->ifm_media & IFM_FDX)
   9741 		sc->sc_txcw |= TXCW_FD;
   9742 	else
   9743 		sc->sc_txcw |= TXCW_HD;
   9744 
   9745 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9746 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9747 
   9748 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9749 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9750 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9751 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9752 	CSR_WRITE_FLUSH(sc);
   9753 	delay(1000);
   9754 
   9755 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9756 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9757 
   9758 	/*
    9759 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    9760 	 * optics detect a signal; on older chips a signal reads as 0.
   9761 	 */
   9762 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9763 		/* Have signal; wait for the link to come up. */
   9764 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9765 			delay(10000);
   9766 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9767 				break;
   9768 		}
   9769 
   9770 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9771 			    device_xname(sc->sc_dev),i));
   9772 
   9773 		status = CSR_READ(sc, WMREG_STATUS);
   9774 		DPRINTF(WM_DEBUG_LINK,
   9775 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9776 			device_xname(sc->sc_dev),status, STATUS_LU));
   9777 		if (status & STATUS_LU) {
   9778 			/* Link is up. */
   9779 			DPRINTF(WM_DEBUG_LINK,
   9780 			    ("%s: LINK: set media -> link up %s\n",
   9781 			    device_xname(sc->sc_dev),
   9782 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9783 
   9784 			/*
   9785 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9786 			 * so we should update sc->sc_ctrl
   9787 			 */
   9788 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9789 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9790 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9791 			if (status & STATUS_FD)
   9792 				sc->sc_tctl |=
   9793 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9794 			else
   9795 				sc->sc_tctl |=
   9796 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9797 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9798 				sc->sc_fcrtl |= FCRTL_XONE;
   9799 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9800 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9801 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9802 				      sc->sc_fcrtl);
   9803 			sc->sc_tbi_linkup = 1;
   9804 		} else {
   9805 			if (i == WM_LINKUP_TIMEOUT)
   9806 				wm_check_for_link(sc);
   9807 			/* Link is down. */
   9808 			DPRINTF(WM_DEBUG_LINK,
   9809 			    ("%s: LINK: set media -> link down\n",
   9810 			    device_xname(sc->sc_dev)));
   9811 			sc->sc_tbi_linkup = 0;
   9812 		}
   9813 	} else {
   9814 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9815 		    device_xname(sc->sc_dev)));
   9816 		sc->sc_tbi_linkup = 0;
   9817 	}
   9818 
   9819 	wm_tbi_serdes_set_linkled(sc);
   9820 
   9821 	return 0;
   9822 }
   9823 
   9824 /*
   9825  * wm_tbi_mediastatus:	[ifmedia interface function]
   9826  *
   9827  *	Get the current interface media status on a 1000BASE-X device.
   9828  */
   9829 static void
   9830 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9831 {
   9832 	struct wm_softc *sc = ifp->if_softc;
   9833 	uint32_t ctrl, status;
   9834 
   9835 	ifmr->ifm_status = IFM_AVALID;
   9836 	ifmr->ifm_active = IFM_ETHER;
   9837 
   9838 	status = CSR_READ(sc, WMREG_STATUS);
   9839 	if ((status & STATUS_LU) == 0) {
   9840 		ifmr->ifm_active |= IFM_NONE;
   9841 		return;
   9842 	}
   9843 
   9844 	ifmr->ifm_status |= IFM_ACTIVE;
   9845 	/* Only 82545 is LX */
   9846 	if (sc->sc_type == WM_T_82545)
   9847 		ifmr->ifm_active |= IFM_1000_LX;
   9848 	else
   9849 		ifmr->ifm_active |= IFM_1000_SX;
   9850 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9851 		ifmr->ifm_active |= IFM_FDX;
   9852 	else
   9853 		ifmr->ifm_active |= IFM_HDX;
   9854 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9855 	if (ctrl & CTRL_RFCE)
   9856 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9857 	if (ctrl & CTRL_TFCE)
   9858 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9859 }
   9860 
   9861 /* XXX TBI only */
   9862 static int
   9863 wm_check_for_link(struct wm_softc *sc)
   9864 {
   9865 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9866 	uint32_t rxcw;
   9867 	uint32_t ctrl;
   9868 	uint32_t status;
   9869 	uint32_t sig;
   9870 
   9871 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9872 		/* XXX need some work for >= 82571 */
   9873 		if (sc->sc_type >= WM_T_82571) {
   9874 			sc->sc_tbi_linkup = 1;
   9875 			return 0;
   9876 		}
   9877 	}
   9878 
   9879 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9880 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9881 	status = CSR_READ(sc, WMREG_STATUS);
   9882 
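	/* The expected state of CTRL_SWDPIN(1) when a signal is present */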
   9883 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9884 
   9885 	DPRINTF(WM_DEBUG_LINK,
   9886 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9887 		device_xname(sc->sc_dev), __func__,
   9888 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9889 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9890 
   9891 	/*
   9892 	 * SWDPIN   LU RXCW
   9893 	 *      0    0    0
   9894 	 *      0    0    1	(should not happen)
   9895 	 *      0    1    0	(should not happen)
   9896 	 *      0    1    1	(should not happen)
   9897 	 *      1    0    0	Disable autonego and force linkup
   9898 	 *      1    0    1	got /C/ but not linkup yet
   9899 	 *      1    1    0	(linkup)
   9900 	 *      1    1    1	If IFM_AUTO, back to autonego
   9901 	 *
   9902 	 */
   9903 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9904 	    && ((status & STATUS_LU) == 0)
   9905 	    && ((rxcw & RXCW_C) == 0)) {
   9906 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9907 			__func__));
   9908 		sc->sc_tbi_linkup = 0;
   9909 		/* Disable auto-negotiation in the TXCW register */
   9910 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9911 
   9912 		/*
   9913 		 * Force link-up and also force full-duplex.
   9914 		 *
    9915 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9916 		 * automatically, so we should update sc->sc_ctrl to match.
   9917 		 */
   9918 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9919 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9920 	} else if (((status & STATUS_LU) != 0)
   9921 	    && ((rxcw & RXCW_C) != 0)
   9922 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9923 		sc->sc_tbi_linkup = 1;
   9924 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9925 			__func__));
   9926 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9927 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9928 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9929 	    && ((rxcw & RXCW_C) != 0)) {
   9930 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9931 	} else {
   9932 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9933 			status));
   9934 	}
   9935 
   9936 	return 0;
   9937 }
   9938 
   9939 /*
   9940  * wm_tbi_tick:
   9941  *
   9942  *	Check the link on TBI devices.
   9943  *	This function acts as mii_tick().
   9944  */
   9945 static void
   9946 wm_tbi_tick(struct wm_softc *sc)
   9947 {
   9948 	struct mii_data *mii = &sc->sc_mii;
   9949 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9950 	uint32_t status;
   9951 
   9952 	KASSERT(WM_CORE_LOCKED(sc));
   9953 
   9954 	status = CSR_READ(sc, WMREG_STATUS);
   9955 
   9956 	/* XXX is this needed? */
   9957 	(void)CSR_READ(sc, WMREG_RXCW);
   9958 	(void)CSR_READ(sc, WMREG_CTRL);
   9959 
   9960 	/* set link status */
   9961 	if ((status & STATUS_LU) == 0) {
   9962 		DPRINTF(WM_DEBUG_LINK,
   9963 		    ("%s: LINK: checklink -> down\n",
   9964 			device_xname(sc->sc_dev)));
   9965 		sc->sc_tbi_linkup = 0;
   9966 	} else if (sc->sc_tbi_linkup == 0) {
   9967 		DPRINTF(WM_DEBUG_LINK,
   9968 		    ("%s: LINK: checklink -> up %s\n",
   9969 			device_xname(sc->sc_dev),
   9970 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9971 		sc->sc_tbi_linkup = 1;
   9972 		sc->sc_tbi_serdes_ticks = 0;
   9973 	}
   9974 
   9975 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9976 		goto setled;
   9977 
   9978 	if ((status & STATUS_LU) == 0) {
   9979 		sc->sc_tbi_linkup = 0;
   9980 		/* If the timer expired, retry autonegotiation */
   9981 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9982 		    && (++sc->sc_tbi_serdes_ticks
   9983 			>= sc->sc_tbi_serdes_anegticks)) {
   9984 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9985 			sc->sc_tbi_serdes_ticks = 0;
   9986 			/*
   9987 			 * Reset the link, and let autonegotiation do
   9988 			 * its thing
   9989 			 */
   9990 			sc->sc_ctrl |= CTRL_LRST;
   9991 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9992 			CSR_WRITE_FLUSH(sc);
   9993 			delay(1000);
   9994 			sc->sc_ctrl &= ~CTRL_LRST;
   9995 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9996 			CSR_WRITE_FLUSH(sc);
   9997 			delay(1000);
   9998 			CSR_WRITE(sc, WMREG_TXCW,
   9999 			    sc->sc_txcw & ~TXCW_ANE);
   10000 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10001 		}
   10002 	}
   10003 
   10004 setled:
   10005 	wm_tbi_serdes_set_linkled(sc);
   10006 }
   10007 
   10008 /* SERDES related */
   10009 static void
   10010 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10011 {
   10012 	uint32_t reg;
   10013 
   10014 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10015 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10016 		return;
   10017 
   10018 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10019 	reg |= PCS_CFG_PCS_EN;
   10020 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10021 
   10022 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10023 	reg &= ~CTRL_EXT_SWDPIN(3);
   10024 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10025 	CSR_WRITE_FLUSH(sc);
   10026 }
   10027 
   10028 static int
   10029 wm_serdes_mediachange(struct ifnet *ifp)
   10030 {
   10031 	struct wm_softc *sc = ifp->if_softc;
   10032 	bool pcs_autoneg = true; /* XXX */
   10033 	uint32_t ctrl_ext, pcs_lctl, reg;
   10034 
   10035 	/* XXX Currently, this function is not called on 8257[12] */
   10036 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10037 	    || (sc->sc_type >= WM_T_82575))
   10038 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10039 
   10040 	wm_serdes_power_up_link_82575(sc);
   10041 
   10042 	sc->sc_ctrl |= CTRL_SLU;
   10043 
   10044 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10045 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10046 
   10047 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10048 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10049 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10050 	case CTRL_EXT_LINK_MODE_SGMII:
   10051 		pcs_autoneg = true;
   10052 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10053 		break;
   10054 	case CTRL_EXT_LINK_MODE_1000KX:
   10055 		pcs_autoneg = false;
   10056 		/* FALLTHROUGH */
   10057 	default:
   10058 		if ((sc->sc_type == WM_T_82575)
   10059 		    || (sc->sc_type == WM_T_82576)) {
   10060 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10061 				pcs_autoneg = false;
   10062 		}
   10063 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10064 		    | CTRL_FRCFDX;
   10065 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10066 	}
   10067 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10068 
   10069 	if (pcs_autoneg) {
   10070 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10071 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10072 
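		/* Advertise symmetric and asymmetric PAUSE in the autoneg word */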
   10073 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10074 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10075 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10076 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10077 	} else
   10078 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10079 
   10080 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10081 
   10083 	return 0;
   10084 }
   10085 
   10086 static void
   10087 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10088 {
   10089 	struct wm_softc *sc = ifp->if_softc;
   10090 	struct mii_data *mii = &sc->sc_mii;
   10091 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10092 	uint32_t pcs_adv, pcs_lpab, reg;
   10093 
   10094 	ifmr->ifm_status = IFM_AVALID;
   10095 	ifmr->ifm_active = IFM_ETHER;
   10096 
   10097 	/* Check PCS */
   10098 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10099 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10100 		ifmr->ifm_active |= IFM_NONE;
   10101 		sc->sc_tbi_linkup = 0;
   10102 		goto setled;
   10103 	}
   10104 
   10105 	sc->sc_tbi_linkup = 1;
   10106 	ifmr->ifm_status |= IFM_ACTIVE;
   10107 	if (sc->sc_type == WM_T_I354) {
   10108 		uint32_t status;
   10109 
   10110 		status = CSR_READ(sc, WMREG_STATUS);
   10111 		if (((status & STATUS_2P5_SKU) != 0)
   10112 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10113 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10114 		} else
   10115 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10116 	} else {
   10117 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10118 		case PCS_LSTS_SPEED_10:
   10119 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10120 			break;
   10121 		case PCS_LSTS_SPEED_100:
   10122 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10123 			break;
   10124 		case PCS_LSTS_SPEED_1000:
   10125 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10126 			break;
   10127 		default:
   10128 			device_printf(sc->sc_dev, "Unknown speed\n");
   10129 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10130 			break;
   10131 		}
   10132 	}
   10133 	if ((reg & PCS_LSTS_FDX) != 0)
   10134 		ifmr->ifm_active |= IFM_FDX;
   10135 	else
   10136 		ifmr->ifm_active |= IFM_HDX;
   10137 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10138 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10139 		/* Check flow */
   10140 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10141 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10142 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10143 			goto setled;
   10144 		}
   10145 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10146 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10147 		DPRINTF(WM_DEBUG_LINK,
   10148 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10149 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10150 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10151 			mii->mii_media_active |= IFM_FLOW
   10152 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10153 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10154 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10155 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10156 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10157 			mii->mii_media_active |= IFM_FLOW
   10158 			    | IFM_ETH_TXPAUSE;
   10159 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10160 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10161 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10162 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10163 			mii->mii_media_active |= IFM_FLOW
   10164 			    | IFM_ETH_RXPAUSE;
   10165 		}
   10166 	}
   10167 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10168 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10169 setled:
   10170 	wm_tbi_serdes_set_linkled(sc);
   10171 }
   10172 
   10173 /*
   10174  * wm_serdes_tick:
   10175  *
   10176  *	Check the link on serdes devices.
   10177  */
   10178 static void
   10179 wm_serdes_tick(struct wm_softc *sc)
   10180 {
   10181 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10182 	struct mii_data *mii = &sc->sc_mii;
   10183 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10184 	uint32_t reg;
   10185 
   10186 	KASSERT(WM_CORE_LOCKED(sc));
   10187 
   10188 	mii->mii_media_status = IFM_AVALID;
   10189 	mii->mii_media_active = IFM_ETHER;
   10190 
   10191 	/* Check PCS */
   10192 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10193 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10194 		mii->mii_media_status |= IFM_ACTIVE;
   10195 		sc->sc_tbi_linkup = 1;
   10196 		sc->sc_tbi_serdes_ticks = 0;
   10197 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10198 		if ((reg & PCS_LSTS_FDX) != 0)
   10199 			mii->mii_media_active |= IFM_FDX;
   10200 		else
   10201 			mii->mii_media_active |= IFM_HDX;
   10202 	} else {
    10203 		mii->mii_media_active |= IFM_NONE;
   10204 		sc->sc_tbi_linkup = 0;
   10205 		/* If the timer expired, retry autonegotiation */
   10206 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10207 		    && (++sc->sc_tbi_serdes_ticks
   10208 			>= sc->sc_tbi_serdes_anegticks)) {
   10209 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10210 			sc->sc_tbi_serdes_ticks = 0;
   10211 			/* XXX */
   10212 			wm_serdes_mediachange(ifp);
   10213 		}
   10214 	}
   10215 
   10216 	wm_tbi_serdes_set_linkled(sc);
   10217 }
   10218 
   10219 /* SFP related */
   10220 
   10221 static int
   10222 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10223 {
   10224 	uint32_t i2ccmd;
   10225 	int i;
   10226 
   10227 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10228 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10229 
   10230 	/* Poll the ready bit */
   10231 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10232 		delay(50);
   10233 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10234 		if (i2ccmd & I2CCMD_READY)
   10235 			break;
   10236 	}
   10237 	if ((i2ccmd & I2CCMD_READY) == 0)
   10238 		return -1;
   10239 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10240 		return -1;
   10241 
   10242 	*data = i2ccmd & 0x00ff;
   10243 
   10244 	return 0;
   10245 }
   10246 
   10247 static uint32_t
   10248 wm_sfp_get_media_type(struct wm_softc *sc)
   10249 {
   10250 	uint32_t ctrl_ext;
   10251 	uint8_t val = 0;
   10252 	int timeout = 3;
   10253 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10254 	int rv = -1;
   10255 
   10256 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10257 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10258 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10259 	CSR_WRITE_FLUSH(sc);
   10260 
   10261 	/* Read SFP module data */
   10262 	while (timeout) {
   10263 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10264 		if (rv == 0)
   10265 			break;
   10266 		delay(100*1000); /* XXX too big */
   10267 		timeout--;
   10268 	}
   10269 	if (rv != 0)
   10270 		goto out;
   10271 	switch (val) {
   10272 	case SFF_SFP_ID_SFF:
   10273 		aprint_normal_dev(sc->sc_dev,
   10274 		    "Module/Connector soldered to board\n");
   10275 		break;
   10276 	case SFF_SFP_ID_SFP:
   10277 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10278 		break;
   10279 	case SFF_SFP_ID_UNKNOWN:
   10280 		goto out;
   10281 	default:
   10282 		break;
   10283 	}
   10284 
   10285 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10286 	if (rv != 0) {
   10287 		goto out;
   10288 	}
   10289 
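	/* Map the SFF Ethernet compliance flags to a media type */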
   10290 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10291 		mediatype = WM_MEDIATYPE_SERDES;
    10292 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   10293 		sc->sc_flags |= WM_F_SGMII;
   10294 		mediatype = WM_MEDIATYPE_COPPER;
    10295 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10296 		sc->sc_flags |= WM_F_SGMII;
   10297 		mediatype = WM_MEDIATYPE_SERDES;
   10298 	}
   10299 
   10300 out:
   10301 	/* Restore I2C interface setting */
   10302 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10303 
   10304 	return mediatype;
   10305 }
   10306 
   10307 /*
   10308  * NVM related.
   10309  * Microwire, SPI (w/wo EERD) and Flash.
   10310  */
   10311 
   10312 /* Both spi and uwire */
   10313 
   10314 /*
   10315  * wm_eeprom_sendbits:
   10316  *
   10317  *	Send a series of bits to the EEPROM.
   10318  */
   10319 static void
   10320 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10321 {
   10322 	uint32_t reg;
   10323 	int x;
   10324 
   10325 	reg = CSR_READ(sc, WMREG_EECD);
   10326 
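	/*
	 * Clock each bit out MSB first: put the bit on DI, then pulse
	 * SK high and low again, with a 2us settle time around each edge.
	 */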
   10327 	for (x = nbits; x > 0; x--) {
   10328 		if (bits & (1U << (x - 1)))
   10329 			reg |= EECD_DI;
   10330 		else
   10331 			reg &= ~EECD_DI;
   10332 		CSR_WRITE(sc, WMREG_EECD, reg);
   10333 		CSR_WRITE_FLUSH(sc);
   10334 		delay(2);
   10335 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10336 		CSR_WRITE_FLUSH(sc);
   10337 		delay(2);
   10338 		CSR_WRITE(sc, WMREG_EECD, reg);
   10339 		CSR_WRITE_FLUSH(sc);
   10340 		delay(2);
   10341 	}
   10342 }
   10343 
   10344 /*
   10345  * wm_eeprom_recvbits:
   10346  *
   10347  *	Receive a series of bits from the EEPROM.
   10348  */
   10349 static void
   10350 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10351 {
   10352 	uint32_t reg, val;
   10353 	int x;
   10354 
   10355 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10356 
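	/*
	 * Clock each bit in MSB first: raise SK, sample DO while the
	 * clock is high, then lower SK again.
	 */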
   10357 	val = 0;
   10358 	for (x = nbits; x > 0; x--) {
   10359 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10360 		CSR_WRITE_FLUSH(sc);
   10361 		delay(2);
   10362 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10363 			val |= (1U << (x - 1));
   10364 		CSR_WRITE(sc, WMREG_EECD, reg);
   10365 		CSR_WRITE_FLUSH(sc);
   10366 		delay(2);
   10367 	}
   10368 	*valp = val;
   10369 }
   10370 
   10371 /* Microwire */
   10372 
   10373 /*
   10374  * wm_nvm_read_uwire:
   10375  *
   10376  *	Read a word from the EEPROM using the MicroWire protocol.
   10377  */
   10378 static int
   10379 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10380 {
   10381 	uint32_t reg, val;
   10382 	int i;
   10383 
   10384 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10385 		device_xname(sc->sc_dev), __func__));
   10386 
   10387 	for (i = 0; i < wordcnt; i++) {
   10388 		/* Clear SK and DI. */
   10389 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10390 		CSR_WRITE(sc, WMREG_EECD, reg);
   10391 
   10392 		/*
   10393 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10394 		 * and Xen.
   10395 		 *
    10396 		 * We use this workaround only for the 82540, because qemu's
    10397 		 * e1000 acts as an 82540.
   10398 		 */
   10399 		if (sc->sc_type == WM_T_82540) {
   10400 			reg |= EECD_SK;
   10401 			CSR_WRITE(sc, WMREG_EECD, reg);
   10402 			reg &= ~EECD_SK;
   10403 			CSR_WRITE(sc, WMREG_EECD, reg);
   10404 			CSR_WRITE_FLUSH(sc);
   10405 			delay(2);
   10406 		}
   10407 		/* XXX: end of workaround */
   10408 
   10409 		/* Set CHIP SELECT. */
   10410 		reg |= EECD_CS;
   10411 		CSR_WRITE(sc, WMREG_EECD, reg);
   10412 		CSR_WRITE_FLUSH(sc);
   10413 		delay(2);
   10414 
   10415 		/* Shift in the READ command. */
   10416 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10417 
   10418 		/* Shift in address. */
   10419 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10420 
   10421 		/* Shift out the data. */
   10422 		wm_eeprom_recvbits(sc, &val, 16);
   10423 		data[i] = val & 0xffff;
   10424 
   10425 		/* Clear CHIP SELECT. */
   10426 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10427 		CSR_WRITE(sc, WMREG_EECD, reg);
   10428 		CSR_WRITE_FLUSH(sc);
   10429 		delay(2);
   10430 	}
   10431 
   10432 	return 0;
   10433 }
   10434 
   10435 /* SPI */
   10436 
   10437 /*
   10438  * Set SPI and FLASH related information from the EECD register.
   10439  * For 82541 and 82547, the word size is taken from EEPROM.
   10440  */
   10441 static int
   10442 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10443 {
   10444 	int size;
   10445 	uint32_t reg;
   10446 	uint16_t data;
   10447 
   10448 	reg = CSR_READ(sc, WMREG_EECD);
   10449 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10450 
   10451 	/* Read the size of NVM from EECD by default */
   10452 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10453 	switch (sc->sc_type) {
   10454 	case WM_T_82541:
   10455 	case WM_T_82541_2:
   10456 	case WM_T_82547:
   10457 	case WM_T_82547_2:
   10458 		/* Set dummy value to access EEPROM */
   10459 		sc->sc_nvm_wordsize = 64;
   10460 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10461 		reg = data;
   10462 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10463 		if (size == 0)
   10464 			size = 6; /* 64 word size */
   10465 		else
   10466 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10467 		break;
   10468 	case WM_T_80003:
   10469 	case WM_T_82571:
   10470 	case WM_T_82572:
   10471 	case WM_T_82573: /* SPI case */
   10472 	case WM_T_82574: /* SPI case */
   10473 	case WM_T_82583: /* SPI case */
   10474 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10475 		if (size > 14)
   10476 			size = 14;
   10477 		break;
   10478 	case WM_T_82575:
   10479 	case WM_T_82576:
   10480 	case WM_T_82580:
   10481 	case WM_T_I350:
   10482 	case WM_T_I354:
   10483 	case WM_T_I210:
   10484 	case WM_T_I211:
   10485 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10486 		if (size > 15)
   10487 			size = 15;
   10488 		break;
   10489 	default:
   10490 		aprint_error_dev(sc->sc_dev,
   10491 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10492 		return -1;
   10493 		break;
   10494 	}
   10495 
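	/* size is the log2 of the word count, e.g. 6 -> 64 words */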
   10496 	sc->sc_nvm_wordsize = 1 << size;
   10497 
   10498 	return 0;
   10499 }
   10500 
   10501 /*
   10502  * wm_nvm_ready_spi:
   10503  *
   10504  *	Wait for a SPI EEPROM to be ready for commands.
   10505  */
   10506 static int
   10507 wm_nvm_ready_spi(struct wm_softc *sc)
   10508 {
   10509 	uint32_t val;
   10510 	int usec;
   10511 
   10512 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10513 		device_xname(sc->sc_dev), __func__));
   10514 
   10515 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10516 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10517 		wm_eeprom_recvbits(sc, &val, 8);
   10518 		if ((val & SPI_SR_RDY) == 0)
   10519 			break;
   10520 	}
   10521 	if (usec >= SPI_MAX_RETRIES) {
   10522 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10523 		return 1;
   10524 	}
   10525 	return 0;
   10526 }
   10527 
   10528 /*
   10529  * wm_nvm_read_spi:
   10530  *
    10531  *	Read a word from the EEPROM using the SPI protocol.
   10532  */
   10533 static int
   10534 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10535 {
   10536 	uint32_t reg, val;
   10537 	int i;
   10538 	uint8_t opc;
   10539 
   10540 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10541 		device_xname(sc->sc_dev), __func__));
   10542 
   10543 	/* Clear SK and CS. */
   10544 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10545 	CSR_WRITE(sc, WMREG_EECD, reg);
   10546 	CSR_WRITE_FLUSH(sc);
   10547 	delay(2);
   10548 
   10549 	if (wm_nvm_ready_spi(sc))
   10550 		return 1;
   10551 
   10552 	/* Toggle CS to flush commands. */
   10553 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10554 	CSR_WRITE_FLUSH(sc);
   10555 	delay(2);
   10556 	CSR_WRITE(sc, WMREG_EECD, reg);
   10557 	CSR_WRITE_FLUSH(sc);
   10558 	delay(2);
   10559 
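	/*
	 * Parts with 8 address bits carry the ninth byte-address bit
	 * (i.e. words >= 128) in the opcode itself.
	 */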
   10560 	opc = SPI_OPC_READ;
   10561 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10562 		opc |= SPI_OPC_A8;
   10563 
   10564 	wm_eeprom_sendbits(sc, opc, 8);
   10565 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10566 
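	/* The low byte of each word is shifted out first; swap the bytes */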
   10567 	for (i = 0; i < wordcnt; i++) {
   10568 		wm_eeprom_recvbits(sc, &val, 16);
   10569 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10570 	}
   10571 
   10572 	/* Raise CS and clear SK. */
   10573 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10574 	CSR_WRITE(sc, WMREG_EECD, reg);
   10575 	CSR_WRITE_FLUSH(sc);
   10576 	delay(2);
   10577 
   10578 	return 0;
   10579 }
   10580 
   10581 /* Using with EERD */
   10582 
   10583 static int
   10584 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10585 {
   10586 	uint32_t attempts = 100000;
   10587 	uint32_t i, reg = 0;
   10588 	int32_t done = -1;
   10589 
   10590 	for (i = 0; i < attempts; i++) {
   10591 		reg = CSR_READ(sc, rw);
   10592 
   10593 		if (reg & EERD_DONE) {
   10594 			done = 0;
   10595 			break;
   10596 		}
   10597 		delay(5);
   10598 	}
   10599 
   10600 	return done;
   10601 }
   10602 
   10603 static int
   10604 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10605     uint16_t *data)
   10606 {
   10607 	int i, eerd = 0;
   10608 	int error = 0;
   10609 
   10610 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10611 		device_xname(sc->sc_dev), __func__));
   10612 
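	/*
	 * For each word: start a read by writing the address with
	 * EERD_START set, poll for completion, then take the data from
	 * the upper half of the register.
	 */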
   10613 	for (i = 0; i < wordcnt; i++) {
   10614 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10615 
   10616 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10617 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10618 		if (error != 0)
   10619 			break;
   10620 
   10621 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10622 	}
   10623 
   10624 	return error;
   10625 }
   10626 
   10627 /* Flash */
   10628 
   10629 static int
   10630 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10631 {
   10632 	uint32_t eecd;
   10633 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10634 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10635 	uint8_t sig_byte = 0;
   10636 
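	/* The bank signature lives in the high byte of the signature word */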
   10637 	switch (sc->sc_type) {
   10638 	case WM_T_PCH_SPT:
   10639 		/*
   10640 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10641 		 * sector valid bits from the NVM.
   10642 		 */
   10643 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10644 		if ((*bank == 0) || (*bank == 1)) {
   10645 			aprint_error_dev(sc->sc_dev,
   10646 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10647 				*bank);
   10648 			return -1;
   10649 		} else {
   10650 			*bank = *bank - 2;
   10651 			return 0;
   10652 		}
   10653 	case WM_T_ICH8:
   10654 	case WM_T_ICH9:
   10655 		eecd = CSR_READ(sc, WMREG_EECD);
   10656 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10657 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10658 			return 0;
   10659 		}
   10660 		/* FALLTHROUGH */
   10661 	default:
   10662 		/* Default to 0 */
   10663 		*bank = 0;
   10664 
   10665 		/* Check bank 0 */
   10666 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10667 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10668 			*bank = 0;
   10669 			return 0;
   10670 		}
   10671 
   10672 		/* Check bank 1 */
   10673 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10674 		    &sig_byte);
   10675 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10676 			*bank = 1;
   10677 			return 0;
   10678 		}
   10679 	}
   10680 
   10681 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10682 		device_xname(sc->sc_dev)));
   10683 	return -1;
   10684 }
   10685 
   10686 /******************************************************************************
   10687  * This function does initial flash setup so that a new read/write/erase cycle
   10688  * can be started.
   10689  *
   10690  * sc - The pointer to the hw structure
   10691  ****************************************************************************/
   10692 static int32_t
   10693 wm_ich8_cycle_init(struct wm_softc *sc)
   10694 {
   10695 	uint16_t hsfsts;
   10696 	int32_t error = 1;
   10697 	int32_t i     = 0;
   10698 
   10699 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10700 
    10701 	/* Check the Flash Descriptor Valid bit in HW status */
   10702 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10703 		return error;
   10704 	}
   10705 
    10706 	/* Clear FCERR and DAEL in HW status by writing 1s */
   10708 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10709 
   10710 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10711 
   10712 	/*
    10713 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10714 	 * check against in order to start a new cycle, or the FDONE bit
    10715 	 * should be changed in the hardware so that it is 1 after hardware
    10716 	 * reset, which can then be used as an indication of whether a cycle
    10717 	 * is in progress or has been completed.  We should also have some
    10718 	 * software semaphore mechanism to guard FDONE or the cycle-in-
    10719 	 * progress bit so that accesses by two threads can be serialized,
    10720 	 * or some way to keep two threads from starting a cycle at once.
   10721 	 */
   10722 
   10723 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10724 		/*
   10725 		 * There is no cycle running at present, so we can start a
   10726 		 * cycle
   10727 		 */
   10728 
   10729 		/* Begin by setting Flash Cycle Done. */
   10730 		hsfsts |= HSFSTS_DONE;
   10731 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10732 		error = 0;
   10733 	} else {
   10734 		/*
   10735 		 * otherwise poll for sometime so the current cycle has a
   10736 		 * chance to end before giving up.
   10737 		 */
   10738 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10739 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10740 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10741 				error = 0;
   10742 				break;
   10743 			}
   10744 			delay(1);
   10745 		}
   10746 		if (error == 0) {
   10747 			/*
   10748 			 * Successful in waiting for previous cycle to timeout,
   10749 			 * now set the Flash Cycle Done.
   10750 			 */
   10751 			hsfsts |= HSFSTS_DONE;
   10752 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10753 		}
   10754 	}
   10755 	return error;
   10756 }
   10757 
   10758 /******************************************************************************
   10759  * This function starts a flash cycle and waits for its completion
   10760  *
   10761  * sc - The pointer to the hw structure
   10762  ****************************************************************************/
   10763 static int32_t
   10764 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10765 {
   10766 	uint16_t hsflctl;
   10767 	uint16_t hsfsts;
   10768 	int32_t error = 1;
   10769 	uint32_t i = 0;
   10770 
   10771 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10772 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10773 	hsflctl |= HSFCTL_GO;
   10774 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10775 
   10776 	/* Wait till FDONE bit is set to 1 */
   10777 	do {
   10778 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10779 		if (hsfsts & HSFSTS_DONE)
   10780 			break;
   10781 		delay(1);
   10782 		i++;
   10783 	} while (i < timeout);
    10784 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10785 		error = 0;
   10786 
   10787 	return error;
   10788 }
   10789 
   10790 /******************************************************************************
   10791  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10792  *
   10793  * sc - The pointer to the hw structure
   10794  * index - The index of the byte or word to read.
   10795  * size - Size of data to read, 1=byte 2=word, 4=dword
   10796  * data - Pointer to the word to store the value read.
   10797  *****************************************************************************/
   10798 static int32_t
   10799 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10800     uint32_t size, uint32_t *data)
   10801 {
   10802 	uint16_t hsfsts;
   10803 	uint16_t hsflctl;
   10804 	uint32_t flash_linear_address;
   10805 	uint32_t flash_data = 0;
   10806 	int32_t error = 1;
   10807 	int32_t count = 0;
   10808 
   10809 	if (size < 1  || size > 4 || data == 0x0 ||
   10810 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10811 		return error;
   10812 
   10813 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10814 	    sc->sc_ich8_flash_base;
   10815 
   10816 	do {
   10817 		delay(1);
   10818 		/* Steps */
   10819 		error = wm_ich8_cycle_init(sc);
   10820 		if (error)
   10821 			break;
   10822 
   10823 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10824 		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes) */
   10825 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10826 		    & HSFCTL_BCOUNT_MASK;
   10827 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10828 		if (sc->sc_type == WM_T_PCH_SPT) {
   10829 			/*
    10830 			 * In SPT, this register is in LAN memory space, not
    10831 			 * flash.  Therefore, only 32 bit access is supported.
   10832 			 */
   10833 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10834 			    (uint32_t)hsflctl);
   10835 		} else
   10836 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10837 
   10838 		/*
   10839 		 * Write the last 24 bits of index into Flash Linear address
   10840 		 * field in Flash Address
   10841 		 */
   10842 		/* TODO: TBD maybe check the index against the size of flash */
   10843 
   10844 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10845 
   10846 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10847 
   10848 		/*
    10849 		 * If FCERR is set, clear it and retry the whole sequence a
    10850 		 * few more times; otherwise read the result from Flash
    10851 		 * Data0, least significant byte first.
   10853 		 */
   10854 		if (error == 0) {
   10855 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10856 			if (size == 1)
   10857 				*data = (uint8_t)(flash_data & 0x000000FF);
   10858 			else if (size == 2)
   10859 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10860 			else if (size == 4)
   10861 				*data = (uint32_t)flash_data;
   10862 			break;
   10863 		} else {
   10864 			/*
   10865 			 * If we've gotten here, then things are probably
   10866 			 * completely hosed, but if the error condition is
   10867 			 * detected, it won't hurt to give it another try...
   10868 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10869 			 */
   10870 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10871 			if (hsfsts & HSFSTS_ERR) {
   10872 				/* Repeat for some time before giving up. */
   10873 				continue;
   10874 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10875 				break;
   10876 		}
   10877 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10878 
   10879 	return error;
   10880 }
   10881 
   10882 /******************************************************************************
   10883  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10884  *
   10885  * sc - pointer to wm_hw structure
   10886  * index - The index of the byte to read.
   10887  * data - Pointer to a byte to store the value read.
   10888  *****************************************************************************/
   10889 static int32_t
   10890 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10891 {
   10892 	int32_t status;
   10893 	uint32_t word = 0;
   10894 
   10895 	status = wm_read_ich8_data(sc, index, 1, &word);
   10896 	if (status == 0)
   10897 		*data = (uint8_t)word;
   10898 	else
   10899 		*data = 0;
   10900 
   10901 	return status;
   10902 }
   10903 
   10904 /******************************************************************************
   10905  * Reads a word from the NVM using the ICH8 flash access registers.
   10906  *
   10907  * sc - pointer to wm_hw structure
   10908  * index - The starting byte index of the word to read.
   10909  * data - Pointer to a word to store the value read.
   10910  *****************************************************************************/
   10911 static int32_t
   10912 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10913 {
   10914 	int32_t status;
   10915 	uint32_t word = 0;
   10916 
   10917 	status = wm_read_ich8_data(sc, index, 2, &word);
   10918 	if (status == 0)
   10919 		*data = (uint16_t)word;
   10920 	else
   10921 		*data = 0;
   10922 
   10923 	return status;
   10924 }
   10925 
   10926 /******************************************************************************
   10927  * Reads a dword from the NVM using the ICH8 flash access registers.
   10928  *
   10929  * sc - pointer to wm_hw structure
    10930  * index - The starting byte index of the dword to read.
    10931  * data - Pointer to a dword to store the value read.
   10932  *****************************************************************************/
   10933 static int32_t
   10934 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10935 {
   10936 	int32_t status;
   10937 
   10938 	status = wm_read_ich8_data(sc, index, 4, data);
   10939 	return status;
   10940 }
   10941 
   10942 /******************************************************************************
   10943  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10944  * register.
   10945  *
   10946  * sc - Struct containing variables accessed by shared code
   10947  * offset - offset of word in the EEPROM to read
   10948  * data - word read from the EEPROM
   10949  * words - number of words to read
   10950  *****************************************************************************/
   10951 static int
   10952 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10953 {
   10954 	int32_t  error = 0;
   10955 	uint32_t flash_bank = 0;
   10956 	uint32_t act_offset = 0;
   10957 	uint32_t bank_offset = 0;
   10958 	uint16_t word = 0;
   10959 	uint16_t i = 0;
   10960 
   10961 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10962 		device_xname(sc->sc_dev), __func__));
   10963 
   10964 	/*
   10965 	 * We need to know which is the valid flash bank.  In the event
   10966 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10967 	 * managing flash_bank.  So it cannot be trusted and needs
   10968 	 * to be updated with each read.
   10969 	 */
   10970 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10971 	if (error) {
   10972 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10973 			device_xname(sc->sc_dev)));
   10974 		flash_bank = 0;
   10975 	}
   10976 
   10977 	/*
   10978 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10979 	 * size
   10980 	 */
   10981 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10982 
   10983 	error = wm_get_swfwhw_semaphore(sc);
   10984 	if (error) {
   10985 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10986 		    __func__);
   10987 		return error;
   10988 	}
   10989 
   10990 	for (i = 0; i < words; i++) {
   10991 		/* The NVM part needs a byte offset, hence * 2 */
   10992 		act_offset = bank_offset + ((offset + i) * 2);
   10993 		error = wm_read_ich8_word(sc, act_offset, &word);
   10994 		if (error) {
   10995 			aprint_error_dev(sc->sc_dev,
   10996 			    "%s: failed to read NVM\n", __func__);
   10997 			break;
   10998 		}
   10999 		data[i] = word;
   11000 	}
   11001 
   11002 	wm_put_swfwhw_semaphore(sc);
   11003 	return error;
   11004 }
   11005 
   11006 /******************************************************************************
   11007  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11008  * register.
   11009  *
   11010  * sc - Struct containing variables accessed by shared code
   11011  * offset - offset of word in the EEPROM to read
   11012  * data - word read from the EEPROM
   11013  * words - number of words to read
   11014  *****************************************************************************/
   11015 static int
   11016 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11017 {
   11018 	int32_t  error = 0;
   11019 	uint32_t flash_bank = 0;
   11020 	uint32_t act_offset = 0;
   11021 	uint32_t bank_offset = 0;
   11022 	uint32_t dword = 0;
   11023 	uint16_t i = 0;
   11024 
   11025 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11026 		device_xname(sc->sc_dev), __func__));
   11027 
   11028 	/*
   11029 	 * We need to know which is the valid flash bank.  In the event
   11030 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11031 	 * managing flash_bank.  So it cannot be trusted and needs
   11032 	 * to be updated with each read.
   11033 	 */
   11034 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11035 	if (error) {
   11036 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11037 			device_xname(sc->sc_dev)));
   11038 		flash_bank = 0;
   11039 	}
   11040 
   11041 	/*
   11042 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11043 	 * size
   11044 	 */
   11045 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11046 
   11047 	error = wm_get_swfwhw_semaphore(sc);
   11048 	if (error) {
   11049 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11050 		    __func__);
   11051 		return error;
   11052 	}
   11053 
   11054 	for (i = 0; i < words; i++) {
   11055 		/* The NVM part needs a byte offset, hence * 2 */
   11056 		act_offset = bank_offset + ((offset + i) * 2);
   11057 		/* but we must read dword aligned, so mask ... */
   11058 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11059 		if (error) {
   11060 			aprint_error_dev(sc->sc_dev,
   11061 			    "%s: failed to read NVM\n", __func__);
   11062 			break;
   11063 		}
   11064 		/* ... and pick out low or high word */
   11065 		if ((act_offset & 0x2) == 0)
   11066 			data[i] = (uint16_t)(dword & 0xFFFF);
   11067 		else
   11068 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11069 	}
   11070 
   11071 	wm_put_swfwhw_semaphore(sc);
   11072 	return error;
   11073 }
   11074 
   11075 /* iNVM */
   11076 
   11077 static int
   11078 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11079 {
    11080 	int32_t  rv = -1;	/* fail unless the word is found */
   11081 	uint32_t invm_dword;
   11082 	uint16_t i;
   11083 	uint8_t record_type, word_address;
   11084 
   11085 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11086 		device_xname(sc->sc_dev), __func__));
   11087 
   11088 	for (i = 0; i < INVM_SIZE; i++) {
   11089 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11090 		/* Get record type */
   11091 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11092 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11093 			break;
   11094 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11095 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11096 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11097 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11098 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11099 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11100 			if (word_address == address) {
   11101 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11102 				rv = 0;
   11103 				break;
   11104 			}
   11105 		}
   11106 	}
   11107 
   11108 	return rv;
   11109 }
   11110 
   11111 static int
   11112 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11113 {
   11114 	int rv = 0;
   11115 	int i;
   11116 
   11117 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11118 		device_xname(sc->sc_dev), __func__));
   11119 
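	/*
	 * The iNVM autoloads only a handful of words; anything that
	 * cannot be read falls back to a documented default, an error,
	 * or NVM_RESERVED_WORD.
	 */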
   11120 	for (i = 0; i < words; i++) {
   11121 		switch (offset + i) {
   11122 		case NVM_OFF_MACADDR:
   11123 		case NVM_OFF_MACADDR1:
   11124 		case NVM_OFF_MACADDR2:
   11125 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11126 			if (rv != 0) {
   11127 				data[i] = 0xffff;
   11128 				rv = -1;
   11129 			}
   11130 			break;
   11131 		case NVM_OFF_CFG2:
   11132 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11133 			if (rv != 0) {
   11134 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11135 				rv = 0;
   11136 			}
   11137 			break;
   11138 		case NVM_OFF_CFG4:
   11139 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11140 			if (rv != 0) {
   11141 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11142 				rv = 0;
   11143 			}
   11144 			break;
   11145 		case NVM_OFF_LED_1_CFG:
   11146 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11147 			if (rv != 0) {
   11148 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11149 				rv = 0;
   11150 			}
   11151 			break;
   11152 		case NVM_OFF_LED_0_2_CFG:
   11153 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11154 			if (rv != 0) {
   11155 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11156 				rv = 0;
   11157 			}
   11158 			break;
   11159 		case NVM_OFF_ID_LED_SETTINGS:
   11160 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11161 			if (rv != 0) {
   11162 				*data = ID_LED_RESERVED_FFFF;
   11163 				rv = 0;
   11164 			}
   11165 			break;
   11166 		default:
   11167 			DPRINTF(WM_DEBUG_NVM,
   11168 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11169 			*data = NVM_RESERVED_WORD;
   11170 			break;
   11171 		}
   11172 	}
   11173 
   11174 	return rv;
   11175 }
   11176 
   11177 /* Lock, detecting NVM type, validate checksum, version and read */
   11178 
   11179 /*
   11180  * wm_nvm_acquire:
   11181  *
   11182  *	Perform the EEPROM handshake required on some chips.
   11183  */
   11184 static int
   11185 wm_nvm_acquire(struct wm_softc *sc)
   11186 {
   11187 	uint32_t reg;
   11188 	int x;
   11189 	int ret = 0;
   11190 
   11191 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11192 		device_xname(sc->sc_dev), __func__));
   11193 
   11194 	if (sc->sc_type >= WM_T_ICH8) {
   11195 		ret = wm_get_nvm_ich8lan(sc);
   11196 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11197 		ret = wm_get_swfwhw_semaphore(sc);
   11198 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11199 		/* This will also do wm_get_swsm_semaphore() if needed */
   11200 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11201 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11202 		ret = wm_get_swsm_semaphore(sc);
   11203 	}
   11204 
   11205 	if (ret) {
   11206 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11207 			__func__);
   11208 		return 1;
   11209 	}
   11210 
   11211 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11212 		reg = CSR_READ(sc, WMREG_EECD);
   11213 
   11214 		/* Request EEPROM access. */
   11215 		reg |= EECD_EE_REQ;
   11216 		CSR_WRITE(sc, WMREG_EECD, reg);
   11217 
   11218 		/* ..and wait for it to be granted. */
   11219 		for (x = 0; x < 1000; x++) {
   11220 			reg = CSR_READ(sc, WMREG_EECD);
   11221 			if (reg & EECD_EE_GNT)
   11222 				break;
   11223 			delay(5);
   11224 		}
   11225 		if ((reg & EECD_EE_GNT) == 0) {
   11226 			aprint_error_dev(sc->sc_dev,
   11227 			    "could not acquire EEPROM GNT\n");
   11228 			reg &= ~EECD_EE_REQ;
   11229 			CSR_WRITE(sc, WMREG_EECD, reg);
   11230 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11231 				wm_put_swfwhw_semaphore(sc);
   11232 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11233 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11234 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11235 				wm_put_swsm_semaphore(sc);
   11236 			return 1;
   11237 		}
   11238 	}
   11239 
   11240 	return 0;
   11241 }
   11242 
   11243 /*
   11244  * wm_nvm_release:
   11245  *
   11246  *	Release the EEPROM mutex.
   11247  */
   11248 static void
   11249 wm_nvm_release(struct wm_softc *sc)
   11250 {
   11251 	uint32_t reg;
   11252 
   11253 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11254 		device_xname(sc->sc_dev), __func__));
   11255 
   11256 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11257 		reg = CSR_READ(sc, WMREG_EECD);
   11258 		reg &= ~EECD_EE_REQ;
   11259 		CSR_WRITE(sc, WMREG_EECD, reg);
   11260 	}
   11261 
   11262 	if (sc->sc_type >= WM_T_ICH8) {
   11263 		wm_put_nvm_ich8lan(sc);
   11264 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11265 		wm_put_swfwhw_semaphore(sc);
   11266 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11267 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11268 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11269 		wm_put_swsm_semaphore(sc);
   11270 }
   11271 
   11272 static int
   11273 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11274 {
   11275 	uint32_t eecd = 0;
   11276 
   11277 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11278 	    || sc->sc_type == WM_T_82583) {
   11279 		eecd = CSR_READ(sc, WMREG_EECD);
   11280 
   11281 		/* Isolate bits 15 & 16 */
   11282 		eecd = ((eecd >> 15) & 0x03);
   11283 
   11284 		/* If both bits are set, device is Flash type */
   11285 		if (eecd == 0x03)
   11286 			return 0;
   11287 	}
   11288 	return 1;
   11289 }
   11290 
   11291 static int
   11292 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11293 {
   11294 	uint32_t eec;
   11295 
   11296 	eec = CSR_READ(sc, WMREG_EEC);
   11297 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11298 		return 1;
   11299 
   11300 	return 0;
   11301 }
   11302 
   11303 /*
   11304  * wm_nvm_validate_checksum
   11305  *
   11306  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11307  */
   11308 static int
   11309 wm_nvm_validate_checksum(struct wm_softc *sc)
   11310 {
   11311 	uint16_t checksum;
   11312 	uint16_t eeprom_data;
   11313 #ifdef WM_DEBUG
   11314 	uint16_t csum_wordaddr, valid_checksum;
   11315 #endif
   11316 	int i;
   11317 
   11318 	checksum = 0;
   11319 
   11320 	/* Don't check for I211 */
   11321 	if (sc->sc_type == WM_T_I211)
   11322 		return 0;
   11323 
   11324 #ifdef WM_DEBUG
   11325 	if (sc->sc_type == WM_T_PCH_LPT) {
   11326 		csum_wordaddr = NVM_OFF_COMPAT;
   11327 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11328 	} else {
   11329 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11330 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11331 	}
   11332 
   11333 	/* Dump EEPROM image for debug */
   11334 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11335 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11336 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11337 		/* XXX PCH_SPT? */
   11338 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11339 		if ((eeprom_data & valid_checksum) == 0) {
   11340 			DPRINTF(WM_DEBUG_NVM,
   11341 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11342 				device_xname(sc->sc_dev), eeprom_data,
   11343 				    valid_checksum));
   11344 		}
   11345 	}
   11346 
   11347 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11348 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11349 		for (i = 0; i < NVM_SIZE; i++) {
   11350 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11351 				printf("XXXX ");
   11352 			else
   11353 				printf("%04hx ", eeprom_data);
   11354 			if (i % 8 == 7)
   11355 				printf("\n");
   11356 		}
   11357 	}
   11358 
   11359 #endif /* WM_DEBUG */
   11360 
   11361 	for (i = 0; i < NVM_SIZE; i++) {
   11362 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11363 			return 1;
   11364 		checksum += eeprom_data;
   11365 	}
   11366 
   11367 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11368 #ifdef WM_DEBUG
   11369 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11370 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11371 #endif
   11372 	}
   11373 
   11374 	return 0;
   11375 }
   11376 
   11377 static void
   11378 wm_nvm_version_invm(struct wm_softc *sc)
   11379 {
   11380 	uint32_t dword;
   11381 
   11382 	/*
    11383 	 * Linux's code to decode the version is very strange, so we don't
    11384 	 * follow that algorithm and just use word 61 as the document
    11385 	 * describes.  Perhaps it's not perfect though...
   11386 	 *
   11387 	 * Example:
   11388 	 *
   11389 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11390 	 */
   11391 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11392 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11393 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11394 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11395 }
   11396 
   11397 static void
   11398 wm_nvm_version(struct wm_softc *sc)
   11399 {
   11400 	uint16_t major, minor, build, patch;
   11401 	uint16_t uid0, uid1;
   11402 	uint16_t nvm_data;
   11403 	uint16_t off;
   11404 	bool check_version = false;
   11405 	bool check_optionrom = false;
   11406 	bool have_build = false;
   11407 
   11408 	/*
   11409 	 * Version format:
   11410 	 *
   11411 	 * XYYZ
   11412 	 * X0YZ
   11413 	 * X0YY
   11414 	 *
   11415 	 * Example:
   11416 	 *
   11417 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11418 	 *	82571	0x50a6	5.10.6?
   11419 	 *	82572	0x506a	5.6.10?
   11420 	 *	82572EI	0x5069	5.6.9?
   11421 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11422 	 *		0x2013	2.1.3?
    11423 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11424 	 */
   11425 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11426 	switch (sc->sc_type) {
   11427 	case WM_T_82571:
   11428 	case WM_T_82572:
   11429 	case WM_T_82574:
   11430 	case WM_T_82583:
   11431 		check_version = true;
   11432 		check_optionrom = true;
   11433 		have_build = true;
   11434 		break;
   11435 	case WM_T_82575:
   11436 	case WM_T_82576:
   11437 	case WM_T_82580:
   11438 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11439 			check_version = true;
   11440 		break;
   11441 	case WM_T_I211:
   11442 		wm_nvm_version_invm(sc);
   11443 		goto printver;
   11444 	case WM_T_I210:
   11445 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11446 			wm_nvm_version_invm(sc);
   11447 			goto printver;
   11448 		}
   11449 		/* FALLTHROUGH */
   11450 	case WM_T_I350:
   11451 	case WM_T_I354:
   11452 		check_version = true;
   11453 		check_optionrom = true;
   11454 		break;
   11455 	default:
   11456 		return;
   11457 	}
   11458 	if (check_version) {
   11459 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11460 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11461 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11462 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11463 			build = nvm_data & NVM_BUILD_MASK;
   11464 			have_build = true;
   11465 		} else
   11466 			minor = nvm_data & 0x00ff;
   11467 
    11468 		/* The minor is stored as BCD; convert it (e.g. 0x10 -> 10) */
   11469 		minor = (minor / 16) * 10 + (minor % 16);
   11470 		sc->sc_nvm_ver_major = major;
   11471 		sc->sc_nvm_ver_minor = minor;
   11472 
   11473 printver:
   11474 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11475 		    sc->sc_nvm_ver_minor);
   11476 		if (have_build) {
   11477 			sc->sc_nvm_ver_build = build;
   11478 			aprint_verbose(".%d", build);
   11479 		}
   11480 	}
   11481 	if (check_optionrom) {
   11482 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11483 		/* Option ROM Version */
   11484 		if ((off != 0x0000) && (off != 0xffff)) {
   11485 			off += NVM_COMBO_VER_OFF;
   11486 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11487 			wm_nvm_read(sc, off, 1, &uid0);
   11488 			if ((uid0 != 0) && (uid0 != 0xffff)
   11489 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11490 				/* 16bits */
   11491 				major = uid0 >> 8;
   11492 				build = (uid0 << 8) | (uid1 >> 8);
   11493 				patch = uid1 & 0x00ff;
   11494 				aprint_verbose(", option ROM Version %d.%d.%d",
   11495 				    major, build, patch);
   11496 			}
   11497 		}
   11498 	}
   11499 
	/* Re-read UID1: the option ROM check above may have clobbered it */
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11502 }
   11503 
   11504 /*
   11505  * wm_nvm_read:
   11506  *
    11507  *	Read data from the NVM (EEPROM, flash or iNVM).
   11508  */
   11509 static int
   11510 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11511 {
   11512 	int rv;
   11513 
   11514 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11515 		device_xname(sc->sc_dev), __func__));
   11516 
   11517 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11518 		return 1;
   11519 
   11520 	if (wm_nvm_acquire(sc))
   11521 		return 1;
   11522 
   11523 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11524 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11525 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11526 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11527 	else if (sc->sc_type == WM_T_PCH_SPT)
   11528 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11529 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11530 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11531 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11532 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11533 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11534 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11535 	else
   11536 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11537 
   11538 	wm_nvm_release(sc);
   11539 	return rv;
   11540 }
   11541 
   11542 /*
   11543  * Hardware semaphores.
    11544  * Very complex...
   11545  */
   11546 
   11547 static int
   11548 wm_get_null(struct wm_softc *sc)
   11549 {
   11550 
   11551 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11552 		device_xname(sc->sc_dev), __func__));
   11553 	return 0;
   11554 }
   11555 
   11556 static void
   11557 wm_put_null(struct wm_softc *sc)
   11558 {
   11559 
   11560 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11561 		device_xname(sc->sc_dev), __func__));
   11562 	return;
   11563 }
   11564 
   11565 /*
    11566  * Get hardware semaphore: wait for SMBI to clear, then claim SWESMBI.
   11567  * Same as e1000_get_hw_semaphore_generic()
   11568  */
   11569 static int
   11570 wm_get_swsm_semaphore(struct wm_softc *sc)
   11571 {
   11572 	int32_t timeout;
   11573 	uint32_t swsm;
   11574 
   11575 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11576 		device_xname(sc->sc_dev), __func__));
   11577 	KASSERT(sc->sc_nvm_wordsize > 0);
   11578 
   11579 	/* Get the SW semaphore. */
   11580 	timeout = sc->sc_nvm_wordsize + 1;
   11581 	while (timeout) {
   11582 		swsm = CSR_READ(sc, WMREG_SWSM);
   11583 
   11584 		if ((swsm & SWSM_SMBI) == 0)
   11585 			break;
   11586 
   11587 		delay(50);
   11588 		timeout--;
   11589 	}
   11590 
   11591 	if (timeout == 0) {
   11592 		aprint_error_dev(sc->sc_dev,
   11593 		    "could not acquire SWSM SMBI\n");
   11594 		return 1;
   11595 	}
   11596 
   11597 	/* Get the FW semaphore. */
   11598 	timeout = sc->sc_nvm_wordsize + 1;
   11599 	while (timeout) {
   11600 		swsm = CSR_READ(sc, WMREG_SWSM);
   11601 		swsm |= SWSM_SWESMBI;
   11602 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11603 		/* If we managed to set the bit we got the semaphore. */
   11604 		swsm = CSR_READ(sc, WMREG_SWSM);
   11605 		if (swsm & SWSM_SWESMBI)
   11606 			break;
   11607 
   11608 		delay(50);
   11609 		timeout--;
   11610 	}
   11611 
   11612 	if (timeout == 0) {
   11613 		aprint_error_dev(sc->sc_dev,
   11614 		    "could not acquire SWSM SWESMBI\n");
   11615 		/* Release semaphores */
   11616 		wm_put_swsm_semaphore(sc);
   11617 		return 1;
   11618 	}
   11619 	return 0;
   11620 }
   11621 
   11622 /*
   11623  * Put hardware semaphore.
   11624  * Same as e1000_put_hw_semaphore_generic()
   11625  */
   11626 static void
   11627 wm_put_swsm_semaphore(struct wm_softc *sc)
   11628 {
   11629 	uint32_t swsm;
   11630 
   11631 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11632 		device_xname(sc->sc_dev), __func__));
   11633 
   11634 	swsm = CSR_READ(sc, WMREG_SWSM);
   11635 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11636 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11637 }
   11638 
   11639 /*
   11640  * Get SW/FW semaphore.
   11641  * Same as e1000_acquire_swfw_sync_82575().
   11642  */
   11643 static int
   11644 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11645 {
   11646 	uint32_t swfw_sync;
   11647 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11648 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11649 	int timeout = 200;
   11650 
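	/*
	 * The same resource is represented twice in SW_FW_SYNC: once in the
	 * software half (SWFW_SOFT_SHIFT) and once in the firmware half
	 * (SWFW_FIRM_SHIFT).  It is free only when neither bit is set.
	 */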
   11651 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11652 		device_xname(sc->sc_dev), __func__));
   11653 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11654 
   11655 	for (timeout = 0; timeout < 200; timeout++) {
   11656 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11657 			if (wm_get_swsm_semaphore(sc)) {
   11658 				aprint_error_dev(sc->sc_dev,
   11659 				    "%s: failed to get semaphore\n",
   11660 				    __func__);
   11661 				return 1;
   11662 			}
   11663 		}
   11664 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11665 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11666 			swfw_sync |= swmask;
   11667 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11668 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11669 				wm_put_swsm_semaphore(sc);
   11670 			return 0;
   11671 		}
   11672 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11673 			wm_put_swsm_semaphore(sc);
   11674 		delay(5000);
   11675 	}
   11676 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11677 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11678 	return 1;
   11679 }
   11680 
   11681 static void
   11682 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11683 {
   11684 	uint32_t swfw_sync;
   11685 
   11686 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11687 		device_xname(sc->sc_dev), __func__));
   11688 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11689 
   11690 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11691 		while (wm_get_swsm_semaphore(sc) != 0)
   11692 			continue;
   11693 	}
   11694 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11695 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11696 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11697 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11698 		wm_put_swsm_semaphore(sc);
   11699 }
   11700 
   11701 static int
   11702 wm_get_phy_82575(struct wm_softc *sc)
   11703 {
   11704 
   11705 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11706 		device_xname(sc->sc_dev), __func__));
   11707 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11708 }
   11709 
   11710 static void
   11711 wm_put_phy_82575(struct wm_softc *sc)
   11712 {
   11713 
   11714 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11715 		device_xname(sc->sc_dev), __func__));
   11716 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11717 }
   11718 
   11719 static int
   11720 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11721 {
   11722 	uint32_t ext_ctrl;
   11723 	int timeout = 200;
   11724 
   11725 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11726 		device_xname(sc->sc_dev), __func__));
   11727 
   11728 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11729 	for (timeout = 0; timeout < 200; timeout++) {
   11730 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11731 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11732 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11733 
   11734 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11735 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11736 			return 0;
   11737 		delay(5000);
   11738 	}
   11739 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11740 	    device_xname(sc->sc_dev), ext_ctrl);
   11741 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11742 	return 1;
   11743 }
   11744 
   11745 static void
   11746 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11747 {
   11748 	uint32_t ext_ctrl;
   11749 
   11750 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11751 		device_xname(sc->sc_dev), __func__));
   11752 
   11753 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11754 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11755 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11756 
   11757 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11758 }
   11759 
   11760 static int
   11761 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11762 {
   11763 	uint32_t ext_ctrl;
   11764 	int timeout;
   11765 
   11766 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11767 		device_xname(sc->sc_dev), __func__));
   11768 	mutex_enter(sc->sc_ich_phymtx);
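	/* First wait for any current owner to release the MDIO ownership bit */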
   11769 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11770 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11771 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11772 			break;
   11773 		delay(1000);
   11774 	}
   11775 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11776 		printf("%s: SW has already locked the resource\n",
   11777 		    device_xname(sc->sc_dev));
   11778 		goto out;
   11779 	}
   11780 
   11781 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11782 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11783 	for (timeout = 0; timeout < 1000; timeout++) {
   11784 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11785 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11786 			break;
   11787 		delay(1000);
   11788 	}
   11789 	if (timeout >= 1000) {
   11790 		printf("%s: failed to acquire semaphore\n",
   11791 		    device_xname(sc->sc_dev));
   11792 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11793 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11794 		goto out;
   11795 	}
   11796 	return 0;
   11797 
   11798 out:
   11799 	mutex_exit(sc->sc_ich_phymtx);
   11800 	return 1;
   11801 }
   11802 
   11803 static void
   11804 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11805 {
   11806 	uint32_t ext_ctrl;
   11807 
   11808 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11809 		device_xname(sc->sc_dev), __func__));
   11810 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11811 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11812 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11813 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11814 	} else {
   11815 		printf("%s: Semaphore unexpectedly released\n",
   11816 		    device_xname(sc->sc_dev));
   11817 	}
   11818 
   11819 	mutex_exit(sc->sc_ich_phymtx);
   11820 }
   11821 
   11822 static int
   11823 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11824 {
   11825 
   11826 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11827 		device_xname(sc->sc_dev), __func__));
   11828 	mutex_enter(sc->sc_ich_nvmmtx);
   11829 
   11830 	return 0;
   11831 }
   11832 
   11833 static void
   11834 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11835 {
   11836 
   11837 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11838 		device_xname(sc->sc_dev), __func__));
   11839 	mutex_exit(sc->sc_ich_nvmmtx);
   11840 }
   11841 
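/*
 * Get the 82573-family semaphore by claiming MDIO software ownership
 * in EXTCNFCTR, retrying (with timeout) until the hardware accepts the bit.
 */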
   11842 static int
   11843 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11844 {
   11845 	int i = 0;
   11846 	uint32_t reg;
   11847 
   11848 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11849 		device_xname(sc->sc_dev), __func__));
   11850 
   11851 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11852 	do {
   11853 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11854 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11855 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11856 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11857 			break;
   11858 		delay(2*1000);
   11859 		i++;
   11860 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11861 
   11862 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11863 		wm_put_hw_semaphore_82573(sc);
   11864 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11865 		    device_xname(sc->sc_dev));
   11866 		return -1;
   11867 	}
   11868 
   11869 	return 0;
   11870 }
   11871 
   11872 static void
   11873 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11874 {
   11875 	uint32_t reg;
   11876 
   11877 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11878 		device_xname(sc->sc_dev), __func__));
   11879 
   11880 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11881 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11882 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11883 }
   11884 
   11885 /*
   11886  * Management mode and power management related subroutines.
   11887  * BMC, AMT, suspend/resume and EEE.
   11888  */
   11889 
   11890 #ifdef WM_WOL
   11891 static int
   11892 wm_check_mng_mode(struct wm_softc *sc)
   11893 {
   11894 	int rv;
   11895 
   11896 	switch (sc->sc_type) {
   11897 	case WM_T_ICH8:
   11898 	case WM_T_ICH9:
   11899 	case WM_T_ICH10:
   11900 	case WM_T_PCH:
   11901 	case WM_T_PCH2:
   11902 	case WM_T_PCH_LPT:
   11903 	case WM_T_PCH_SPT:
   11904 		rv = wm_check_mng_mode_ich8lan(sc);
   11905 		break;
   11906 	case WM_T_82574:
   11907 	case WM_T_82583:
   11908 		rv = wm_check_mng_mode_82574(sc);
   11909 		break;
   11910 	case WM_T_82571:
   11911 	case WM_T_82572:
   11912 	case WM_T_82573:
   11913 	case WM_T_80003:
   11914 		rv = wm_check_mng_mode_generic(sc);
   11915 		break;
   11916 	default:
    11917 		/* nothing to do */
   11918 		rv = 0;
   11919 		break;
   11920 	}
   11921 
   11922 	return rv;
   11923 }
   11924 
   11925 static int
   11926 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11927 {
   11928 	uint32_t fwsm;
   11929 
   11930 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11931 
   11932 	if (((fwsm & FWSM_FW_VALID) != 0)
   11933 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11934 		return 1;
   11935 
   11936 	return 0;
   11937 }
   11938 
   11939 static int
   11940 wm_check_mng_mode_82574(struct wm_softc *sc)
   11941 {
   11942 	uint16_t data;
   11943 
   11944 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11945 
   11946 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11947 		return 1;
   11948 
   11949 	return 0;
   11950 }
   11951 
   11952 static int
   11953 wm_check_mng_mode_generic(struct wm_softc *sc)
   11954 {
   11955 	uint32_t fwsm;
   11956 
   11957 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11958 
   11959 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11960 		return 1;
   11961 
   11962 	return 0;
   11963 }
   11964 #endif /* WM_WOL */
   11965 
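/*
 * Return 1 if management pass-through (forwarding management packets to
 * the host) should be enabled, 0 otherwise.  Requires ASF firmware and
 * MANC_RECV_TCO_EN; the details differ per device family below.
 */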
   11966 static int
   11967 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11968 {
   11969 	uint32_t manc, fwsm, factps;
   11970 
   11971 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11972 		return 0;
   11973 
   11974 	manc = CSR_READ(sc, WMREG_MANC);
   11975 
   11976 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11977 		device_xname(sc->sc_dev), manc));
   11978 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11979 		return 0;
   11980 
   11981 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11982 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11983 		factps = CSR_READ(sc, WMREG_FACTPS);
   11984 		if (((factps & FACTPS_MNGCG) == 0)
   11985 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11986 			return 1;
   11987 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11988 		uint16_t data;
   11989 
   11990 		factps = CSR_READ(sc, WMREG_FACTPS);
   11991 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11992 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11993 			device_xname(sc->sc_dev), factps, data));
   11994 		if (((factps & FACTPS_MNGCG) == 0)
   11995 		    && ((data & NVM_CFG2_MNGM_MASK)
   11996 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11997 			return 1;
   11998 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11999 	    && ((manc & MANC_ASF_EN) == 0))
   12000 		return 1;
   12001 
   12002 	return 0;
   12003 }
   12004 
   12005 static bool
   12006 wm_phy_resetisblocked(struct wm_softc *sc)
   12007 {
   12008 	bool blocked = false;
   12009 	uint32_t reg;
   12010 	int i = 0;
   12011 
   12012 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12013 		device_xname(sc->sc_dev), __func__));
   12014 
   12015 	switch (sc->sc_type) {
   12016 	case WM_T_ICH8:
   12017 	case WM_T_ICH9:
   12018 	case WM_T_ICH10:
   12019 	case WM_T_PCH:
   12020 	case WM_T_PCH2:
   12021 	case WM_T_PCH_LPT:
   12022 	case WM_T_PCH_SPT:
   12023 		do {
   12024 			reg = CSR_READ(sc, WMREG_FWSM);
   12025 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12026 				blocked = true;
   12027 				delay(10*1000);
   12028 				continue;
   12029 			}
   12030 			blocked = false;
   12031 		} while (blocked && (i++ < 30));
   12032 		return blocked;
   12034 	case WM_T_82571:
   12035 	case WM_T_82572:
   12036 	case WM_T_82573:
   12037 	case WM_T_82574:
   12038 	case WM_T_82583:
   12039 	case WM_T_80003:
   12040 		reg = CSR_READ(sc, WMREG_MANC);
   12041 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12042 			return true;
   12043 		else
   12044 			return false;
   12046 	default:
    12047 		/* PHY reset is never blocked on these devices */
   12048 		break;
   12049 	}
   12050 
   12051 	return false;
   12052 }
   12053 
   12054 static void
   12055 wm_get_hw_control(struct wm_softc *sc)
   12056 {
   12057 	uint32_t reg;
   12058 
   12059 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12060 		device_xname(sc->sc_dev), __func__));
   12061 
   12062 	if (sc->sc_type == WM_T_82573) {
   12063 		reg = CSR_READ(sc, WMREG_SWSM);
   12064 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12065 	} else if (sc->sc_type >= WM_T_82571) {
   12066 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12067 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12068 	}
   12069 }
   12070 
   12071 static void
   12072 wm_release_hw_control(struct wm_softc *sc)
   12073 {
   12074 	uint32_t reg;
   12075 
   12076 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12077 		device_xname(sc->sc_dev), __func__));
   12078 
   12079 	if (sc->sc_type == WM_T_82573) {
   12080 		reg = CSR_READ(sc, WMREG_SWSM);
   12081 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12082 	} else if (sc->sc_type >= WM_T_82571) {
   12083 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12084 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12085 	}
   12086 }
   12087 
   12088 static void
   12089 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12090 {
   12091 	uint32_t reg;
   12092 
   12093 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12094 		device_xname(sc->sc_dev), __func__));
   12095 
   12096 	if (sc->sc_type < WM_T_PCH2)
   12097 		return;
   12098 
   12099 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12100 
   12101 	if (gate)
   12102 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12103 	else
   12104 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12105 
   12106 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12107 }
   12108 
   12109 static void
   12110 wm_smbustopci(struct wm_softc *sc)
   12111 {
   12112 	uint32_t fwsm, reg;
   12113 	int rv = 0;
   12114 
   12115 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12116 		device_xname(sc->sc_dev), __func__));
   12117 
   12118 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12119 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12120 
   12121 	/* Disable ULP */
   12122 	wm_ulp_disable(sc);
   12123 
   12124 	/* Acquire PHY semaphore */
   12125 	sc->phy.acquire(sc);
   12126 
   12127 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12128 	switch (sc->sc_type) {
   12129 	case WM_T_PCH_LPT:
   12130 	case WM_T_PCH_SPT:
   12131 		if (wm_phy_is_accessible_pchlan(sc))
   12132 			break;
   12133 
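		/*
		 * The PHY was not accessible over MDIO: force SMBus mode
		 * in the MAC and retry the accessibility check below.
		 */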
   12134 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12135 		reg |= CTRL_EXT_FORCE_SMBUS;
   12136 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12137 #if 0
   12138 		/* XXX Isn't this required??? */
   12139 		CSR_WRITE_FLUSH(sc);
   12140 #endif
   12141 		delay(50 * 1000);
   12142 		/* FALLTHROUGH */
   12143 	case WM_T_PCH2:
   12144 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12145 			break;
   12146 		/* FALLTHROUGH */
   12147 	case WM_T_PCH:
   12148 		if (sc->sc_type == WM_T_PCH)
   12149 			if ((fwsm & FWSM_FW_VALID) != 0)
   12150 				break;
   12151 
   12152 		if (wm_phy_resetisblocked(sc) == true) {
   12153 			printf("XXX reset is blocked(3)\n");
   12154 			break;
   12155 		}
   12156 
   12157 		wm_toggle_lanphypc_pch_lpt(sc);
   12158 
   12159 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12160 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12161 				break;
   12162 
   12163 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12164 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12165 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12166 
   12167 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12168 				break;
   12169 			rv = -1;
   12170 		}
   12171 		break;
   12172 	default:
   12173 		break;
   12174 	}
   12175 
   12176 	/* Release semaphore */
   12177 	sc->phy.release(sc);
   12178 
   12179 	if (rv == 0) {
   12180 		if (wm_phy_resetisblocked(sc)) {
   12181 			printf("XXX reset is blocked(4)\n");
   12182 			goto out;
   12183 		}
   12184 		wm_reset_phy(sc);
   12185 		if (wm_phy_resetisblocked(sc))
    12186 			printf("XXX reset is blocked(5)\n");
   12187 	}
   12188 
   12189 out:
   12190 	/*
   12191 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12192 	 */
   12193 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12194 		delay(10*1000);
   12195 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12196 	}
   12197 }
   12198 
   12199 static void
   12200 wm_init_manageability(struct wm_softc *sc)
   12201 {
   12202 
   12203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12204 		device_xname(sc->sc_dev), __func__));
   12205 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12206 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12207 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12208 
   12209 		/* Disable hardware interception of ARP */
   12210 		manc &= ~MANC_ARP_EN;
   12211 
   12212 		/* Enable receiving management packets to the host */
   12213 		if (sc->sc_type >= WM_T_82571) {
   12214 			manc |= MANC_EN_MNG2HOST;
   12215 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   12216 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12217 		}
   12218 
   12219 		CSR_WRITE(sc, WMREG_MANC, manc);
   12220 	}
   12221 }
   12222 
   12223 static void
   12224 wm_release_manageability(struct wm_softc *sc)
   12225 {
   12226 
   12227 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12228 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12229 
   12230 		manc |= MANC_ARP_EN;
   12231 		if (sc->sc_type >= WM_T_82571)
   12232 			manc &= ~MANC_EN_MNG2HOST;
   12233 
   12234 		CSR_WRITE(sc, WMREG_MANC, manc);
   12235 	}
   12236 }
   12237 
   12238 static void
   12239 wm_get_wakeup(struct wm_softc *sc)
   12240 {
   12241 
   12242 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12243 	switch (sc->sc_type) {
   12244 	case WM_T_82573:
   12245 	case WM_T_82583:
   12246 		sc->sc_flags |= WM_F_HAS_AMT;
   12247 		/* FALLTHROUGH */
   12248 	case WM_T_80003:
   12249 	case WM_T_82575:
   12250 	case WM_T_82576:
   12251 	case WM_T_82580:
   12252 	case WM_T_I350:
   12253 	case WM_T_I354:
   12254 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12255 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12256 		/* FALLTHROUGH */
   12257 	case WM_T_82541:
   12258 	case WM_T_82541_2:
   12259 	case WM_T_82547:
   12260 	case WM_T_82547_2:
   12261 	case WM_T_82571:
   12262 	case WM_T_82572:
   12263 	case WM_T_82574:
   12264 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12265 		break;
   12266 	case WM_T_ICH8:
   12267 	case WM_T_ICH9:
   12268 	case WM_T_ICH10:
   12269 	case WM_T_PCH:
   12270 	case WM_T_PCH2:
   12271 	case WM_T_PCH_LPT:
   12272 	case WM_T_PCH_SPT:
   12273 		sc->sc_flags |= WM_F_HAS_AMT;
   12274 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12275 		break;
   12276 	default:
   12277 		break;
   12278 	}
   12279 
   12280 	/* 1: HAS_MANAGE */
   12281 	if (wm_enable_mng_pass_thru(sc) != 0)
   12282 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12283 
   12284 #ifdef WM_DEBUG
   12285 	printf("\n");
   12286 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12287 		printf("HAS_AMT,");
   12288 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12289 		printf("ARC_SUBSYS_VALID,");
   12290 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12291 		printf("ASF_FIRMWARE_PRES,");
   12292 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12293 		printf("HAS_MANAGE,");
   12294 	printf("\n");
   12295 #endif
   12296 	/*
    12297 	 * Note that the WOL flags are set later, after the EEPROM
    12298 	 * machinery has been reset and initialized.
   12299 	 */
   12300 }
   12301 
   12302 /*
   12303  * Unconfigure Ultra Low Power mode.
   12304  * Only for I217 and newer (see below).
   12305  */
   12306 static void
   12307 wm_ulp_disable(struct wm_softc *sc)
   12308 {
   12309 	uint32_t reg;
   12310 	int i = 0;
   12311 
   12312 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12313 		device_xname(sc->sc_dev), __func__));
    12314 	/* Exclude devices that do not support ULP */
   12315 	if ((sc->sc_type < WM_T_PCH_LPT)
   12316 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12317 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12318 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12319 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12320 		return;
   12321 
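	/*
	 * If valid ME firmware is present, ask it to un-configure ULP;
	 * otherwise do it by hand through the PHY and MAC registers below.
	 */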
   12322 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12323 		/* Request ME un-configure ULP mode in the PHY */
   12324 		reg = CSR_READ(sc, WMREG_H2ME);
   12325 		reg &= ~H2ME_ULP;
   12326 		reg |= H2ME_ENFORCE_SETTINGS;
   12327 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12328 
   12329 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12330 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12331 			if (i++ == 30) {
   12332 				printf("%s timed out\n", __func__);
   12333 				return;
   12334 			}
   12335 			delay(10 * 1000);
   12336 		}
   12337 		reg = CSR_READ(sc, WMREG_H2ME);
   12338 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12339 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12340 
   12341 		return;
   12342 	}
   12343 
   12344 	/* Acquire semaphore */
   12345 	sc->phy.acquire(sc);
   12346 
   12347 	/* Toggle LANPHYPC */
   12348 	wm_toggle_lanphypc_pch_lpt(sc);
   12349 
   12350 	/* Unforce SMBus mode in PHY */
   12351 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12352 	if (reg == 0x0000 || reg == 0xffff) {
   12353 		uint32_t reg2;
   12354 
   12355 		printf("%s: Force SMBus first.\n", __func__);
   12356 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12357 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12358 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12359 		delay(50 * 1000);
   12360 
   12361 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12362 	}
   12363 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12364 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12365 
   12366 	/* Unforce SMBus mode in MAC */
   12367 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12368 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12369 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12370 
   12371 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12372 	reg |= HV_PM_CTRL_K1_ENA;
   12373 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12374 
   12375 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12376 	reg &= ~(I218_ULP_CONFIG1_IND
   12377 	    | I218_ULP_CONFIG1_STICKY_ULP
   12378 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12379 	    | I218_ULP_CONFIG1_WOL_HOST
   12380 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12381 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12382 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12383 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12384 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12385 	reg |= I218_ULP_CONFIG1_START;
   12386 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12387 
   12388 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12389 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12390 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12391 
   12392 	/* Release semaphore */
   12393 	sc->phy.release(sc);
   12394 	wm_gmii_reset(sc);
   12395 	delay(50 * 1000);
   12396 }
   12397 
   12398 /* WOL in the newer chipset interfaces (pchlan) */
   12399 static void
   12400 wm_enable_phy_wakeup(struct wm_softc *sc)
   12401 {
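	/* XXX Not implemented yet; the outline below is only a placeholder. */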
   12402 #if 0
   12403 	uint16_t preg;
   12404 
   12405 	/* Copy MAC RARs to PHY RARs */
   12406 
   12407 	/* Copy MAC MTA to PHY MTA */
   12408 
   12409 	/* Configure PHY Rx Control register */
   12410 
   12411 	/* Enable PHY wakeup in MAC register */
   12412 
   12413 	/* Configure and enable PHY wakeup in PHY registers */
   12414 
   12415 	/* Activate PHY wakeup */
   12416 
   12417 	/* XXX */
   12418 #endif
   12419 }
   12420 
   12421 /* Power down workaround on D3 */
   12422 static void
   12423 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12424 {
   12425 	uint32_t reg;
   12426 	int i;
   12427 
   12428 	for (i = 0; i < 2; i++) {
   12429 		/* Disable link */
   12430 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12431 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12432 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12433 
   12434 		/*
   12435 		 * Call gig speed drop workaround on Gig disable before
   12436 		 * accessing any PHY registers
   12437 		 */
   12438 		if (sc->sc_type == WM_T_ICH8)
   12439 			wm_gig_downshift_workaround_ich8lan(sc);
   12440 
   12441 		/* Write VR power-down enable */
   12442 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12443 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12444 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12445 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12446 
   12447 		/* Read it back and test */
   12448 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12449 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12450 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12451 			break;
   12452 
   12453 		/* Issue PHY reset and repeat at most one more time */
   12454 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12455 	}
   12456 }
   12457 
   12458 static void
   12459 wm_enable_wakeup(struct wm_softc *sc)
   12460 {
   12461 	uint32_t reg, pmreg;
   12462 	pcireg_t pmode;
   12463 
   12464 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12465 		device_xname(sc->sc_dev), __func__));
   12466 
   12467 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12468 		&pmreg, NULL) == 0)
   12469 		return;
   12470 
   12471 	/* Advertise the wakeup capability */
   12472 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12473 	    | CTRL_SWDPIN(3));
   12474 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12475 
   12476 	/* ICH workaround */
   12477 	switch (sc->sc_type) {
   12478 	case WM_T_ICH8:
   12479 	case WM_T_ICH9:
   12480 	case WM_T_ICH10:
   12481 	case WM_T_PCH:
   12482 	case WM_T_PCH2:
   12483 	case WM_T_PCH_LPT:
   12484 	case WM_T_PCH_SPT:
   12485 		/* Disable gig during WOL */
   12486 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12487 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12488 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12489 		if (sc->sc_type == WM_T_PCH)
   12490 			wm_gmii_reset(sc);
   12491 
   12492 		/* Power down workaround */
   12493 		if (sc->sc_phytype == WMPHY_82577) {
   12494 			struct mii_softc *child;
   12495 
   12496 			/* Assume that the PHY is copper */
   12497 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    12498 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   12499 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12500 				    (768 << 5) | 25, 0x0444); /* magic num */
   12501 		}
   12502 		break;
   12503 	default:
   12504 		break;
   12505 	}
   12506 
   12507 	/* Keep the laser running on fiber adapters */
   12508 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12509 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12510 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12511 		reg |= CTRL_EXT_SWDPIN(3);
   12512 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12513 	}
   12514 
   12515 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12516 #if 0	/* for the multicast packet */
   12517 	reg |= WUFC_MC;
   12518 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12519 #endif
   12520 
   12521 	if (sc->sc_type >= WM_T_PCH)
   12522 		wm_enable_phy_wakeup(sc);
   12523 	else {
   12524 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12525 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12526 	}
   12527 
   12528 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12529 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12530 		|| (sc->sc_type == WM_T_PCH2))
   12531 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12532 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12533 
   12534 	/* Request PME */
   12535 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12536 #if 0
   12537 	/* Disable WOL */
   12538 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12539 #else
   12540 	/* For WOL */
   12541 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12542 #endif
   12543 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12544 }
   12545 
    12546 /* LPLU (Low Power Link Up) */
   12547 
   12548 static void
   12549 wm_lplu_d0_disable(struct wm_softc *sc)
   12550 {
   12551 	uint32_t reg;
   12552 
   12553 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12554 		device_xname(sc->sc_dev), __func__));
   12555 
   12556 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12557 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12558 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12559 }
   12560 
   12561 static void
   12562 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12563 {
   12564 	uint32_t reg;
   12565 
   12566 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12567 		device_xname(sc->sc_dev), __func__));
   12568 
   12569 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12570 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12571 	reg |= HV_OEM_BITS_ANEGNOW;
   12572 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12573 }
   12574 
    12575 /* EEE (Energy Efficient Ethernet) */
   12576 
   12577 static void
   12578 wm_set_eee_i350(struct wm_softc *sc)
   12579 {
   12580 	uint32_t ipcnfg, eeer;
   12581 
   12582 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12583 	eeer = CSR_READ(sc, WMREG_EEER);
   12584 
   12585 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12586 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12587 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12588 		    | EEER_LPI_FC);
   12589 	} else {
   12590 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12591 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12592 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12593 		    | EEER_LPI_FC);
   12594 	}
   12595 
   12596 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12597 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12598 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12599 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12600 }
   12601 
   12602 /*
   12603  * Workarounds (mainly PHY related).
   12604  * Basically, PHY's workarounds are in the PHY drivers.
   12605  */
   12606 
   12607 /* Work-around for 82566 Kumeran PCS lock loss */
   12608 static void
   12609 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12610 {
   12611 #if 0
   12612 	int miistatus, active, i;
   12613 	int reg;
   12614 
   12615 	miistatus = sc->sc_mii.mii_media_status;
   12616 
   12617 	/* If the link is not up, do nothing */
   12618 	if ((miistatus & IFM_ACTIVE) == 0)
   12619 		return;
   12620 
   12621 	active = sc->sc_mii.mii_media_active;
   12622 
   12623 	/* Nothing to do if the link is other than 1Gbps */
   12624 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12625 		return;
   12626 
   12627 	for (i = 0; i < 10; i++) {
   12628 		/* read twice */
   12629 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12630 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12631 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12632 			goto out;	/* GOOD! */
   12633 
   12634 		/* Reset the PHY */
   12635 		wm_gmii_reset(sc);
   12636 		delay(5*1000);
   12637 	}
   12638 
   12639 	/* Disable GigE link negotiation */
   12640 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12641 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12642 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12643 
   12644 	/*
   12645 	 * Call gig speed drop workaround on Gig disable before accessing
   12646 	 * any PHY registers.
   12647 	 */
   12648 	wm_gig_downshift_workaround_ich8lan(sc);
   12649 
   12650 out:
   12651 	return;
   12652 #endif
   12653 }
   12654 
    12655 /* Workaround for the "WOL from S5 stops working" issue (IGP3 PHYs only) */
   12656 static void
   12657 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12658 {
   12659 	uint16_t kmrn_reg;
   12660 
   12661 	/* Only for igp3 */
   12662 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12663 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12664 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12665 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12666 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12667 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12668 	}
   12669 }
   12670 
   12671 /*
    12672  * Workarounds for the PCH (82577/82578) PHYs
    12673  * XXX should be moved to a new PHY driver?
   12674  */
   12675 static void
   12676 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12677 {
   12678 
   12679 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12680 		device_xname(sc->sc_dev), __func__));
   12681 	KASSERT(sc->sc_type == WM_T_PCH);
   12682 
   12683 	if (sc->sc_phytype == WMPHY_82577)
   12684 		wm_set_mdio_slow_mode_hv(sc);
   12685 
   12686 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12687 
   12688 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12689 
   12690 	/* 82578 */
   12691 	if (sc->sc_phytype == WMPHY_82578) {
   12692 		struct mii_softc *child;
   12693 
   12694 		/*
   12695 		 * Return registers to default by doing a soft reset then
   12696 		 * writing 0x3140 to the control register
   12697 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12698 		 */
   12699 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12700 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12701 			PHY_RESET(child);
   12702 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12703 			    0x3140);
   12704 		}
   12705 	}
   12706 
   12707 	/* Select page 0 */
   12708 	sc->phy.acquire(sc);
   12709 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12710 	sc->phy.release(sc);
   12711 
   12712 	/*
   12713 	 * Configure the K1 Si workaround during phy reset assuming there is
   12714 	 * link so that it disables K1 if link is in 1Gbps.
   12715 	 */
   12716 	wm_k1_gig_workaround_hv(sc, 1);
   12717 }
   12718 
   12719 static void
   12720 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12721 {
   12722 
   12723 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12724 		device_xname(sc->sc_dev), __func__));
   12725 	KASSERT(sc->sc_type == WM_T_PCH2);
   12726 
   12727 	wm_set_mdio_slow_mode_hv(sc);
   12728 }
   12729 
   12730 static int
   12731 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12732 {
   12733 	int k1_enable = sc->sc_nvm_k1_enabled;
   12734 
   12735 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12736 		device_xname(sc->sc_dev), __func__));
   12737 
   12738 	if (sc->phy.acquire(sc) != 0)
   12739 		return -1;
   12740 
   12741 	if (link) {
   12742 		k1_enable = 0;
   12743 
   12744 		/* Link stall fix for link up */
   12745 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12746 	} else {
   12747 		/* Link stall fix for link down */
   12748 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12749 	}
   12750 
   12751 	wm_configure_k1_ich8lan(sc, k1_enable);
   12752 	sc->phy.release(sc);
   12753 
   12754 	return 0;
   12755 }
   12756 
   12757 static void
   12758 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12759 {
   12760 	uint32_t reg;
   12761 
   12762 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12763 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12764 	    reg | HV_KMRN_MDIO_SLOW);
   12765 }
   12766 
   12767 static void
   12768 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12769 {
   12770 	uint32_t ctrl, ctrl_ext, tmp;
   12771 	uint16_t kmrn_reg;
   12772 
   12773 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12774 
   12775 	if (k1_enable)
   12776 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12777 	else
   12778 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12779 
   12780 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12781 
   12782 	delay(20);
   12783 
   12784 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12785 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12786 
   12787 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12788 	tmp |= CTRL_FRCSPD;
   12789 
   12790 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12791 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12792 	CSR_WRITE_FLUSH(sc);
   12793 	delay(20);
   12794 
   12795 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12796 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12797 	CSR_WRITE_FLUSH(sc);
   12798 	delay(20);
   12799 }
   12800 
    12801 /* Special case for the 82575: manual init is needed ... */
   12802 static void
   12803 wm_reset_init_script_82575(struct wm_softc *sc)
   12804 {
   12805 	/*
    12806 	 * Remark: this is untested code - we have no board without an EEPROM.
    12807 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   12808 	 */
   12809 
   12810 	/* SerDes configuration via SERDESCTRL */
   12811 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12812 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12813 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12814 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12815 
   12816 	/* CCM configuration via CCMCTL register */
   12817 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12818 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12819 
   12820 	/* PCIe lanes configuration */
   12821 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12822 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12823 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12824 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12825 
   12826 	/* PCIe PLL Configuration */
   12827 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12828 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12829 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12830 }
   12831 
   12832 static void
   12833 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12834 {
   12835 	uint32_t reg;
   12836 	uint16_t nvmword;
   12837 	int rv;
   12838 
   12839 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12840 		return;
   12841 
   12842 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12843 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12844 	if (rv != 0) {
   12845 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12846 		    __func__);
   12847 		return;
   12848 	}
   12849 
   12850 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12851 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12852 		reg |= MDICNFG_DEST;
   12853 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12854 		reg |= MDICNFG_COM_MDIO;
   12855 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12856 }
   12857 
   12858 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   12859 
   12860 static bool
   12861 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   12862 {
   12863 	int i;
   12864 	uint32_t reg;
   12865 	uint16_t id1, id2;
   12866 
   12867 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12868 		device_xname(sc->sc_dev), __func__));
   12869 	id1 = id2 = 0xffff;
   12870 	for (i = 0; i < 2; i++) {
   12871 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   12872 		if (MII_INVALIDID(id1))
   12873 			continue;
   12874 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   12875 		if (MII_INVALIDID(id2))
   12876 			continue;
   12877 		break;
   12878 	}
    12879 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    12880 		goto out;
   12882 
   12883 	if (sc->sc_type < WM_T_PCH_LPT) {
   12884 		sc->phy.release(sc);
   12885 		wm_set_mdio_slow_mode_hv(sc);
   12886 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   12887 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   12888 		sc->phy.acquire(sc);
   12889 	}
   12890 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   12891 		printf("XXX return with false\n");
   12892 		return false;
   12893 	}
   12894 out:
   12895 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   12896 		/* Only unforce SMBus if ME is not active */
   12897 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   12898 			/* Unforce SMBus mode in PHY */
   12899 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   12900 			    CV_SMB_CTRL);
   12901 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12902 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   12903 			    CV_SMB_CTRL, reg);
   12904 
   12905 			/* Unforce SMBus mode in MAC */
   12906 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12907 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12908 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12909 		}
   12910 	}
   12911 	return true;
   12912 }
   12913 
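/*
 * Toggle the LANPHYPC pin value: force it low via CTRL_LANPHYPC_OVERRIDE,
 * then release the override so that the PHY is power-cycled back on.
 */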
   12914 static void
   12915 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   12916 {
   12917 	uint32_t reg;
   12918 	int i;
   12919 
   12920 	/* Set PHY Config Counter to 50msec */
   12921 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   12922 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   12923 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   12924 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   12925 
   12926 	/* Toggle LANPHYPC */
   12927 	reg = CSR_READ(sc, WMREG_CTRL);
   12928 	reg |= CTRL_LANPHYPC_OVERRIDE;
   12929 	reg &= ~CTRL_LANPHYPC_VALUE;
   12930 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12931 	CSR_WRITE_FLUSH(sc);
   12932 	delay(1000);
   12933 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   12934 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12935 	CSR_WRITE_FLUSH(sc);
   12936 
   12937 	if (sc->sc_type < WM_T_PCH_LPT)
   12938 		delay(50 * 1000);
   12939 	else {
   12940 		i = 20;
   12941 
   12942 		do {
   12943 			delay(5 * 1000);
   12944 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   12945 		    && i--);
   12946 
   12947 		delay(30 * 1000);
   12948 	}
   12949 }
   12950 
   12951 static int
   12952 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   12953 {
   12954 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   12955 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   12956 	uint32_t rxa;
   12957 	uint16_t scale = 0, lat_enc = 0;
   12958 	int64_t lat_ns, value;
   12959 
   12960 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12961 		device_xname(sc->sc_dev), __func__));
   12962 
   12963 	if (link) {
   12964 		pcireg_t preg;
   12965 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   12966 
   12967 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   12968 
   12969 		/*
   12970 		 * Determine the maximum latency tolerated by the device.
   12971 		 *
   12972 		 * Per the PCIe spec, the tolerated latencies are encoded as
   12973 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   12974 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   12975 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   12976 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   12977 		 */
   12978 		lat_ns = ((int64_t)rxa * 1024 -
   12979 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   12980 		if (lat_ns < 0)
   12981 			lat_ns = 0;
   12982 		else {
   12983 			uint32_t status;
   12984 			uint16_t speed;
   12985 
   12986 			status = CSR_READ(sc, WMREG_STATUS);
   12987 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   12988 			case STATUS_SPEED_10:
   12989 				speed = 10;
   12990 				break;
   12991 			case STATUS_SPEED_100:
   12992 				speed = 100;
   12993 				break;
   12994 			case STATUS_SPEED_1000:
   12995 				speed = 1000;
   12996 				break;
   12997 			default:
   12998 				printf("%s: Unknown speed (status = %08x)\n",
   12999 				    device_xname(sc->sc_dev), status);
   13000 				return -1;
   13001 			}
   13002 			lat_ns /= speed;
   13003 		}
   13004 		value = lat_ns;
   13005 
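		/*
		 * Illustrative example (not from the spec): lat_ns = 100000
		 * needs two divide-by-32 steps (100000 -> 3125 -> 98), so
		 * scale = 2 and the encoding represents 98 * 2^10 ns, i.e.
		 * about 100352 ns, rounding up.
		 */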
   13006 		while (value > LTRV_VALUE) {
    13007 			scale++;
   13008 			value = howmany(value, __BIT(5));
   13009 		}
   13010 		if (scale > LTRV_SCALE_MAX) {
   13011 			printf("%s: Invalid LTR latency scale %d\n",
   13012 			    device_xname(sc->sc_dev), scale);
   13013 			return -1;
   13014 		}
   13015 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13016 
   13017 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13018 		    WM_PCI_LTR_CAP_LPT);
   13019 		max_snoop = preg & 0xffff;
   13020 		max_nosnoop = preg >> 16;
   13021 
   13022 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13023 
    13024 		if (lat_enc > max_ltr_enc)
    13025 			lat_enc = max_ltr_enc;
   13027 	}
    13028 	/* Use the same encoded latency for snoop and no-snoop requests */
   13029 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13030 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13031 
   13032 	return 0;
   13033 }
   13034 
   13035 /*
   13036  * I210 Errata 25 and I211 Errata 10
   13037  * Slow System Clock.
   13038  */
   13039 static void
   13040 wm_pll_workaround_i210(struct wm_softc *sc)
   13041 {
   13042 	uint32_t mdicnfg, wuc;
   13043 	uint32_t reg;
   13044 	pcireg_t pcireg;
   13045 	uint32_t pmreg;
   13046 	uint16_t nvmword, tmp_nvmword;
   13047 	int phyval;
   13048 	bool wa_done = false;
   13049 	int i;
   13050 
   13051 	/* Save WUC and MDICNFG registers */
   13052 	wuc = CSR_READ(sc, WMREG_WUC);
   13053 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13054 
   13055 	reg = mdicnfg & ~MDICNFG_DEST;
   13056 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13057 
   13058 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13059 		nvmword = INVM_DEFAULT_AL;
   13060 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13061 
   13062 	/* Get Power Management cap offset */
   13063 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13064 		&pmreg, NULL) == 0)
   13065 		return;
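	/*
	 * If the PHY PLL reports unconfigured, reset the internal PHY and
	 * bounce the chip through D3hot/D0 with the PLL workaround autoload
	 * word, then check again (up to WM_MAX_PLL_TRIES times).
	 */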
   13066 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13067 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13068 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13069 
   13070 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13071 			break; /* OK */
   13072 		}
   13073 
   13074 		wa_done = true;
   13075 		/* Directly reset the internal PHY */
   13076 		reg = CSR_READ(sc, WMREG_CTRL);
   13077 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13078 
   13079 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13080 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13081 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13082 
   13083 		CSR_WRITE(sc, WMREG_WUC, 0);
   13084 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13085 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13086 
   13087 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13088 		    pmreg + PCI_PMCSR);
   13089 		pcireg |= PCI_PMCSR_STATE_D3;
   13090 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13091 		    pmreg + PCI_PMCSR, pcireg);
   13092 		delay(1000);
   13093 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13094 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13095 		    pmreg + PCI_PMCSR, pcireg);
   13096 
   13097 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13098 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13099 
   13100 		/* Restore WUC register */
   13101 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13102 	}
   13103 
   13104 	/* Restore MDICNFG setting */
   13105 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13106 	if (wa_done)
   13107 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13108 }
   13109