if_wm.c revision 1.456
      1 /*	$NetBSD: if_wm.c,v 1.456 2016/12/08 01:12:01 ozaki-r Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
      77  *	- Tx multiqueue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
      79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.456 2016/12/08 01:12:01 ozaki-r Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
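
         /*
          * Illustrative DPRINTF usage (a sketch, not a call site from this
          * file): the second argument is the complete, parenthesized printf
          * argument list, e.g.
          *
          *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
          *	    device_xname(sc->sc_dev)));
          *
          * so the whole statement vanishes when WM_DEBUG is not defined.
          */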
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
     166 /*
     167  * The maximum number of interrupts this driver uses: one per queue plus one for link.
     168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
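
         /*
          * Example (illustrative only): with WM_NTXDESC(txq) == 256 the mask
          * is 255, so the ring index wraps with no division:
          *
          *	WM_NEXTTX(txq, 254) == 255
          *	WM_NEXTTX(txq, 255) == 0
          *
          * This is why txq_ndesc and txq_num must both be powers of two.
          */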
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
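
         /*
          * Worked numbers for the comment above (illustrative): a ~9k jumbo
          * frame split across 2k (MCLBYTES) buffers occupies
          * ceil(9018 / 2048) == 5 descriptors, and 256 / 5 == 51, i.e. room
          * for roughly 50 full-sized jumbo packets.
          */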
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
     219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
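
         /*
          * For example, assuming sizeof(wiseman_rxdesc_t) == 16 (the classic
          * 16-byte legacy descriptor), receive descriptor 3 sits at byte
          * offset WM_CDRXOFF(3) == 48 from the start of the control data;
          * WM_CDTXOFF() makes the same computation with the per-queue
          * txq_descsize chosen at attach time.
          */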
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     263 	do {								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
     270 	} while (0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
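
         /*
          * A sketch of what the macros above generate: WM_Q_EVCNT_ATTACH()
          * formats "%s%02d%s" from the queue name, queue number and event
          * name, so queue 0's "txdw" counter is registered as "txq00txdw".
          * The name buffer declared by WM_Q_EVCNT_DEFINE() is sized from the
          * literal template "qname##XX##evname", which is long enough for
          * any such formatted name.
          */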
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
     292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     294 	int txq_desc_rseg;		/* real number of control segments */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     316 	 * CPUs. This queue mediates between them without blocking.
    317 	 */
    318 	pcq_t *txq_interq;
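
         	/*
         	 * A minimal sketch of the intended use (hw_enqueue() is a
         	 * hypothetical helper, not this driver's code): a producer does
         	 *
         	 *	if (!pcq_put(txq->txq_interq, m))
         	 *		m_freem(m);
         	 *
         	 * dropping the mbuf when the queue is full, while the CPU that
         	 * holds txq_lock later drains it:
         	 *
         	 *	while ((m = pcq_get(txq->txq_interq)) != NULL)
         	 *		hw_enqueue(txq, m);
         	 */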
    319 
    320 	/*
     321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     322 	 * to manage the Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     338 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
     344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     365 	int rxq_desc_rseg;		/* real number of control segments */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
     390 	int wmq_intr_idx;		/* index into the MSI-X table */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 	int reset_delay_us;
    400 };
    401 
    402 /*
    403  * Software state per device.
    404  */
    405 struct wm_softc {
    406 	device_t sc_dev;		/* generic device information */
    407 	bus_space_tag_t sc_st;		/* bus space tag */
    408 	bus_space_handle_t sc_sh;	/* bus space handle */
    409 	bus_size_t sc_ss;		/* bus space size */
    410 	bus_space_tag_t sc_iot;		/* I/O space tag */
    411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    412 	bus_size_t sc_ios;		/* I/O space size */
    413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    415 	bus_size_t sc_flashs;		/* flash registers space size */
    416 	off_t sc_flashreg_offset;	/*
    417 					 * offset to flash registers from
    418 					 * start of BAR
    419 					 */
    420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    421 
    422 	struct ethercom sc_ethercom;	/* ethernet common data */
    423 	struct mii_data sc_mii;		/* MII/media information */
    424 
    425 	pci_chipset_tag_t sc_pc;
    426 	pcitag_t sc_pcitag;
    427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    429 
    430 	uint16_t sc_pcidevid;		/* PCI device ID */
    431 	wm_chip_type sc_type;		/* MAC type */
    432 	int sc_rev;			/* MAC revision */
    433 	wm_phy_type sc_phytype;		/* PHY type */
     434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    435 #define	WM_MEDIATYPE_UNKNOWN		0x00
    436 #define	WM_MEDIATYPE_FIBER		0x01
    437 #define	WM_MEDIATYPE_COPPER		0x02
    438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    440 	int sc_flags;			/* flags; see below */
    441 	int sc_if_flags;		/* last if_flags */
    442 	int sc_flowflags;		/* 802.3x flow control flags */
    443 	int sc_align_tweak;
    444 
    445 	void *sc_ihs[WM_MAX_NINTR];	/*
    446 					 * interrupt cookie.
    447 					 * legacy and msi use sc_ihs[0].
    448 					 */
    449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    450 	int sc_nintrs;			/* number of interrupts */
    451 
     452 	int sc_link_intr_idx;		/* index into the MSI-X table */
    453 
    454 	callout_t sc_tick_ch;		/* tick callout */
    455 	bool sc_core_stopping;
    456 
    457 	int sc_nvm_ver_major;
    458 	int sc_nvm_ver_minor;
    459 	int sc_nvm_ver_build;
    460 	int sc_nvm_addrbits;		/* NVM address bits */
    461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    462 	int sc_ich8_flash_base;
    463 	int sc_ich8_flash_bank_size;
    464 	int sc_nvm_k1_enabled;
    465 
    466 	int sc_nqueues;
    467 	struct wm_queue *sc_queue;
    468 
    469 	int sc_affinity_offset;
    470 
    471 #ifdef WM_EVENT_COUNTERS
    472 	/* Event counters. */
    473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    474 
     475 	/* WM_T_82542_2_1 only */
    476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    481 #endif /* WM_EVENT_COUNTERS */
    482 
     483 	/* This variable is used only on the 82547. */
    484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    485 
    486 	uint32_t sc_ctrl;		/* prototype CTRL register */
    487 #if 0
    488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    489 #endif
    490 	uint32_t sc_icr;		/* prototype interrupt bits */
    491 	uint32_t sc_itr;		/* prototype intr throttling reg */
    492 	uint32_t sc_tctl;		/* prototype TCTL register */
    493 	uint32_t sc_rctl;		/* prototype RCTL register */
    494 	uint32_t sc_txcw;		/* prototype TXCW register */
    495 	uint32_t sc_tipg;		/* prototype TIPG register */
    496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    497 	uint32_t sc_pba;		/* prototype PBA register */
    498 
    499 	int sc_tbi_linkup;		/* TBI link status */
    500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    502 
    503 	int sc_mchash_type;		/* multicast filter offset */
    504 
    505 	krndsource_t rnd_source;	/* random source */
    506 
    507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    508 
    509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    510 	kmutex_t *sc_ich_phymtx;	/*
    511 					 * 82574/82583/ICH/PCH specific PHY
    512 					 * mutex. For 82574/82583, the mutex
    513 					 * is used for both PHY and NVM.
    514 					 */
    515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    516 
    517 	struct wm_phyop phy;
    518 };
    519 
    520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    523 
    524 #ifdef WM_MPSAFE
    525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    526 #else
    527 #define CALLOUT_FLAGS	0
    528 #endif
    529 
    530 #define	WM_RXCHAIN_RESET(rxq)						\
    531 do {									\
    532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    533 	*(rxq)->rxq_tailp = NULL;					\
    534 	(rxq)->rxq_len = 0;						\
    535 } while (/*CONSTCOND*/0)
    536 
    537 #define	WM_RXCHAIN_LINK(rxq, m)						\
    538 do {									\
    539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    540 	(rxq)->rxq_tailp = &(m)->m_next;				\
    541 } while (/*CONSTCOND*/0)
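
         /*
          * The pointer-to-tail-pointer in WM_RXCHAIN_LINK() makes appends O(1)
          * instead of walking the chain.  Illustrative sequence on a freshly
          * reset queue:
          *
          *	WM_RXCHAIN_LINK(rxq, m1);	-> rxq_head == m1
          *	WM_RXCHAIN_LINK(rxq, m2);	-> m1->m_next == m2, rxq_tail == m2
          *
          * rxq_tailp always addresses the m_next slot of the last mbuf, or
          * rxq_head while the chain is empty.
          */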
    542 
    543 #ifdef WM_EVENT_COUNTERS
    544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    546 
    547 #define WM_Q_EVCNT_INCR(qname, evname)			\
    548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    551 #else /* !WM_EVENT_COUNTERS */
    552 #define	WM_EVCNT_INCR(ev)	/* nothing */
    553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    554 
    555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    557 #endif /* !WM_EVENT_COUNTERS */
    558 
    559 #define	CSR_READ(sc, reg)						\
    560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    561 #define	CSR_WRITE(sc, reg, val)						\
    562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    563 #define	CSR_WRITE_FLUSH(sc)						\
    564 	(void) CSR_READ((sc), WMREG_STATUS)
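
         /*
          * CSR_WRITE_FLUSH() pushes posted PCI writes out to the device by
          * doing a harmless read of the STATUS register.  A usage sketch (not
          * a specific call site in this file):
          *
          *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
          *	CSR_WRITE_FLUSH(sc);
          *	delay(10000);
          */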
    565 
    566 #define ICH8_FLASH_READ32(sc, reg)					\
    567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    568 	    (reg) + sc->sc_flashreg_offset)
    569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    571 	    (reg) + sc->sc_flashreg_offset, (data))
    572 
    573 #define ICH8_FLASH_READ16(sc, reg)					\
    574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    575 	    (reg) + sc->sc_flashreg_offset)
    576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset, (data))
    579 
    580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    582 
    583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    584 #define	WM_CDTXADDR_HI(txq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    587 
    588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    589 #define	WM_CDRXADDR_HI(rxq, x)						\
    590 	(sizeof(bus_addr_t) == 8 ?					\
    591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
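
         /*
          * These split a 64-bit descriptor ring address into the two 32-bit
          * halves the hardware takes; on a 32-bit bus_addr_t the high half is
          * simply 0.  A usage sketch, assuming the usual WMREG_TDBAL and
          * WMREG_TDBAH base-address registers from if_wmreg.h:
          *
          *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
          *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
          */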
    592 
    593 /*
    594  * Register read/write functions.
    595  * Other than CSR_{READ|WRITE}().
    596  */
    597 #if 0
    598 static inline uint32_t wm_io_read(struct wm_softc *, int);
    599 #endif
    600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    602 	uint32_t, uint32_t);
    603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    604 
    605 /*
    606  * Descriptor sync/init functions.
    607  */
    608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    611 
    612 /*
    613  * Device driver interface functions and commonly used functions.
    614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    615  */
    616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    617 static int	wm_match(device_t, cfdata_t, void *);
    618 static void	wm_attach(device_t, device_t, void *);
    619 static int	wm_detach(device_t, int);
    620 static bool	wm_suspend(device_t, const pmf_qual_t *);
    621 static bool	wm_resume(device_t, const pmf_qual_t *);
    622 static void	wm_watchdog(struct ifnet *);
    623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    624 static void	wm_tick(void *);
    625 static int	wm_ifflags_cb(struct ethercom *);
    626 static int	wm_ioctl(struct ifnet *, u_long, void *);
    627 /* MAC address related */
    628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    632 static void	wm_set_filter(struct wm_softc *);
    633 /* Reset and init related */
    634 static void	wm_set_vlan(struct wm_softc *);
    635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    636 static void	wm_get_auto_rd_done(struct wm_softc *);
    637 static void	wm_lan_init_done(struct wm_softc *);
    638 static void	wm_get_cfg_done(struct wm_softc *);
    639 static void	wm_initialize_hardware_bits(struct wm_softc *);
    640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    641 static void	wm_reset_phy(struct wm_softc *);
    642 static void	wm_flush_desc_rings(struct wm_softc *);
    643 static void	wm_reset(struct wm_softc *);
    644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    645 static void	wm_rxdrain(struct wm_rxqueue *);
    646 static void	wm_rss_getkey(uint8_t *);
    647 static void	wm_init_rss(struct wm_softc *);
    648 static void	wm_adjust_qnum(struct wm_softc *, int);
    649 static int	wm_setup_legacy(struct wm_softc *);
    650 static int	wm_setup_msix(struct wm_softc *);
    651 static int	wm_init(struct ifnet *);
    652 static int	wm_init_locked(struct ifnet *);
    653 static void	wm_turnon(struct wm_softc *);
    654 static void	wm_turnoff(struct wm_softc *);
    655 static void	wm_stop(struct ifnet *, int);
    656 static void	wm_stop_locked(struct ifnet *, int);
    657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    658 static void	wm_82547_txfifo_stall(void *);
    659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    660 /* DMA related */
    661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    665     struct wm_txqueue *);
    666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    669     struct wm_rxqueue *);
    670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    677     struct wm_txqueue *);
    678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_txrx_queues(struct wm_softc *);
    681 static void	wm_free_txrx_queues(struct wm_softc *);
    682 static int	wm_init_txrx_queues(struct wm_softc *);
    683 /* Start */
    684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    685     uint32_t *, uint8_t *);
    686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    687 static void	wm_start(struct ifnet *);
    688 static void	wm_start_locked(struct ifnet *);
    689 static int	wm_transmit(struct ifnet *, struct mbuf *);
    690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    694 static void	wm_nq_start(struct ifnet *);
    695 static void	wm_nq_start_locked(struct ifnet *);
    696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    699 static void	wm_deferred_start(struct ifnet *);
    700 /* Interrupt */
    701 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    702 static void	wm_rxeof(struct wm_rxqueue *);
    703 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    704 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    705 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    706 static void	wm_linkintr(struct wm_softc *, uint32_t);
    707 static int	wm_intr_legacy(void *);
    708 static int	wm_txrxintr_msix(void *);
    709 static int	wm_linkintr_msix(void *);
    710 
    711 /*
    712  * Media related.
    713  * GMII, SGMII, TBI, SERDES and SFP.
    714  */
    715 /* Common */
    716 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    717 /* GMII related */
    718 static void	wm_gmii_reset(struct wm_softc *);
    719 static int	wm_get_phy_id_82575(struct wm_softc *);
    720 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    721 static int	wm_gmii_mediachange(struct ifnet *);
    722 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    723 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    724 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    725 static int	wm_gmii_i82543_readreg(device_t, int, int);
    726 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    727 static int	wm_gmii_mdic_readreg(device_t, int, int);
    728 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    729 static int	wm_gmii_i82544_readreg(device_t, int, int);
    730 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    731 static int	wm_gmii_i80003_readreg(device_t, int, int);
    732 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    733 static int	wm_gmii_bm_readreg(device_t, int, int);
    734 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    735 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    736 static int	wm_gmii_hv_readreg(device_t, int, int);
    737 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    738 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    739 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    740 static int	wm_gmii_82580_readreg(device_t, int, int);
    741 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    742 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    743 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    744 static void	wm_gmii_statchg(struct ifnet *);
    745 /*
     746  * Kumeran related (80003, ICH* and PCH*).
     747  * These functions are not for accessing MII registers but for accessing
     748  * Kumeran-specific registers.
    749  */
    750 static int	wm_kmrn_readreg(struct wm_softc *, int);
    751 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    752 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    753 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    754 /* SGMII */
    755 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    756 static int	wm_sgmii_readreg(device_t, int, int);
    757 static void	wm_sgmii_writereg(device_t, int, int, int);
    758 /* TBI related */
    759 static void	wm_tbi_mediainit(struct wm_softc *);
    760 static int	wm_tbi_mediachange(struct ifnet *);
    761 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    762 static int	wm_check_for_link(struct wm_softc *);
    763 static void	wm_tbi_tick(struct wm_softc *);
    764 /* SERDES related */
    765 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    766 static int	wm_serdes_mediachange(struct ifnet *);
    767 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    768 static void	wm_serdes_tick(struct wm_softc *);
    769 /* SFP related */
    770 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    771 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    772 
    773 /*
    774  * NVM related.
    775  * Microwire, SPI (w/wo EERD) and Flash.
    776  */
    777 /* Misc functions */
    778 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    779 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    780 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    781 /* Microwire */
    782 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    783 /* SPI */
    784 static int	wm_nvm_ready_spi(struct wm_softc *);
    785 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    786 /* Using with EERD */
    787 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    788 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    789 /* Flash */
    790 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    791     unsigned int *);
    792 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    793 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    794 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    795 	uint32_t *);
    796 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    797 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    798 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    799 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    800 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    801 /* iNVM */
    802 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    803 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    804 /* Lock, detecting NVM type, validate checksum and read */
    805 static int	wm_nvm_acquire(struct wm_softc *);
    806 static void	wm_nvm_release(struct wm_softc *);
    807 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    808 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    809 static int	wm_nvm_validate_checksum(struct wm_softc *);
    810 static void	wm_nvm_version_invm(struct wm_softc *);
    811 static void	wm_nvm_version(struct wm_softc *);
    812 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    813 
    814 /*
    815  * Hardware semaphores.
     816  * Very complex...
    817  */
    818 static int	wm_get_null(struct wm_softc *);
    819 static void	wm_put_null(struct wm_softc *);
    820 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    821 static void	wm_put_swsm_semaphore(struct wm_softc *);
    822 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    823 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    824 static int	wm_get_phy_82575(struct wm_softc *);
    825 static void	wm_put_phy_82575(struct wm_softc *);
    826 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    827 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    828 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    829 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    830 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    831 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    832 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    833 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    834 
    835 /*
    836  * Management mode and power management related subroutines.
    837  * BMC, AMT, suspend/resume and EEE.
    838  */
    839 #if 0
    840 static int	wm_check_mng_mode(struct wm_softc *);
    841 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    842 static int	wm_check_mng_mode_82574(struct wm_softc *);
    843 static int	wm_check_mng_mode_generic(struct wm_softc *);
    844 #endif
    845 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    846 static bool	wm_phy_resetisblocked(struct wm_softc *);
    847 static void	wm_get_hw_control(struct wm_softc *);
    848 static void	wm_release_hw_control(struct wm_softc *);
    849 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    850 static void	wm_smbustopci(struct wm_softc *);
    851 static void	wm_init_manageability(struct wm_softc *);
    852 static void	wm_release_manageability(struct wm_softc *);
    853 static void	wm_get_wakeup(struct wm_softc *);
    854 static void	wm_ulp_disable(struct wm_softc *);
    855 static void	wm_enable_phy_wakeup(struct wm_softc *);
    856 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    857 static void	wm_enable_wakeup(struct wm_softc *);
    858 /* LPLU (Low Power Link Up) */
    859 static void	wm_lplu_d0_disable(struct wm_softc *);
    860 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    861 /* EEE */
    862 static void	wm_set_eee_i350(struct wm_softc *);
    863 
    864 /*
    865  * Workarounds (mainly PHY related).
    866  * Basically, PHY's workarounds are in the PHY drivers.
    867  */
    868 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    869 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    870 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    871 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    872 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    873 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    874 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    875 static void	wm_reset_init_script_82575(struct wm_softc *);
    876 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    877 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    878 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    879 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    880 static void	wm_pll_workaround_i210(struct wm_softc *);
    881 
    882 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    883     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    884 
    885 /*
    886  * Devices supported by this driver.
    887  */
    888 static const struct wm_product {
    889 	pci_vendor_id_t		wmp_vendor;
    890 	pci_product_id_t	wmp_product;
    891 	const char		*wmp_name;
    892 	wm_chip_type		wmp_type;
    893 	uint32_t		wmp_flags;
    894 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    895 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    896 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    897 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    898 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    899 } wm_products[] = {
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    901 	  "Intel i82542 1000BASE-X Ethernet",
    902 	  WM_T_82542_2_1,	WMP_F_FIBER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    905 	  "Intel i82543GC 1000BASE-X Ethernet",
    906 	  WM_T_82543,		WMP_F_FIBER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    909 	  "Intel i82543GC 1000BASE-T Ethernet",
    910 	  WM_T_82543,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    913 	  "Intel i82544EI 1000BASE-T Ethernet",
    914 	  WM_T_82544,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    917 	  "Intel i82544EI 1000BASE-X Ethernet",
    918 	  WM_T_82544,		WMP_F_FIBER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    921 	  "Intel i82544GC 1000BASE-T Ethernet",
    922 	  WM_T_82544,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    925 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    926 	  WM_T_82544,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    929 	  "Intel i82540EM 1000BASE-T Ethernet",
    930 	  WM_T_82540,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    933 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    934 	  WM_T_82540,		WMP_F_COPPER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    937 	  "Intel i82540EP 1000BASE-T Ethernet",
    938 	  WM_T_82540,		WMP_F_COPPER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    941 	  "Intel i82540EP 1000BASE-T Ethernet",
    942 	  WM_T_82540,		WMP_F_COPPER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    945 	  "Intel i82540EP 1000BASE-T Ethernet",
    946 	  WM_T_82540,		WMP_F_COPPER },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    949 	  "Intel i82545EM 1000BASE-T Ethernet",
    950 	  WM_T_82545,		WMP_F_COPPER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    953 	  "Intel i82545GM 1000BASE-T Ethernet",
    954 	  WM_T_82545_3,		WMP_F_COPPER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    957 	  "Intel i82545GM 1000BASE-X Ethernet",
    958 	  WM_T_82545_3,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    961 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    962 	  WM_T_82545_3,		WMP_F_SERDES },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    965 	  "Intel i82546EB 1000BASE-T Ethernet",
    966 	  WM_T_82546,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    969 	  "Intel i82546EB 1000BASE-T Ethernet",
    970 	  WM_T_82546,		WMP_F_COPPER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    973 	  "Intel i82545EM 1000BASE-X Ethernet",
    974 	  WM_T_82545,		WMP_F_FIBER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    977 	  "Intel i82546EB 1000BASE-X Ethernet",
    978 	  WM_T_82546,		WMP_F_FIBER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    981 	  "Intel i82546GB 1000BASE-T Ethernet",
    982 	  WM_T_82546_3,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    985 	  "Intel i82546GB 1000BASE-X Ethernet",
    986 	  WM_T_82546_3,		WMP_F_FIBER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    989 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    990 	  WM_T_82546_3,		WMP_F_SERDES },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    993 	  "i82546GB quad-port Gigabit Ethernet",
    994 	  WM_T_82546_3,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    997 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    998 	  WM_T_82546_3,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1001 	  "Intel PRO/1000MT (82546GB)",
   1002 	  WM_T_82546_3,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1005 	  "Intel i82541EI 1000BASE-T Ethernet",
   1006 	  WM_T_82541,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1009 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1010 	  WM_T_82541,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1013 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1014 	  WM_T_82541,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1017 	  "Intel i82541ER 1000BASE-T Ethernet",
   1018 	  WM_T_82541_2,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1021 	  "Intel i82541GI 1000BASE-T Ethernet",
   1022 	  WM_T_82541_2,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1025 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1026 	  WM_T_82541_2,		WMP_F_COPPER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1029 	  "Intel i82541PI 1000BASE-T Ethernet",
   1030 	  WM_T_82541_2,		WMP_F_COPPER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1033 	  "Intel i82547EI 1000BASE-T Ethernet",
   1034 	  WM_T_82547,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1037 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1038 	  WM_T_82547,		WMP_F_COPPER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1041 	  "Intel i82547GI 1000BASE-T Ethernet",
   1042 	  WM_T_82547_2,		WMP_F_COPPER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1045 	  "Intel PRO/1000 PT (82571EB)",
   1046 	  WM_T_82571,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1049 	  "Intel PRO/1000 PF (82571EB)",
   1050 	  WM_T_82571,		WMP_F_FIBER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1053 	  "Intel PRO/1000 PB (82571EB)",
   1054 	  WM_T_82571,		WMP_F_SERDES },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1057 	  "Intel PRO/1000 QT (82571EB)",
   1058 	  WM_T_82571,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1061 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1062 	  WM_T_82571,		WMP_F_COPPER, },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1065 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1066 	  WM_T_82571,		WMP_F_COPPER, },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1069 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1070 	  WM_T_82571,		WMP_F_SERDES, },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1073 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1074 	  WM_T_82571,		WMP_F_SERDES, },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1077 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1078 	  WM_T_82571,		WMP_F_FIBER, },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1081 	  "Intel i82572EI 1000baseT Ethernet",
   1082 	  WM_T_82572,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1085 	  "Intel i82572EI 1000baseX Ethernet",
   1086 	  WM_T_82572,		WMP_F_FIBER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1089 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1090 	  WM_T_82572,		WMP_F_SERDES },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1093 	  "Intel i82572EI 1000baseT Ethernet",
   1094 	  WM_T_82572,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1097 	  "Intel i82573E",
   1098 	  WM_T_82573,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1101 	  "Intel i82573E IAMT",
   1102 	  WM_T_82573,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1105 	  "Intel i82573L Gigabit Ethernet",
   1106 	  WM_T_82573,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1109 	  "Intel i82574L",
   1110 	  WM_T_82574,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1113 	  "Intel i82574L",
   1114 	  WM_T_82574,		WMP_F_COPPER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1117 	  "Intel i82583V",
   1118 	  WM_T_82583,		WMP_F_COPPER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1121 	  "i80003 dual 1000baseT Ethernet",
   1122 	  WM_T_80003,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1125 	  "i80003 dual 1000baseX Ethernet",
   1126 	  WM_T_80003,		WMP_F_COPPER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1129 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1130 	  WM_T_80003,		WMP_F_SERDES },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1133 	  "Intel i80003 1000baseT Ethernet",
   1134 	  WM_T_80003,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1137 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1138 	  WM_T_80003,		WMP_F_SERDES },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1141 	  "Intel i82801H (M_AMT) LAN Controller",
   1142 	  WM_T_ICH8,		WMP_F_COPPER },
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1144 	  "Intel i82801H (AMT) LAN Controller",
   1145 	  WM_T_ICH8,		WMP_F_COPPER },
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1147 	  "Intel i82801H LAN Controller",
   1148 	  WM_T_ICH8,		WMP_F_COPPER },
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1150 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1151 	  WM_T_ICH8,		WMP_F_COPPER },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1153 	  "Intel i82801H (M) LAN Controller",
   1154 	  WM_T_ICH8,		WMP_F_COPPER },
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1156 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1157 	  WM_T_ICH8,		WMP_F_COPPER },
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1159 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1160 	  WM_T_ICH8,		WMP_F_COPPER },
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1162 	  "82567V-3 LAN Controller",
   1163 	  WM_T_ICH8,		WMP_F_COPPER },
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1165 	  "82801I (AMT) LAN Controller",
   1166 	  WM_T_ICH9,		WMP_F_COPPER },
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1168 	  "82801I 10/100 LAN Controller",
   1169 	  WM_T_ICH9,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1171 	  "82801I (G) 10/100 LAN Controller",
   1172 	  WM_T_ICH9,		WMP_F_COPPER },
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1174 	  "82801I (GT) 10/100 LAN Controller",
   1175 	  WM_T_ICH9,		WMP_F_COPPER },
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1177 	  "82801I (C) LAN Controller",
   1178 	  WM_T_ICH9,		WMP_F_COPPER },
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1180 	  "82801I mobile LAN Controller",
   1181 	  WM_T_ICH9,		WMP_F_COPPER },
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1183 	  "82801I mobile (V) LAN Controller",
   1184 	  WM_T_ICH9,		WMP_F_COPPER },
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1186 	  "82801I mobile (AMT) LAN Controller",
   1187 	  WM_T_ICH9,		WMP_F_COPPER },
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1189 	  "82567LM-4 LAN Controller",
   1190 	  WM_T_ICH9,		WMP_F_COPPER },
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1192 	  "82567LM-2 LAN Controller",
   1193 	  WM_T_ICH10,		WMP_F_COPPER },
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1195 	  "82567LF-2 LAN Controller",
   1196 	  WM_T_ICH10,		WMP_F_COPPER },
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1198 	  "82567LM-3 LAN Controller",
   1199 	  WM_T_ICH10,		WMP_F_COPPER },
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1201 	  "82567LF-3 LAN Controller",
   1202 	  WM_T_ICH10,		WMP_F_COPPER },
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1204 	  "82567V-2 LAN Controller",
   1205 	  WM_T_ICH10,		WMP_F_COPPER },
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1207 	  "82567V-3? LAN Controller",
   1208 	  WM_T_ICH10,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1210 	  "HANKSVILLE LAN Controller",
   1211 	  WM_T_ICH10,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1213 	  "PCH LAN (82577LM) Controller",
   1214 	  WM_T_PCH,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1216 	  "PCH LAN (82577LC) Controller",
   1217 	  WM_T_PCH,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1219 	  "PCH LAN (82578DM) Controller",
   1220 	  WM_T_PCH,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1222 	  "PCH LAN (82578DC) Controller",
   1223 	  WM_T_PCH,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1225 	  "PCH2 LAN (82579LM) Controller",
   1226 	  WM_T_PCH2,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1228 	  "PCH2 LAN (82579V) Controller",
   1229 	  WM_T_PCH2,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1231 	  "82575EB dual-1000baseT Ethernet",
   1232 	  WM_T_82575,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1234 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1235 	  WM_T_82575,		WMP_F_SERDES },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1237 	  "82575GB quad-1000baseT Ethernet",
   1238 	  WM_T_82575,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1240 	  "82575GB quad-1000baseT Ethernet (PM)",
   1241 	  WM_T_82575,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1243 	  "82576 1000BaseT Ethernet",
   1244 	  WM_T_82576,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1246 	  "82576 1000BaseX Ethernet",
   1247 	  WM_T_82576,		WMP_F_FIBER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1250 	  "82576 gigabit Ethernet (SERDES)",
   1251 	  WM_T_82576,		WMP_F_SERDES },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1254 	  "82576 quad-1000BaseT Ethernet",
   1255 	  WM_T_82576,		WMP_F_COPPER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1258 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1259 	  WM_T_82576,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1262 	  "82576 gigabit Ethernet",
   1263 	  WM_T_82576,		WMP_F_COPPER },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1266 	  "82576 gigabit Ethernet (SERDES)",
   1267 	  WM_T_82576,		WMP_F_SERDES },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1269 	  "82576 quad-gigabit Ethernet (SERDES)",
   1270 	  WM_T_82576,		WMP_F_SERDES },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1273 	  "82580 1000BaseT Ethernet",
   1274 	  WM_T_82580,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1276 	  "82580 1000BaseX Ethernet",
   1277 	  WM_T_82580,		WMP_F_FIBER },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1280 	  "82580 1000BaseT Ethernet (SERDES)",
   1281 	  WM_T_82580,		WMP_F_SERDES },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1284 	  "82580 gigabit Ethernet (SGMII)",
   1285 	  WM_T_82580,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1287 	  "82580 dual-1000BaseT Ethernet",
   1288 	  WM_T_82580,		WMP_F_COPPER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1291 	  "82580 quad-1000BaseX Ethernet",
   1292 	  WM_T_82580,		WMP_F_FIBER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1295 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1296 	  WM_T_82580,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1299 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1300 	  WM_T_82580,		WMP_F_SERDES },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1303 	  "DH89XXCC 1000BASE-KX Ethernet",
   1304 	  WM_T_82580,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1307 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1308 	  WM_T_82580,		WMP_F_SERDES },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1311 	  "I350 Gigabit Network Connection",
   1312 	  WM_T_I350,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1315 	  "I350 Gigabit Fiber Network Connection",
   1316 	  WM_T_I350,		WMP_F_FIBER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1319 	  "I350 Gigabit Backplane Connection",
   1320 	  WM_T_I350,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1323 	  "I350 Quad Port Gigabit Ethernet",
   1324 	  WM_T_I350,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1327 	  "I350 Gigabit Connection",
   1328 	  WM_T_I350,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1331 	  "I354 Gigabit Ethernet (KX)",
   1332 	  WM_T_I354,		WMP_F_SERDES },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1335 	  "I354 Gigabit Ethernet (SGMII)",
   1336 	  WM_T_I354,		WMP_F_COPPER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1339 	  "I354 Gigabit Ethernet (2.5G)",
   1340 	  WM_T_I354,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1343 	  "I210-T1 Ethernet Server Adapter",
   1344 	  WM_T_I210,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1347 	  "I210 Ethernet (Copper OEM)",
   1348 	  WM_T_I210,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1351 	  "I210 Ethernet (Copper IT)",
   1352 	  WM_T_I210,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1355 	  "I210 Ethernet (FLASH less)",
   1356 	  WM_T_I210,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1359 	  "I210 Gigabit Ethernet (Fiber)",
   1360 	  WM_T_I210,		WMP_F_FIBER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1363 	  "I210 Gigabit Ethernet (SERDES)",
   1364 	  WM_T_I210,		WMP_F_SERDES },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1367 	  "I210 Gigabit Ethernet (FLASH less)",
   1368 	  WM_T_I210,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1371 	  "I210 Gigabit Ethernet (SGMII)",
   1372 	  WM_T_I210,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1375 	  "I211 Ethernet (COPPER)",
   1376 	  WM_T_I211,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1378 	  "I217 V Ethernet Connection",
   1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1381 	  "I217 LM Ethernet Connection",
   1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1384 	  "I218 V Ethernet Connection",
   1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1387 	  "I218 V Ethernet Connection",
   1388 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1390 	  "I218 V Ethernet Connection",
   1391 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1393 	  "I218 LM Ethernet Connection",
   1394 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1396 	  "I218 LM Ethernet Connection",
   1397 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1399 	  "I218 LM Ethernet Connection",
   1400 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1401 #if 0
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1403 	  "I219 V Ethernet Connection",
   1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1406 	  "I219 V Ethernet Connection",
   1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1409 	  "I219 V Ethernet Connection",
   1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1412 	  "I219 V Ethernet Connection",
   1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1415 	  "I219 LM Ethernet Connection",
   1416 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1418 	  "I219 LM Ethernet Connection",
   1419 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1421 	  "I219 LM Ethernet Connection",
   1422 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1424 	  "I219 LM Ethernet Connection",
   1425 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1427 	  "I219 LM Ethernet Connection",
   1428 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1429 #endif
   1430 	{ 0,			0,
   1431 	  NULL,
   1432 	  0,			0 },
   1433 };
   1434 
   1435 /*
   1436  * Register read/write functions.
   1437  * Other than CSR_{READ|WRITE}().
   1438  */
   1439 
   1440 #if 0 /* Not currently used */
   1441 static inline uint32_t
   1442 wm_io_read(struct wm_softc *sc, int reg)
   1443 {
   1444 
   1445 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1446 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1447 }
   1448 #endif
   1449 
   1450 static inline void
   1451 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1452 {
   1453 
   1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1456 }
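
/*
 * Illustrative sketch (not compiled): the I/O BAR exposes a two-register
 * indirect window -- the target register offset is written at BAR offset 0
 * (IOADDR) and the data is then accessed at BAR offset 4 (IODATA), as the
 * helpers above do.  A hypothetical chip-bug workaround could rewrite the
 * CTRL register through I/O space like this:
 */
#if 0
	if (sc->sc_flags & WM_F_IOH_VALID)
		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
#endif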
   1457 
   1458 static inline void
   1459 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1460     uint32_t data)
   1461 {
   1462 	uint32_t regval;
   1463 	int i;
   1464 
   1465 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1466 
   1467 	CSR_WRITE(sc, reg, regval);
   1468 
   1469 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1470 		delay(5);
   1471 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1472 			break;
   1473 	}
   1474 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1475 		aprint_error("%s: WARNING:"
   1476 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1477 		    device_xname(sc->sc_dev), reg);
   1478 	}
   1479 }
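
/*
 * Illustrative sketch (not compiled): the helper above packs an 8-bit
 * value and a register offset into a single control word and then polls
 * for SCTL_CTL_READY.  Writing the value 0x1f to offset 2 of such a
 * register bank (WMREG_SCTL is assumed as the target here) would be:
 */
#if 0
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 2, 0x1f);
#endif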
   1480 
   1481 static inline void
   1482 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1483 {
   1484 	wa->wa_low = htole32(v & 0xffffffffU);
   1485 	if (sizeof(bus_addr_t) == 8)
   1486 		wa->wa_high = htole32((uint64_t) v >> 32);
   1487 	else
   1488 		wa->wa_high = 0;
   1489 }
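
/*
 * Worked example (illustrative): with a 64-bit bus_addr_t, the address
 * 0x0000000123456000 is stored as wa_low = htole32(0x23456000) and
 * wa_high = htole32(0x00000001); with a 32-bit bus_addr_t, wa_high is
 * always 0.
 */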
   1490 
   1491 /*
   1492  * Descriptor sync/init functions.
   1493  */
   1494 static inline void
   1495 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1496 {
   1497 	struct wm_softc *sc = txq->txq_sc;
   1498 
   1499 	/* If it will wrap around, sync to the end of the ring. */
   1500 	if ((start + num) > WM_NTXDESC(txq)) {
   1501 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1502 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1503 		    (WM_NTXDESC(txq) - start), ops);
   1504 		num -= (WM_NTXDESC(txq) - start);
   1505 		start = 0;
   1506 	}
   1507 
   1508 	/* Now sync whatever is left. */
   1509 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1510 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1511 }
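
/*
 * Worked example (illustrative): with a 256-descriptor ring, a call such
 * as wm_cdtxsync(txq, 250, 10, ops) wraps around the end of the ring, so
 * it is split into two bus_dmamap_sync() calls covering descriptors
 * 250..255 and 0..3.
 */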
   1512 
   1513 static inline void
   1514 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1515 {
   1516 	struct wm_softc *sc = rxq->rxq_sc;
   1517 
   1518 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1519 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1520 }
   1521 
   1522 static inline void
   1523 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1524 {
   1525 	struct wm_softc *sc = rxq->rxq_sc;
   1526 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1527 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1528 	struct mbuf *m = rxs->rxs_mbuf;
   1529 
   1530 	/*
   1531 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1532 	 * so that the payload after the Ethernet header is aligned
   1533 	 * to a 4-byte boundary.
    1534 	 *
   1535 	 * XXX BRAINDAMAGE ALERT!
   1536 	 * The stupid chip uses the same size for every buffer, which
   1537 	 * is set in the Receive Control register.  We are using the 2K
   1538 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1539 	 * reason, we can't "scoot" packets longer than the standard
   1540 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1541 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1542 	 * the upper layer copy the headers.
   1543 	 */
   1544 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1545 
   1546 	wm_set_dma_addr(&rxd->wrx_addr,
   1547 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1548 	rxd->wrx_len = 0;
   1549 	rxd->wrx_cksum = 0;
   1550 	rxd->wrx_status = 0;
   1551 	rxd->wrx_errors = 0;
   1552 	rxd->wrx_special = 0;
   1553 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1554 
   1555 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1556 }
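
/*
 * Worked example of align_tweak (illustrative): with sc_align_tweak = 2,
 * the 14-byte Ethernet header occupies buffer offsets 2..15, so the IP
 * header that follows starts at offset 16, which is 4-byte aligned
 * (2 + 14 = 16 and 16 % 4 == 0).
 */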
   1557 
   1558 /*
   1559  * Device driver interface functions and commonly used functions.
   1560  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1561  */
   1562 
   1563 /* Lookup supported device table */
   1564 static const struct wm_product *
   1565 wm_lookup(const struct pci_attach_args *pa)
   1566 {
   1567 	const struct wm_product *wmp;
   1568 
   1569 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1570 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1571 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1572 			return wmp;
   1573 	}
   1574 	return NULL;
   1575 }
   1576 
   1577 /* The match function (ca_match) */
   1578 static int
   1579 wm_match(device_t parent, cfdata_t cf, void *aux)
   1580 {
   1581 	struct pci_attach_args *pa = aux;
   1582 
   1583 	if (wm_lookup(pa) != NULL)
   1584 		return 1;
   1585 
   1586 	return 0;
   1587 }
   1588 
   1589 /* The attach function (ca_attach) */
   1590 static void
   1591 wm_attach(device_t parent, device_t self, void *aux)
   1592 {
   1593 	struct wm_softc *sc = device_private(self);
   1594 	struct pci_attach_args *pa = aux;
   1595 	prop_dictionary_t dict;
   1596 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1597 	pci_chipset_tag_t pc = pa->pa_pc;
   1598 	int counts[PCI_INTR_TYPE_SIZE];
   1599 	pci_intr_type_t max_type;
   1600 	const char *eetype, *xname;
   1601 	bus_space_tag_t memt;
   1602 	bus_space_handle_t memh;
   1603 	bus_size_t memsize;
   1604 	int memh_valid;
   1605 	int i, error;
   1606 	const struct wm_product *wmp;
   1607 	prop_data_t ea;
   1608 	prop_number_t pn;
   1609 	uint8_t enaddr[ETHER_ADDR_LEN];
   1610 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1611 	pcireg_t preg, memtype;
   1612 	uint16_t eeprom_data, apme_mask;
   1613 	bool force_clear_smbi;
   1614 	uint32_t link_mode;
   1615 	uint32_t reg;
   1616 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1617 
   1618 	sc->sc_dev = self;
   1619 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1620 	sc->sc_core_stopping = false;
   1621 
   1622 	wmp = wm_lookup(pa);
   1623 #ifdef DIAGNOSTIC
   1624 	if (wmp == NULL) {
   1625 		printf("\n");
   1626 		panic("wm_attach: impossible");
   1627 	}
   1628 #endif
   1629 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1630 
   1631 	sc->sc_pc = pa->pa_pc;
   1632 	sc->sc_pcitag = pa->pa_tag;
   1633 
   1634 	if (pci_dma64_available(pa))
   1635 		sc->sc_dmat = pa->pa_dmat64;
   1636 	else
   1637 		sc->sc_dmat = pa->pa_dmat;
   1638 
   1639 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1640 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1641 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1642 
   1643 	sc->sc_type = wmp->wmp_type;
   1644 
   1645 	/* Set default function pointers */
   1646 	sc->phy.acquire = wm_get_null;
   1647 	sc->phy.release = wm_put_null;
   1648 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1649 
   1650 	if (sc->sc_type < WM_T_82543) {
   1651 		if (sc->sc_rev < 2) {
   1652 			aprint_error_dev(sc->sc_dev,
   1653 			    "i82542 must be at least rev. 2\n");
   1654 			return;
   1655 		}
   1656 		if (sc->sc_rev < 3)
   1657 			sc->sc_type = WM_T_82542_2_0;
   1658 	}
   1659 
   1660 	/*
   1661 	 * Disable MSI for Errata:
   1662 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1663 	 *
   1664 	 *  82544: Errata 25
   1665 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1666 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1667 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1668 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1669 	 *
   1670 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1671 	 *
   1672 	 *  82571 & 82572: Errata 63
   1673 	 */
   1674 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1675 	    || (sc->sc_type == WM_T_82572))
   1676 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1677 
   1678 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1679 	    || (sc->sc_type == WM_T_82580)
   1680 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1681 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1682 		sc->sc_flags |= WM_F_NEWQUEUE;
   1683 
   1684 	/* Set device properties (mactype) */
   1685 	dict = device_properties(sc->sc_dev);
   1686 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1687 
   1688 	/*
    1689 	 * Map the device.  All devices support memory-mapped access,
    1690 	 * which is required for normal operation.
   1691 	 */
   1692 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1693 	switch (memtype) {
   1694 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1695 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1696 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1697 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1698 		break;
   1699 	default:
   1700 		memh_valid = 0;
   1701 		break;
   1702 	}
   1703 
   1704 	if (memh_valid) {
   1705 		sc->sc_st = memt;
   1706 		sc->sc_sh = memh;
   1707 		sc->sc_ss = memsize;
   1708 	} else {
   1709 		aprint_error_dev(sc->sc_dev,
   1710 		    "unable to map device registers\n");
   1711 		return;
   1712 	}
   1713 
   1714 	/*
   1715 	 * In addition, i82544 and later support I/O mapped indirect
   1716 	 * register access.  It is not desirable (nor supported in
   1717 	 * this driver) to use it for normal operation, though it is
   1718 	 * required to work around bugs in some chip versions.
   1719 	 */
   1720 	if (sc->sc_type >= WM_T_82544) {
   1721 		/* First we have to find the I/O BAR. */
   1722 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1723 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1724 			if (memtype == PCI_MAPREG_TYPE_IO)
   1725 				break;
   1726 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1727 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1728 				i += 4;	/* skip high bits, too */
   1729 		}
   1730 		if (i < PCI_MAPREG_END) {
   1731 			/*
    1732 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1733 			 * (and possibly newer) chips have no PCI_MAPREG_TYPE_IO,
    1734 			 * which is no problem because those newer chips don't
    1735 			 * have this bug.
    1736 			 *
    1737 			 * The i8254x apparently doesn't respond when the
    1738 			 * I/O BAR is 0, which looks as if it hasn't
    1739 			 * been configured.
   1740 			 */
   1741 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1742 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1743 				aprint_error_dev(sc->sc_dev,
   1744 				    "WARNING: I/O BAR at zero.\n");
   1745 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1746 					0, &sc->sc_iot, &sc->sc_ioh,
   1747 					NULL, &sc->sc_ios) == 0) {
   1748 				sc->sc_flags |= WM_F_IOH_VALID;
   1749 			} else {
   1750 				aprint_error_dev(sc->sc_dev,
   1751 				    "WARNING: unable to map I/O space\n");
   1752 			}
   1753 		}
   1754 
   1755 	}
   1756 
   1757 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1758 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1759 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1760 	if (sc->sc_type < WM_T_82542_2_1)
   1761 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1762 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1763 
   1764 	/* power up chip */
   1765 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1766 	    NULL)) && error != EOPNOTSUPP) {
   1767 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1768 		return;
   1769 	}
   1770 
   1771 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1772 
   1773 	/* Allocation settings */
   1774 	max_type = PCI_INTR_TYPE_MSIX;
   1775 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1776 	counts[PCI_INTR_TYPE_MSI] = 1;
   1777 	counts[PCI_INTR_TYPE_INTX] = 1;
   1778 
   1779 alloc_retry:
   1780 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1781 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1782 		return;
   1783 	}
   1784 
   1785 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1786 		error = wm_setup_msix(sc);
   1787 		if (error) {
   1788 			pci_intr_release(pc, sc->sc_intrs,
   1789 			    counts[PCI_INTR_TYPE_MSIX]);
   1790 
   1791 			/* Setup for MSI: Disable MSI-X */
   1792 			max_type = PCI_INTR_TYPE_MSI;
   1793 			counts[PCI_INTR_TYPE_MSI] = 1;
   1794 			counts[PCI_INTR_TYPE_INTX] = 1;
   1795 			goto alloc_retry;
   1796 		}
    1797 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1798 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1799 		error = wm_setup_legacy(sc);
   1800 		if (error) {
   1801 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1802 			    counts[PCI_INTR_TYPE_MSI]);
   1803 
   1804 			/* The next try is for INTx: Disable MSI */
   1805 			max_type = PCI_INTR_TYPE_INTX;
   1806 			counts[PCI_INTR_TYPE_INTX] = 1;
   1807 			goto alloc_retry;
   1808 		}
   1809 	} else {
   1810 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1811 		error = wm_setup_legacy(sc);
   1812 		if (error) {
   1813 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1814 			    counts[PCI_INTR_TYPE_INTX]);
   1815 			return;
   1816 		}
   1817 	}
   1818 
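	/*
	 * Summary of the fallback above (descriptive only): interrupt
	 * allocation degrades MSI-X -> MSI -> INTx.  Each failed
	 * wm_setup_*() releases the vectors just allocated, lowers
	 * max_type and jumps back to alloc_retry, so pci_intr_alloc()
	 * retries with the next weaker interrupt type.
	 */
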
   1819 	/*
   1820 	 * Check the function ID (unit number of the chip).
   1821 	 */
   1822 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1823 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1824 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1825 	    || (sc->sc_type == WM_T_82580)
   1826 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1827 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1828 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1829 	else
   1830 		sc->sc_funcid = 0;
   1831 
   1832 	/*
   1833 	 * Determine a few things about the bus we're connected to.
   1834 	 */
   1835 	if (sc->sc_type < WM_T_82543) {
   1836 		/* We don't really know the bus characteristics here. */
   1837 		sc->sc_bus_speed = 33;
   1838 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1839 		/*
    1840 		 * CSA (Communication Streaming Architecture) is about as fast
    1841 		 * as a 32-bit 66MHz PCI bus.
   1842 		 */
   1843 		sc->sc_flags |= WM_F_CSA;
   1844 		sc->sc_bus_speed = 66;
   1845 		aprint_verbose_dev(sc->sc_dev,
   1846 		    "Communication Streaming Architecture\n");
   1847 		if (sc->sc_type == WM_T_82547) {
   1848 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1849 			callout_setfunc(&sc->sc_txfifo_ch,
   1850 					wm_82547_txfifo_stall, sc);
   1851 			aprint_verbose_dev(sc->sc_dev,
   1852 			    "using 82547 Tx FIFO stall work-around\n");
   1853 		}
   1854 	} else if (sc->sc_type >= WM_T_82571) {
   1855 		sc->sc_flags |= WM_F_PCIE;
   1856 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1857 		    && (sc->sc_type != WM_T_ICH10)
   1858 		    && (sc->sc_type != WM_T_PCH)
   1859 		    && (sc->sc_type != WM_T_PCH2)
   1860 		    && (sc->sc_type != WM_T_PCH_LPT)
   1861 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1862 			/* ICH* and PCH* have no PCIe capability registers */
   1863 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1864 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1865 				NULL) == 0)
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "unable to find PCIe capability\n");
   1868 		}
   1869 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1870 	} else {
   1871 		reg = CSR_READ(sc, WMREG_STATUS);
   1872 		if (reg & STATUS_BUS64)
   1873 			sc->sc_flags |= WM_F_BUS64;
   1874 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1875 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1876 
   1877 			sc->sc_flags |= WM_F_PCIX;
   1878 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1879 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1880 				aprint_error_dev(sc->sc_dev,
   1881 				    "unable to find PCIX capability\n");
   1882 			else if (sc->sc_type != WM_T_82545_3 &&
   1883 				 sc->sc_type != WM_T_82546_3) {
   1884 				/*
   1885 				 * Work around a problem caused by the BIOS
   1886 				 * setting the max memory read byte count
   1887 				 * incorrectly.
   1888 				 */
   1889 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1890 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1891 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1892 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1893 
   1894 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1895 				    PCIX_CMD_BYTECNT_SHIFT;
   1896 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1897 				    PCIX_STATUS_MAXB_SHIFT;
   1898 				if (bytecnt > maxb) {
   1899 					aprint_verbose_dev(sc->sc_dev,
   1900 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1901 					    512 << bytecnt, 512 << maxb);
   1902 					pcix_cmd = (pcix_cmd &
   1903 					    ~PCIX_CMD_BYTECNT_MASK) |
   1904 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1905 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1906 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1907 					    pcix_cmd);
   1908 				}
   1909 			}
   1910 		}
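		/*
		 * Worked MMRBC example (illustrative): if the BIOS left
		 * bytecnt = 3 (512 << 3 = 4096 bytes) but the device only
		 * supports maxb = 1 (512 << 1 = 1024 bytes), the code above
		 * rewrites the PCIX_CMD byte-count field to 1, capping
		 * memory reads at 1024 bytes.
		 */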
   1911 		/*
   1912 		 * The quad port adapter is special; it has a PCIX-PCIX
   1913 		 * bridge on the board, and can run the secondary bus at
   1914 		 * a higher speed.
   1915 		 */
   1916 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1917 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1918 								      : 66;
   1919 		} else if (sc->sc_flags & WM_F_PCIX) {
   1920 			switch (reg & STATUS_PCIXSPD_MASK) {
   1921 			case STATUS_PCIXSPD_50_66:
   1922 				sc->sc_bus_speed = 66;
   1923 				break;
   1924 			case STATUS_PCIXSPD_66_100:
   1925 				sc->sc_bus_speed = 100;
   1926 				break;
   1927 			case STATUS_PCIXSPD_100_133:
   1928 				sc->sc_bus_speed = 133;
   1929 				break;
   1930 			default:
   1931 				aprint_error_dev(sc->sc_dev,
   1932 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1933 				    reg & STATUS_PCIXSPD_MASK);
   1934 				sc->sc_bus_speed = 66;
   1935 				break;
   1936 			}
   1937 		} else
   1938 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1939 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1940 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1941 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1942 	}
   1943 
   1944 	/* clear interesting stat counters */
   1945 	CSR_READ(sc, WMREG_COLC);
   1946 	CSR_READ(sc, WMREG_RXERRC);
   1947 
   1948 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1949 	    || (sc->sc_type >= WM_T_ICH8))
   1950 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1951 	if (sc->sc_type >= WM_T_ICH8)
   1952 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1953 
    1954 	/* Set up NVM parameters and PHY/NVM semaphore functions */
   1955 	switch (sc->sc_type) {
   1956 	case WM_T_82542_2_0:
   1957 	case WM_T_82542_2_1:
   1958 	case WM_T_82543:
   1959 	case WM_T_82544:
   1960 		/* Microwire */
   1961 		sc->sc_nvm_wordsize = 64;
   1962 		sc->sc_nvm_addrbits = 6;
   1963 		break;
   1964 	case WM_T_82540:
   1965 	case WM_T_82545:
   1966 	case WM_T_82545_3:
   1967 	case WM_T_82546:
   1968 	case WM_T_82546_3:
   1969 		/* Microwire */
   1970 		reg = CSR_READ(sc, WMREG_EECD);
   1971 		if (reg & EECD_EE_SIZE) {
   1972 			sc->sc_nvm_wordsize = 256;
   1973 			sc->sc_nvm_addrbits = 8;
   1974 		} else {
   1975 			sc->sc_nvm_wordsize = 64;
   1976 			sc->sc_nvm_addrbits = 6;
   1977 		}
   1978 		sc->sc_flags |= WM_F_LOCK_EECD;
   1979 		break;
   1980 	case WM_T_82541:
   1981 	case WM_T_82541_2:
   1982 	case WM_T_82547:
   1983 	case WM_T_82547_2:
   1984 		sc->sc_flags |= WM_F_LOCK_EECD;
   1985 		reg = CSR_READ(sc, WMREG_EECD);
   1986 		if (reg & EECD_EE_TYPE) {
   1987 			/* SPI */
   1988 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1989 			wm_nvm_set_addrbits_size_eecd(sc);
   1990 		} else {
   1991 			/* Microwire */
   1992 			if ((reg & EECD_EE_ABITS) != 0) {
   1993 				sc->sc_nvm_wordsize = 256;
   1994 				sc->sc_nvm_addrbits = 8;
   1995 			} else {
   1996 				sc->sc_nvm_wordsize = 64;
   1997 				sc->sc_nvm_addrbits = 6;
   1998 			}
   1999 		}
   2000 		break;
   2001 	case WM_T_82571:
   2002 	case WM_T_82572:
   2003 		/* SPI */
   2004 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2005 		wm_nvm_set_addrbits_size_eecd(sc);
   2006 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2007 		sc->phy.acquire = wm_get_swsm_semaphore;
   2008 		sc->phy.release = wm_put_swsm_semaphore;
   2009 		break;
   2010 	case WM_T_82573:
   2011 	case WM_T_82574:
   2012 	case WM_T_82583:
   2013 		if (sc->sc_type == WM_T_82573) {
   2014 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2015 			sc->phy.acquire = wm_get_swsm_semaphore;
   2016 			sc->phy.release = wm_put_swsm_semaphore;
   2017 		} else {
   2018 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2019 			/* Both PHY and NVM use the same semaphore. */
   2020 			sc->phy.acquire
   2021 			    = wm_get_swfwhw_semaphore;
   2022 			sc->phy.release
   2023 			    = wm_put_swfwhw_semaphore;
   2024 		}
   2025 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2026 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2027 			sc->sc_nvm_wordsize = 2048;
   2028 		} else {
   2029 			/* SPI */
   2030 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2031 			wm_nvm_set_addrbits_size_eecd(sc);
   2032 		}
   2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2034 		break;
   2035 	case WM_T_82575:
   2036 	case WM_T_82576:
   2037 	case WM_T_82580:
   2038 	case WM_T_I350:
   2039 	case WM_T_I354:
   2040 	case WM_T_80003:
   2041 		/* SPI */
   2042 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2043 		wm_nvm_set_addrbits_size_eecd(sc);
   2044 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2045 		    | WM_F_LOCK_SWSM;
   2046 		sc->phy.acquire = wm_get_phy_82575;
   2047 		sc->phy.release = wm_put_phy_82575;
   2048 		break;
   2049 	case WM_T_ICH8:
   2050 	case WM_T_ICH9:
   2051 	case WM_T_ICH10:
   2052 	case WM_T_PCH:
   2053 	case WM_T_PCH2:
   2054 	case WM_T_PCH_LPT:
   2055 		/* FLASH */
   2056 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2057 		sc->sc_nvm_wordsize = 2048;
   2058 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2059 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2060 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2061 			aprint_error_dev(sc->sc_dev,
   2062 			    "can't map FLASH registers\n");
   2063 			goto out;
   2064 		}
   2065 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2066 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2067 		    ICH_FLASH_SECTOR_SIZE;
   2068 		sc->sc_ich8_flash_bank_size =
   2069 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2070 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2071 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2072 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
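		/*
		 * Worked example (illustrative, assuming the usual 4KB
		 * flash sector): GFPREG = 0x00200001 gives a flash base of
		 * 1 * 4KB = 4KB and (0x20 + 1 - 0x01) sectors * 4KB = 128KB
		 * of flash, i.e. 128KB / 4 = 32K 16-bit words per bank.
		 */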
   2073 		sc->sc_flashreg_offset = 0;
   2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2075 		sc->phy.release = wm_put_swflag_ich8lan;
   2076 		break;
   2077 	case WM_T_PCH_SPT:
   2078 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2079 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2080 		sc->sc_flasht = sc->sc_st;
   2081 		sc->sc_flashh = sc->sc_sh;
   2082 		sc->sc_ich8_flash_base = 0;
   2083 		sc->sc_nvm_wordsize =
   2084 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2085 			* NVM_SIZE_MULTIPLIER;
    2086 		/* It is the size in bytes; we want words */
   2087 		sc->sc_nvm_wordsize /= 2;
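		/*
		 * Worked example (illustrative, assuming the usual 4KB
		 * NVM_SIZE_MULTIPLIER): a strap field value of 7 gives
		 * (7 + 1) * 4KB = 32KB, i.e. 16K 16-bit words.
		 */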
   2088 		/* assume 2 banks */
   2089 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2090 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2091 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2092 		sc->phy.release = wm_put_swflag_ich8lan;
   2093 		break;
   2094 	case WM_T_I210:
   2095 	case WM_T_I211:
   2096 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2097 			wm_nvm_set_addrbits_size_eecd(sc);
   2098 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2099 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2100 		} else {
   2101 			sc->sc_nvm_wordsize = INVM_SIZE;
   2102 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2103 		}
   2104 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2105 		sc->phy.acquire = wm_get_phy_82575;
   2106 		sc->phy.release = wm_put_phy_82575;
   2107 		break;
   2108 	default:
   2109 		break;
   2110 	}
   2111 
   2112 	/* Reset the chip to a known state. */
   2113 	wm_reset(sc);
   2114 
   2115 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2116 	switch (sc->sc_type) {
   2117 	case WM_T_82571:
   2118 	case WM_T_82572:
   2119 		reg = CSR_READ(sc, WMREG_SWSM2);
   2120 		if ((reg & SWSM2_LOCK) == 0) {
   2121 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2122 			force_clear_smbi = true;
   2123 		} else
   2124 			force_clear_smbi = false;
   2125 		break;
   2126 	case WM_T_82573:
   2127 	case WM_T_82574:
   2128 	case WM_T_82583:
   2129 		force_clear_smbi = true;
   2130 		break;
   2131 	default:
   2132 		force_clear_smbi = false;
   2133 		break;
   2134 	}
   2135 	if (force_clear_smbi) {
   2136 		reg = CSR_READ(sc, WMREG_SWSM);
   2137 		if ((reg & SWSM_SMBI) != 0)
   2138 			aprint_error_dev(sc->sc_dev,
   2139 			    "Please update the Bootagent\n");
   2140 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2141 	}
   2142 
   2143 	/*
    2144 	 * Defer printing the EEPROM type until after verifying the checksum.
   2145 	 * This allows the EEPROM type to be printed correctly in the case
   2146 	 * that no EEPROM is attached.
   2147 	 */
   2148 	/*
   2149 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2150 	 * this for later, so we can fail future reads from the EEPROM.
   2151 	 */
   2152 	if (wm_nvm_validate_checksum(sc)) {
   2153 		/*
    2154 		 * Validate again, because some PCIe parts fail the
    2155 		 * first check due to the link being in a sleep state.
   2156 		 */
   2157 		if (wm_nvm_validate_checksum(sc))
   2158 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2159 	}
   2160 
   2161 	/* Set device properties (macflags) */
   2162 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2163 
   2164 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2165 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2166 	else {
   2167 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2168 		    sc->sc_nvm_wordsize);
   2169 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2170 			aprint_verbose("iNVM");
   2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2172 			aprint_verbose("FLASH(HW)");
   2173 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2174 			aprint_verbose("FLASH");
   2175 		else {
   2176 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2177 				eetype = "SPI";
   2178 			else
   2179 				eetype = "MicroWire";
   2180 			aprint_verbose("(%d address bits) %s EEPROM",
   2181 			    sc->sc_nvm_addrbits, eetype);
   2182 		}
   2183 	}
   2184 	wm_nvm_version(sc);
   2185 	aprint_verbose("\n");
   2186 
   2187 	/* Check for I21[01] PLL workaround */
   2188 	if (sc->sc_type == WM_T_I210)
   2189 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2190 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2191 		/* NVM image release 3.25 has a workaround */
   2192 		if ((sc->sc_nvm_ver_major < 3)
   2193 		    || ((sc->sc_nvm_ver_major == 3)
   2194 			&& (sc->sc_nvm_ver_minor < 25))) {
   2195 			aprint_verbose_dev(sc->sc_dev,
   2196 			    "ROM image version %d.%d is older than 3.25\n",
   2197 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2198 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2199 		}
   2200 	}
   2201 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2202 		wm_pll_workaround_i210(sc);
   2203 
   2204 	wm_get_wakeup(sc);
   2205 
   2206 	/* Non-AMT based hardware can now take control from firmware */
   2207 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2208 		wm_get_hw_control(sc);
   2209 
   2210 	/*
    2211 	 * Read the Ethernet address from the EEPROM unless it is
    2212 	 * already set in the device properties.
   2213 	 */
   2214 	ea = prop_dictionary_get(dict, "mac-address");
   2215 	if (ea != NULL) {
   2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2219 	} else {
   2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2221 			aprint_error_dev(sc->sc_dev,
   2222 			    "unable to read Ethernet address\n");
   2223 			goto out;
   2224 		}
   2225 	}
   2226 
   2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2228 	    ether_sprintf(enaddr));
   2229 
   2230 	/*
   2231 	 * Read the config info from the EEPROM, and set up various
   2232 	 * bits in the control registers based on their contents.
   2233 	 */
   2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2235 	if (pn != NULL) {
   2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2238 	} else {
   2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2241 			goto out;
   2242 		}
   2243 	}
   2244 
   2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2246 	if (pn != NULL) {
   2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2249 	} else {
   2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2252 			goto out;
   2253 		}
   2254 	}
   2255 
   2256 	/* check for WM_F_WOL */
   2257 	switch (sc->sc_type) {
   2258 	case WM_T_82542_2_0:
   2259 	case WM_T_82542_2_1:
   2260 	case WM_T_82543:
   2261 		/* dummy? */
   2262 		eeprom_data = 0;
   2263 		apme_mask = NVM_CFG3_APME;
   2264 		break;
   2265 	case WM_T_82544:
   2266 		apme_mask = NVM_CFG2_82544_APM_EN;
   2267 		eeprom_data = cfg2;
   2268 		break;
   2269 	case WM_T_82546:
   2270 	case WM_T_82546_3:
   2271 	case WM_T_82571:
   2272 	case WM_T_82572:
   2273 	case WM_T_82573:
   2274 	case WM_T_82574:
   2275 	case WM_T_82583:
   2276 	case WM_T_80003:
   2277 	default:
   2278 		apme_mask = NVM_CFG3_APME;
   2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2281 		break;
   2282 	case WM_T_82575:
   2283 	case WM_T_82576:
   2284 	case WM_T_82580:
   2285 	case WM_T_I350:
   2286 	case WM_T_I354: /* XXX ok? */
   2287 	case WM_T_ICH8:
   2288 	case WM_T_ICH9:
   2289 	case WM_T_ICH10:
   2290 	case WM_T_PCH:
   2291 	case WM_T_PCH2:
   2292 	case WM_T_PCH_LPT:
   2293 	case WM_T_PCH_SPT:
   2294 		/* XXX The funcid should be checked on some devices */
   2295 		apme_mask = WUC_APME;
   2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2297 		break;
   2298 	}
   2299 
    2300 	/* Check for the WM_F_WOL flag after reading the EEPROM data above */
   2301 	if ((eeprom_data & apme_mask) != 0)
   2302 		sc->sc_flags |= WM_F_WOL;
   2303 #ifdef WM_DEBUG
   2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2305 		printf("WOL\n");
   2306 #endif
   2307 
   2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2309 		/* Check NVM for autonegotiation */
   2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2313 		}
   2314 	}
   2315 
   2316 	/*
    2317 	 * XXX Some multi-port cards need special handling to
    2318 	 * disable a particular port.
   2319 	 */
   2320 
   2321 	if (sc->sc_type >= WM_T_82544) {
   2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2323 		if (pn != NULL) {
   2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2326 		} else {
   2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2328 				aprint_error_dev(sc->sc_dev,
   2329 				    "unable to read SWDPIN\n");
   2330 				goto out;
   2331 			}
   2332 		}
   2333 	}
   2334 
   2335 	if (cfg1 & NVM_CFG1_ILOS)
   2336 		sc->sc_ctrl |= CTRL_ILOS;
   2337 
   2338 	/*
   2339 	 * XXX
    2340 	 * This code isn't correct, because pins 2 and 3 are located
    2341 	 * at different positions on newer chips. Check all datasheets.
    2342 	 *
    2343 	 * Until this problem is resolved, only handle chips < 82580.
   2344 	 */
   2345 	if (sc->sc_type <= WM_T_82580) {
   2346 		if (sc->sc_type >= WM_T_82544) {
   2347 			sc->sc_ctrl |=
   2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2349 			    CTRL_SWDPIO_SHIFT;
   2350 			sc->sc_ctrl |=
   2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2352 			    CTRL_SWDPINS_SHIFT;
   2353 		} else {
   2354 			sc->sc_ctrl |=
   2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2356 			    CTRL_SWDPIO_SHIFT;
   2357 		}
   2358 	}
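
	/*
	 * Worked example (illustrative): if the SWDPIO nibble extracted
	 * from the NVM word is 0xf and the SWDPIN nibble is 0x0, the code
	 * above configures all four software-definable pins as outputs
	 * (CTRL_SWDPIO bits set) driving 0 (CTRL_SWDPINS bits clear).
	 */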
   2359 
   2360 	/* XXX For other than 82580? */
   2361 	if (sc->sc_type == WM_T_82580) {
   2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2363 		if (nvmword & __BIT(13))
   2364 			sc->sc_ctrl |= CTRL_ILOS;
   2365 	}
   2366 
   2367 #if 0
   2368 	if (sc->sc_type >= WM_T_82544) {
   2369 		if (cfg1 & NVM_CFG1_IPS0)
   2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2371 		if (cfg1 & NVM_CFG1_IPS1)
   2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2373 		sc->sc_ctrl_ext |=
   2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2375 		    CTRL_EXT_SWDPIO_SHIFT;
   2376 		sc->sc_ctrl_ext |=
   2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2378 		    CTRL_EXT_SWDPINS_SHIFT;
   2379 	} else {
   2380 		sc->sc_ctrl_ext |=
   2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2382 		    CTRL_EXT_SWDPIO_SHIFT;
   2383 	}
   2384 #endif
   2385 
   2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2387 #if 0
   2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2389 #endif
   2390 
   2391 	if (sc->sc_type == WM_T_PCH) {
   2392 		uint16_t val;
   2393 
   2394 		/* Save the NVM K1 bit setting */
   2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2396 
   2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2398 			sc->sc_nvm_k1_enabled = 1;
   2399 		else
   2400 			sc->sc_nvm_k1_enabled = 0;
   2401 	}
   2402 
   2403 	/*
    2404 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2405 	 * media structures accordingly.
   2406 	 */
   2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2414 	} else if (sc->sc_type < WM_T_82543 ||
   2415 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2416 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2417 			aprint_error_dev(sc->sc_dev,
   2418 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2419 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2420 		}
   2421 		wm_tbi_mediainit(sc);
   2422 	} else {
   2423 		switch (sc->sc_type) {
   2424 		case WM_T_82575:
   2425 		case WM_T_82576:
   2426 		case WM_T_82580:
   2427 		case WM_T_I350:
   2428 		case WM_T_I354:
   2429 		case WM_T_I210:
   2430 		case WM_T_I211:
   2431 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2432 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2433 			switch (link_mode) {
   2434 			case CTRL_EXT_LINK_MODE_1000KX:
   2435 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2436 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2437 				break;
   2438 			case CTRL_EXT_LINK_MODE_SGMII:
   2439 				if (wm_sgmii_uses_mdio(sc)) {
   2440 					aprint_verbose_dev(sc->sc_dev,
   2441 					    "SGMII(MDIO)\n");
   2442 					sc->sc_flags |= WM_F_SGMII;
   2443 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2444 					break;
   2445 				}
   2446 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2447 				/*FALLTHROUGH*/
   2448 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2449 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2450 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2451 					if (link_mode
   2452 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2453 						sc->sc_mediatype
   2454 						    = WM_MEDIATYPE_COPPER;
   2455 						sc->sc_flags |= WM_F_SGMII;
   2456 					} else {
   2457 						sc->sc_mediatype
   2458 						    = WM_MEDIATYPE_SERDES;
   2459 						aprint_verbose_dev(sc->sc_dev,
   2460 						    "SERDES\n");
   2461 					}
   2462 					break;
   2463 				}
   2464 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2465 					aprint_verbose_dev(sc->sc_dev,
   2466 					    "SERDES\n");
   2467 
   2468 				/* Change current link mode setting */
   2469 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2470 				switch (sc->sc_mediatype) {
   2471 				case WM_MEDIATYPE_COPPER:
   2472 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2473 					break;
   2474 				case WM_MEDIATYPE_SERDES:
   2475 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2476 					break;
   2477 				default:
   2478 					break;
   2479 				}
   2480 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2481 				break;
   2482 			case CTRL_EXT_LINK_MODE_GMII:
   2483 			default:
   2484 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2485 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2486 				break;
   2487 			}
   2488 
    2490 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2491 				reg |= CTRL_EXT_I2C_ENA;
    2492 			else
    2493 				reg &= ~CTRL_EXT_I2C_ENA;
   2494 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2495 
   2496 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2497 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2498 			else
   2499 				wm_tbi_mediainit(sc);
   2500 			break;
   2501 		default:
   2502 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2503 				aprint_error_dev(sc->sc_dev,
   2504 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2505 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2506 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2507 		}
   2508 	}
   2509 
   2510 	ifp = &sc->sc_ethercom.ec_if;
   2511 	xname = device_xname(sc->sc_dev);
   2512 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2513 	ifp->if_softc = sc;
   2514 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2515 	ifp->if_extflags = IFEF_START_MPSAFE;
   2516 	ifp->if_ioctl = wm_ioctl;
   2517 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2518 		ifp->if_start = wm_nq_start;
   2519 		if (sc->sc_nqueues > 1) {
   2520 			ifp->if_transmit = wm_nq_transmit;
   2521 			deferred_start_func = wm_deferred_start;
   2522 		}
   2523 	} else {
   2524 		ifp->if_start = wm_start;
   2525 		if (sc->sc_nqueues > 1) {
   2526 			ifp->if_transmit = wm_transmit;
   2527 			deferred_start_func = wm_deferred_start;
   2528 		}
   2529 	}
   2530 	ifp->if_watchdog = wm_watchdog;
   2531 	ifp->if_init = wm_init;
   2532 	ifp->if_stop = wm_stop;
   2533 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2534 	IFQ_SET_READY(&ifp->if_snd);
   2535 
   2536 	/* Check for jumbo frame */
   2537 	switch (sc->sc_type) {
   2538 	case WM_T_82573:
   2539 		/* XXX limited to 9234 if ASPM is disabled */
   2540 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2541 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2542 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2543 		break;
   2544 	case WM_T_82571:
   2545 	case WM_T_82572:
   2546 	case WM_T_82574:
   2547 	case WM_T_82575:
   2548 	case WM_T_82576:
   2549 	case WM_T_82580:
   2550 	case WM_T_I350:
   2551 	case WM_T_I354: /* XXXX ok? */
   2552 	case WM_T_I210:
   2553 	case WM_T_I211:
   2554 	case WM_T_80003:
   2555 	case WM_T_ICH9:
   2556 	case WM_T_ICH10:
   2557 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2558 	case WM_T_PCH_LPT:
   2559 	case WM_T_PCH_SPT:
   2560 		/* XXX limited to 9234 */
   2561 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2562 		break;
   2563 	case WM_T_PCH:
   2564 		/* XXX limited to 4096 */
   2565 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2566 		break;
   2567 	case WM_T_82542_2_0:
   2568 	case WM_T_82542_2_1:
   2569 	case WM_T_82583:
   2570 	case WM_T_ICH8:
   2571 		/* No support for jumbo frame */
   2572 		break;
   2573 	default:
   2574 		/* ETHER_MAX_LEN_JUMBO */
   2575 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2576 		break;
   2577 	}
   2578 
    2579 	/* If we're an i82543 or greater, we can support VLANs. */
   2580 	if (sc->sc_type >= WM_T_82543)
   2581 		sc->sc_ethercom.ec_capabilities |=
   2582 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2583 
   2584 	/*
    2585 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2586 	 * on i82543 and later.
   2587 	 */
   2588 	if (sc->sc_type >= WM_T_82543) {
   2589 		ifp->if_capabilities |=
   2590 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2591 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2592 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2593 		    IFCAP_CSUM_TCPv6_Tx |
   2594 		    IFCAP_CSUM_UDPv6_Tx;
   2595 	}
   2596 
   2597 	/*
    2598 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2599 	 *
   2600 	 *	82541GI (8086:1076) ... no
   2601 	 *	82572EI (8086:10b9) ... yes
   2602 	 */
   2603 	if (sc->sc_type >= WM_T_82571) {
   2604 		ifp->if_capabilities |=
   2605 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2606 	}
   2607 
   2608 	/*
    2609 	 * If we're an i82544 or greater (except i82547), we can do
   2610 	 * TCP segmentation offload.
   2611 	 */
   2612 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2613 		ifp->if_capabilities |= IFCAP_TSOv4;
   2614 	}
   2615 
   2616 	if (sc->sc_type >= WM_T_82571) {
   2617 		ifp->if_capabilities |= IFCAP_TSOv6;
   2618 	}
   2619 
   2620 #ifdef WM_MPSAFE
   2621 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2622 #else
   2623 	sc->sc_core_lock = NULL;
   2624 #endif
   2625 
   2626 	/* Attach the interface. */
   2627 	if_initialize(ifp);
   2628 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2629 	if_deferred_start_init(ifp, deferred_start_func);
   2630 	ether_ifattach(ifp, enaddr);
   2631 	if_register(ifp);
   2632 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2633 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2634 			  RND_FLAG_DEFAULT);
   2635 
   2636 #ifdef WM_EVENT_COUNTERS
   2637 	/* Attach event counters. */
   2638 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2639 	    NULL, xname, "linkintr");
   2640 
   2641 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2642 	    NULL, xname, "tx_xoff");
   2643 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2644 	    NULL, xname, "tx_xon");
   2645 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2646 	    NULL, xname, "rx_xoff");
   2647 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2648 	    NULL, xname, "rx_xon");
   2649 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2650 	    NULL, xname, "rx_macctl");
   2651 #endif /* WM_EVENT_COUNTERS */
   2652 
   2653 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2654 		pmf_class_network_register(self, ifp);
   2655 	else
   2656 		aprint_error_dev(self, "couldn't establish power handler\n");
   2657 
   2658 	sc->sc_flags |= WM_F_ATTACHED;
   2659  out:
   2660 	return;
   2661 }
   2662 
   2663 /* The detach function (ca_detach) */
   2664 static int
   2665 wm_detach(device_t self, int flags __unused)
   2666 {
   2667 	struct wm_softc *sc = device_private(self);
   2668 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2669 	int i;
   2670 
   2671 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2672 		return 0;
   2673 
   2674 	/* Stop the interface. Callouts are stopped in it. */
   2675 	wm_stop(ifp, 1);
   2676 
   2677 	pmf_device_deregister(self);
   2678 
   2679 	/* Tell the firmware about the release */
   2680 	WM_CORE_LOCK(sc);
   2681 	wm_release_manageability(sc);
   2682 	wm_release_hw_control(sc);
   2683 	wm_enable_wakeup(sc);
   2684 	WM_CORE_UNLOCK(sc);
   2685 
   2686 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2687 
   2688 	/* Delete all remaining media. */
   2689 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2690 
   2691 	ether_ifdetach(ifp);
   2692 	if_detach(ifp);
   2693 	if_percpuq_destroy(sc->sc_ipq);
   2694 
   2695 	/* Unload RX dmamaps and free mbufs */
   2696 	for (i = 0; i < sc->sc_nqueues; i++) {
   2697 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2698 		mutex_enter(rxq->rxq_lock);
   2699 		wm_rxdrain(rxq);
   2700 		mutex_exit(rxq->rxq_lock);
   2701 	}
   2702 	/* Must unlock here */
   2703 
   2704 	/* Disestablish the interrupt handler */
   2705 	for (i = 0; i < sc->sc_nintrs; i++) {
   2706 		if (sc->sc_ihs[i] != NULL) {
   2707 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2708 			sc->sc_ihs[i] = NULL;
   2709 		}
   2710 	}
   2711 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2712 
   2713 	wm_free_txrx_queues(sc);
   2714 
   2715 	/* Unmap the registers */
   2716 	if (sc->sc_ss) {
   2717 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2718 		sc->sc_ss = 0;
   2719 	}
   2720 	if (sc->sc_ios) {
   2721 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2722 		sc->sc_ios = 0;
   2723 	}
   2724 	if (sc->sc_flashs) {
   2725 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2726 		sc->sc_flashs = 0;
   2727 	}
   2728 
   2729 	if (sc->sc_core_lock)
   2730 		mutex_obj_free(sc->sc_core_lock);
   2731 	if (sc->sc_ich_phymtx)
   2732 		mutex_obj_free(sc->sc_ich_phymtx);
   2733 	if (sc->sc_ich_nvmmtx)
   2734 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2735 
   2736 	return 0;
   2737 }
   2738 
   2739 static bool
   2740 wm_suspend(device_t self, const pmf_qual_t *qual)
   2741 {
   2742 	struct wm_softc *sc = device_private(self);
   2743 
   2744 	wm_release_manageability(sc);
   2745 	wm_release_hw_control(sc);
   2746 	wm_enable_wakeup(sc);
   2747 
   2748 	return true;
   2749 }
   2750 
   2751 static bool
   2752 wm_resume(device_t self, const pmf_qual_t *qual)
   2753 {
   2754 	struct wm_softc *sc = device_private(self);
   2755 
   2756 	wm_init_manageability(sc);
   2757 
   2758 	return true;
   2759 }
   2760 
   2761 /*
   2762  * wm_watchdog:		[ifnet interface function]
   2763  *
   2764  *	Watchdog timer handler.
   2765  */
   2766 static void
   2767 wm_watchdog(struct ifnet *ifp)
   2768 {
   2769 	int qid;
   2770 	struct wm_softc *sc = ifp->if_softc;
   2771 
   2772 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2773 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2774 
   2775 		wm_watchdog_txq(ifp, txq);
   2776 	}
   2777 
   2778 	/* Reset the interface. */
   2779 	(void) wm_init(ifp);
   2780 
   2781 	/*
    2782 	 * There is still some upper-layer processing which calls
    2783 	 * ifp->if_start(), e.g. ALTQ.
   2784 	 */
   2785 	/* Try to get more packets going. */
   2786 	ifp->if_start(ifp);
   2787 }
   2788 
   2789 static void
   2790 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2791 {
   2792 	struct wm_softc *sc = ifp->if_softc;
   2793 
   2794 	/*
   2795 	 * Since we're using delayed interrupts, sweep up
   2796 	 * before we report an error.
   2797 	 */
   2798 	mutex_enter(txq->txq_lock);
   2799 	wm_txeof(sc, txq);
   2800 	mutex_exit(txq->txq_lock);
   2801 
   2802 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2803 #ifdef WM_DEBUG
   2804 		int i, j;
   2805 		struct wm_txsoft *txs;
   2806 #endif
   2807 		log(LOG_ERR,
   2808 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2809 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2810 		    txq->txq_next);
   2811 		ifp->if_oerrors++;
   2812 #ifdef WM_DEBUG
    2813 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2814 		    i = WM_NEXTTXS(txq, i)) {
    2815 			txs = &txq->txq_soft[i];
    2816 			printf("txs %d tx %d -> %d\n",
    2817 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2818 			for (j = txs->txs_firstdesc; ;
    2819 			    j = WM_NEXTTX(txq, j)) {
    2820 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2821 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2822 				printf("\t %#08x%08x\n",
    2823 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2824 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2825 				if (j == txs->txs_lastdesc)
    2826 					break;
    2827 			}
    2828 		}
   2829 #endif
   2830 	}
   2831 }
   2832 
   2833 /*
   2834  * wm_tick:
   2835  *
   2836  *	One second timer, used to check link status, sweep up
   2837  *	completed transmit jobs, etc.
   2838  */
   2839 static void
   2840 wm_tick(void *arg)
   2841 {
   2842 	struct wm_softc *sc = arg;
   2843 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2844 #ifndef WM_MPSAFE
   2845 	int s = splnet();
   2846 #endif
   2847 
   2848 	WM_CORE_LOCK(sc);
   2849 
   2850 	if (sc->sc_core_stopping)
   2851 		goto out;
   2852 
   2853 	if (sc->sc_type >= WM_T_82542_2_1) {
   2854 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2855 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2856 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2857 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2858 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2859 	}
   2860 
   2861 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2862 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2863 	    + CSR_READ(sc, WMREG_CRCERRS)
   2864 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2865 	    + CSR_READ(sc, WMREG_SYMERRC)
   2866 	    + CSR_READ(sc, WMREG_RXERRC)
   2867 	    + CSR_READ(sc, WMREG_SEC)
   2868 	    + CSR_READ(sc, WMREG_CEXTERR)
   2869 	    + CSR_READ(sc, WMREG_RLEC);
   2870 	/*
    2871 	 * WMREG_RNBC is incremented when there are no available buffers
    2872 	 * in host memory. It does not count dropped packets, because the
    2873 	 * ethernet controller can still receive packets in that case if
    2874 	 * there is space in the PHY's FIFO.
    2875 	 *
    2876 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
    2877 	 * if_iqdrops.
   2878 	 */
   2879 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2880 
   2881 	if (sc->sc_flags & WM_F_HAS_MII)
   2882 		mii_tick(&sc->sc_mii);
   2883 	else if ((sc->sc_type >= WM_T_82575)
   2884 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2885 		wm_serdes_tick(sc);
   2886 	else
   2887 		wm_tbi_tick(sc);
   2888 
   2889 out:
   2890 	WM_CORE_UNLOCK(sc);
   2891 #ifndef WM_MPSAFE
   2892 	splx(s);
   2893 #endif
   2894 
   2895 	if (!sc->sc_core_stopping)
   2896 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2897 }
   2898 
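/*
 * A minimal sketch, not part of this driver, of counting WMREG_RNBC
 * with a private evcnt as the comment in wm_tick() suggests; the
 * "sc_ev_rnbc" member is hypothetical:
 *
 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC, NULL,
 *	    device_xname(sc->sc_dev), "rx no buffers");
 *	...
 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
 */
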
   2899 static int
   2900 wm_ifflags_cb(struct ethercom *ec)
   2901 {
   2902 	struct ifnet *ifp = &ec->ec_if;
   2903 	struct wm_softc *sc = ifp->if_softc;
   2904 	int rc = 0;
   2905 
   2906 	WM_CORE_LOCK(sc);
   2907 
   2908 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2909 	sc->sc_if_flags = ifp->if_flags;
   2910 
   2911 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2912 		rc = ENETRESET;
   2913 		goto out;
   2914 	}
   2915 
   2916 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2917 		wm_set_filter(sc);
   2918 
   2919 	wm_set_vlan(sc);
   2920 
   2921 out:
   2922 	WM_CORE_UNLOCK(sc);
   2923 
   2924 	return rc;
   2925 }
   2926 
   2927 /*
   2928  * wm_ioctl:		[ifnet interface function]
   2929  *
   2930  *	Handle control requests from the operator.
   2931  */
   2932 static int
   2933 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2934 {
   2935 	struct wm_softc *sc = ifp->if_softc;
   2936 	struct ifreq *ifr = (struct ifreq *) data;
   2937 	struct ifaddr *ifa = (struct ifaddr *)data;
   2938 	struct sockaddr_dl *sdl;
   2939 	int s, error;
   2940 
   2941 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2942 		device_xname(sc->sc_dev), __func__));
   2943 
   2944 #ifndef WM_MPSAFE
   2945 	s = splnet();
   2946 #endif
   2947 	switch (cmd) {
   2948 	case SIOCSIFMEDIA:
   2949 	case SIOCGIFMEDIA:
   2950 		WM_CORE_LOCK(sc);
   2951 		/* Flow control requires full-duplex mode. */
   2952 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2953 		    (ifr->ifr_media & IFM_FDX) == 0)
   2954 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2955 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2956 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2957 				/* We can do both TXPAUSE and RXPAUSE. */
   2958 				ifr->ifr_media |=
   2959 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2960 			}
   2961 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2962 		}
   2963 		WM_CORE_UNLOCK(sc);
   2964 #ifdef WM_MPSAFE
   2965 		s = splnet();
   2966 #endif
   2967 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2968 #ifdef WM_MPSAFE
   2969 		splx(s);
   2970 #endif
   2971 		break;
   2972 	case SIOCINITIFADDR:
   2973 		WM_CORE_LOCK(sc);
   2974 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2975 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2976 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2977 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2978 			/* unicast address is first multicast entry */
   2979 			wm_set_filter(sc);
   2980 			error = 0;
   2981 			WM_CORE_UNLOCK(sc);
   2982 			break;
   2983 		}
   2984 		WM_CORE_UNLOCK(sc);
   2985 		/*FALLTHROUGH*/
   2986 	default:
   2987 #ifdef WM_MPSAFE
   2988 		s = splnet();
   2989 #endif
    2990 		/* ether_ioctl() may call wm_start(), so it must run unlocked */
   2991 		error = ether_ioctl(ifp, cmd, data);
   2992 #ifdef WM_MPSAFE
   2993 		splx(s);
   2994 #endif
   2995 		if (error != ENETRESET)
   2996 			break;
   2997 
   2998 		error = 0;
   2999 
   3000 		if (cmd == SIOCSIFCAP) {
   3001 			error = (*ifp->if_init)(ifp);
   3002 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3003 			;
   3004 		else if (ifp->if_flags & IFF_RUNNING) {
   3005 			/*
   3006 			 * Multicast list has changed; set the hardware filter
   3007 			 * accordingly.
   3008 			 */
   3009 			WM_CORE_LOCK(sc);
   3010 			wm_set_filter(sc);
   3011 			WM_CORE_UNLOCK(sc);
   3012 		}
   3013 		break;
   3014 	}
   3015 
   3016 #ifndef WM_MPSAFE
   3017 	splx(s);
   3018 #endif
   3019 	return error;
   3020 }
   3021 
   3022 /* MAC address related */
   3023 
   3024 /*
    3025  * Get the offset of the MAC address and return it.
    3026  * If an error occurs, use offset 0.
   3027  */
   3028 static uint16_t
   3029 wm_check_alt_mac_addr(struct wm_softc *sc)
   3030 {
   3031 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3032 	uint16_t offset = NVM_OFF_MACADDR;
   3033 
   3034 	/* Try to read alternative MAC address pointer */
   3035 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3036 		return 0;
   3037 
    3038 	/* Check whether the pointer is valid. */
   3039 	if ((offset == 0x0000) || (offset == 0xffff))
   3040 		return 0;
   3041 
   3042 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3043 	/*
    3044 	 * Check whether the alternative MAC address is valid.
    3045 	 * Some cards have a non-0xffff pointer but don't actually
    3046 	 * use an alternative MAC address.
   3047 	 *
   3048 	 * Check whether the broadcast bit is set or not.
   3049 	 */
   3050 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3051 		if (((myea[0] & 0xff) & 0x01) == 0)
   3052 			return offset; /* Found */
   3053 
   3054 	/* Not found */
   3055 	return 0;
   3056 }
   3057 
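/*
 * Worked example of the group-bit check above: the NVM stores the MAC
 * address as little-endian 16-bit words, so (myea[0] & 0xff) is the
 * first byte on the wire.  Its LSB is the Ethernet group
 * (multicast/broadcast) bit, which must be clear for a valid unicast
 * station address: myea[0] == 0x2a00 yields first byte 0x00 (valid),
 * while myea[0] == 0x2a01 yields 0x01 (rejected).
 */
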
   3058 static int
   3059 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3060 {
   3061 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3062 	uint16_t offset = NVM_OFF_MACADDR;
   3063 	int do_invert = 0;
   3064 
   3065 	switch (sc->sc_type) {
   3066 	case WM_T_82580:
   3067 	case WM_T_I350:
   3068 	case WM_T_I354:
   3069 		/* EEPROM Top Level Partitioning */
   3070 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3071 		break;
   3072 	case WM_T_82571:
   3073 	case WM_T_82575:
   3074 	case WM_T_82576:
   3075 	case WM_T_80003:
   3076 	case WM_T_I210:
   3077 	case WM_T_I211:
   3078 		offset = wm_check_alt_mac_addr(sc);
   3079 		if (offset == 0)
   3080 			if ((sc->sc_funcid & 0x01) == 1)
   3081 				do_invert = 1;
   3082 		break;
   3083 	default:
   3084 		if ((sc->sc_funcid & 0x01) == 1)
   3085 			do_invert = 1;
   3086 		break;
   3087 	}
   3088 
   3089 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3090 		goto bad;
   3091 
   3092 	enaddr[0] = myea[0] & 0xff;
   3093 	enaddr[1] = myea[0] >> 8;
   3094 	enaddr[2] = myea[1] & 0xff;
   3095 	enaddr[3] = myea[1] >> 8;
   3096 	enaddr[4] = myea[2] & 0xff;
   3097 	enaddr[5] = myea[2] >> 8;
   3098 
   3099 	/*
   3100 	 * Toggle the LSB of the MAC address on the second port
   3101 	 * of some dual port cards.
   3102 	 */
   3103 	if (do_invert != 0)
   3104 		enaddr[5] ^= 1;
   3105 
   3106 	return 0;
   3107 
   3108  bad:
   3109 	return -1;
   3110 }
   3111 
   3112 /*
   3113  * wm_set_ral:
   3114  *
    3115  *	Set an entry in the receive address list.
   3116  */
   3117 static void
   3118 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3119 {
   3120 	uint32_t ral_lo, ral_hi;
   3121 
   3122 	if (enaddr != NULL) {
   3123 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3124 		    (enaddr[3] << 24);
   3125 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3126 		ral_hi |= RAL_AV;
   3127 	} else {
   3128 		ral_lo = 0;
   3129 		ral_hi = 0;
   3130 	}
   3131 
   3132 	if (sc->sc_type >= WM_T_82544) {
   3133 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3134 		    ral_lo);
   3135 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3136 		    ral_hi);
   3137 	} else {
   3138 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3139 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3140 	}
   3141 }
   3142 
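/*
 * Packing example (derived from the code above, not from the
 * datasheet): for the address 00:11:22:33:44:55, ral_lo becomes
 * 0x33221100 and ral_hi becomes 0x5544 | RAL_AV; the first four
 * octets go into RAL_LO in little-endian order, the last two into
 * the low half of RAL_HI, and RAL_AV marks the entry valid.
 */
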
   3143 /*
   3144  * wm_mchash:
   3145  *
   3146  *	Compute the hash of the multicast address for the 4096-bit
   3147  *	multicast filter.
   3148  */
   3149 static uint32_t
   3150 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3151 {
   3152 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3153 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3154 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3155 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3156 	uint32_t hash;
   3157 
   3158 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3159 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3160 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3161 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3162 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3163 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3164 		return (hash & 0x3ff);
   3165 	}
   3166 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3167 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3168 
   3169 	return (hash & 0xfff);
   3170 }
   3171 
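/*
 * Worked example for the non-ICH path with sc_mchash_type == 0
 * (lo_shift 4, hi_shift 4): for the multicast address
 * 01:00:5e:00:00:01, enaddr[4] == 0x00 and enaddr[5] == 0x01, so
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010.  wm_set_filter() then
 * derives reg = hash >> 5 = 0 and bit = hash & 0x1f = 16, i.e.
 * bit 16 of MTA[0].
 */
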
   3172 /*
   3173  * wm_set_filter:
   3174  *
   3175  *	Set up the receive filter.
   3176  */
   3177 static void
   3178 wm_set_filter(struct wm_softc *sc)
   3179 {
   3180 	struct ethercom *ec = &sc->sc_ethercom;
   3181 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3182 	struct ether_multi *enm;
   3183 	struct ether_multistep step;
   3184 	bus_addr_t mta_reg;
   3185 	uint32_t hash, reg, bit;
   3186 	int i, size, ralmax;
   3187 
   3188 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3189 		device_xname(sc->sc_dev), __func__));
   3190 
   3191 	if (sc->sc_type >= WM_T_82544)
   3192 		mta_reg = WMREG_CORDOVA_MTA;
   3193 	else
   3194 		mta_reg = WMREG_MTA;
   3195 
   3196 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3197 
   3198 	if (ifp->if_flags & IFF_BROADCAST)
   3199 		sc->sc_rctl |= RCTL_BAM;
   3200 	if (ifp->if_flags & IFF_PROMISC) {
   3201 		sc->sc_rctl |= RCTL_UPE;
   3202 		goto allmulti;
   3203 	}
   3204 
   3205 	/*
   3206 	 * Set the station address in the first RAL slot, and
   3207 	 * clear the remaining slots.
   3208 	 */
   3209 	if (sc->sc_type == WM_T_ICH8)
    3210 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3211 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3212 	    || (sc->sc_type == WM_T_PCH))
   3213 		size = WM_RAL_TABSIZE_ICH8;
   3214 	else if (sc->sc_type == WM_T_PCH2)
   3215 		size = WM_RAL_TABSIZE_PCH2;
    3216 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3217 		size = WM_RAL_TABSIZE_PCH_LPT;
   3218 	else if (sc->sc_type == WM_T_82575)
   3219 		size = WM_RAL_TABSIZE_82575;
   3220 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3221 		size = WM_RAL_TABSIZE_82576;
   3222 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3223 		size = WM_RAL_TABSIZE_I350;
   3224 	else
   3225 		size = WM_RAL_TABSIZE;
   3226 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3227 
   3228 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3229 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3230 		switch (i) {
   3231 		case 0:
   3232 			/* We can use all entries */
   3233 			ralmax = size;
   3234 			break;
   3235 		case 1:
   3236 			/* Only RAR[0] */
   3237 			ralmax = 1;
   3238 			break;
   3239 		default:
   3240 			/* available SHRA + RAR[0] */
   3241 			ralmax = i + 1;
   3242 		}
   3243 	} else
   3244 		ralmax = size;
   3245 	for (i = 1; i < size; i++) {
   3246 		if (i < ralmax)
   3247 			wm_set_ral(sc, NULL, i);
   3248 	}
   3249 
   3250 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3251 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3252 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3253 	    || (sc->sc_type == WM_T_PCH_SPT))
   3254 		size = WM_ICH8_MC_TABSIZE;
   3255 	else
   3256 		size = WM_MC_TABSIZE;
   3257 	/* Clear out the multicast table. */
   3258 	for (i = 0; i < size; i++)
   3259 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3260 
   3261 	ETHER_FIRST_MULTI(step, ec, enm);
   3262 	while (enm != NULL) {
   3263 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3264 			/*
   3265 			 * We must listen to a range of multicast addresses.
   3266 			 * For now, just accept all multicasts, rather than
   3267 			 * trying to set only those filter bits needed to match
   3268 			 * the range.  (At this time, the only use of address
   3269 			 * ranges is for IP multicast routing, for which the
   3270 			 * range is big enough to require all bits set.)
   3271 			 */
   3272 			goto allmulti;
   3273 		}
   3274 
   3275 		hash = wm_mchash(sc, enm->enm_addrlo);
   3276 
   3277 		reg = (hash >> 5);
   3278 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3279 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3280 		    || (sc->sc_type == WM_T_PCH2)
   3281 		    || (sc->sc_type == WM_T_PCH_LPT)
   3282 		    || (sc->sc_type == WM_T_PCH_SPT))
   3283 			reg &= 0x1f;
   3284 		else
   3285 			reg &= 0x7f;
   3286 		bit = hash & 0x1f;
   3287 
   3288 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3289 		hash |= 1U << bit;
   3290 
   3291 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3292 			/*
   3293 			 * 82544 Errata 9: Certain register cannot be written
   3294 			 * with particular alignments in PCI-X bus operation
   3295 			 * (FCAH, MTA and VFTA).
   3296 			 */
   3297 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3298 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3299 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3300 		} else
   3301 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3302 
   3303 		ETHER_NEXT_MULTI(step, enm);
   3304 	}
   3305 
   3306 	ifp->if_flags &= ~IFF_ALLMULTI;
   3307 	goto setit;
   3308 
   3309  allmulti:
   3310 	ifp->if_flags |= IFF_ALLMULTI;
   3311 	sc->sc_rctl |= RCTL_MPE;
   3312 
   3313  setit:
   3314 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3315 }
   3316 
   3317 /* Reset and init related */
   3318 
   3319 static void
   3320 wm_set_vlan(struct wm_softc *sc)
   3321 {
   3322 
   3323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3324 		device_xname(sc->sc_dev), __func__));
   3325 
   3326 	/* Deal with VLAN enables. */
   3327 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3328 		sc->sc_ctrl |= CTRL_VME;
   3329 	else
   3330 		sc->sc_ctrl &= ~CTRL_VME;
   3331 
   3332 	/* Write the control registers. */
   3333 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3334 }
   3335 
   3336 static void
   3337 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3338 {
   3339 	uint32_t gcr;
   3340 	pcireg_t ctrl2;
   3341 
   3342 	gcr = CSR_READ(sc, WMREG_GCR);
   3343 
   3344 	/* Only take action if timeout value is defaulted to 0 */
   3345 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3346 		goto out;
   3347 
   3348 	if ((gcr & GCR_CAP_VER2) == 0) {
   3349 		gcr |= GCR_CMPL_TMOUT_10MS;
   3350 		goto out;
   3351 	}
   3352 
   3353 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3354 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3355 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3356 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3357 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3358 
   3359 out:
   3360 	/* Disable completion timeout resend */
   3361 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3362 
   3363 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3364 }
   3365 
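/*
 * Summary of the decision tree above: a non-default timeout already in
 * GCR is left alone (only the resend bit is cleared); parts without a
 * version-2 capability get the 10ms value directly in GCR; everything
 * else gets a 16ms timeout via the PCIe Device Control 2 register.
 */
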
   3366 void
   3367 wm_get_auto_rd_done(struct wm_softc *sc)
   3368 {
   3369 	int i;
   3370 
   3371 	/* wait for eeprom to reload */
   3372 	switch (sc->sc_type) {
   3373 	case WM_T_82571:
   3374 	case WM_T_82572:
   3375 	case WM_T_82573:
   3376 	case WM_T_82574:
   3377 	case WM_T_82583:
   3378 	case WM_T_82575:
   3379 	case WM_T_82576:
   3380 	case WM_T_82580:
   3381 	case WM_T_I350:
   3382 	case WM_T_I354:
   3383 	case WM_T_I210:
   3384 	case WM_T_I211:
   3385 	case WM_T_80003:
   3386 	case WM_T_ICH8:
   3387 	case WM_T_ICH9:
   3388 		for (i = 0; i < 10; i++) {
   3389 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3390 				break;
   3391 			delay(1000);
   3392 		}
   3393 		if (i == 10) {
   3394 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3395 			    "complete\n", device_xname(sc->sc_dev));
   3396 		}
   3397 		break;
   3398 	default:
   3399 		break;
   3400 	}
   3401 }
   3402 
   3403 void
   3404 wm_lan_init_done(struct wm_softc *sc)
   3405 {
   3406 	uint32_t reg = 0;
   3407 	int i;
   3408 
   3409 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3410 		device_xname(sc->sc_dev), __func__));
   3411 
   3412 	/* Wait for eeprom to reload */
   3413 	switch (sc->sc_type) {
   3414 	case WM_T_ICH10:
   3415 	case WM_T_PCH:
   3416 	case WM_T_PCH2:
   3417 	case WM_T_PCH_LPT:
   3418 	case WM_T_PCH_SPT:
   3419 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3420 			reg = CSR_READ(sc, WMREG_STATUS);
   3421 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3422 				break;
   3423 			delay(100);
   3424 		}
   3425 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3426 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3427 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3428 		}
   3429 		break;
   3430 	default:
   3431 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3432 		    __func__);
   3433 		break;
   3434 	}
   3435 
   3436 	reg &= ~STATUS_LAN_INIT_DONE;
   3437 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3438 }
   3439 
   3440 void
   3441 wm_get_cfg_done(struct wm_softc *sc)
   3442 {
   3443 	int mask;
   3444 	uint32_t reg;
   3445 	int i;
   3446 
   3447 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3448 		device_xname(sc->sc_dev), __func__));
   3449 
   3450 	/* Wait for eeprom to reload */
   3451 	switch (sc->sc_type) {
   3452 	case WM_T_82542_2_0:
   3453 	case WM_T_82542_2_1:
   3454 		/* null */
   3455 		break;
   3456 	case WM_T_82543:
   3457 	case WM_T_82544:
   3458 	case WM_T_82540:
   3459 	case WM_T_82545:
   3460 	case WM_T_82545_3:
   3461 	case WM_T_82546:
   3462 	case WM_T_82546_3:
   3463 	case WM_T_82541:
   3464 	case WM_T_82541_2:
   3465 	case WM_T_82547:
   3466 	case WM_T_82547_2:
   3467 	case WM_T_82573:
   3468 	case WM_T_82574:
   3469 	case WM_T_82583:
   3470 		/* generic */
   3471 		delay(10*1000);
   3472 		break;
   3473 	case WM_T_80003:
   3474 	case WM_T_82571:
   3475 	case WM_T_82572:
   3476 	case WM_T_82575:
   3477 	case WM_T_82576:
   3478 	case WM_T_82580:
   3479 	case WM_T_I350:
   3480 	case WM_T_I354:
   3481 	case WM_T_I210:
   3482 	case WM_T_I211:
   3483 		if (sc->sc_type == WM_T_82571) {
   3484 			/* Only 82571 shares port 0 */
   3485 			mask = EEMNGCTL_CFGDONE_0;
   3486 		} else
   3487 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3488 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3489 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3490 				break;
   3491 			delay(1000);
   3492 		}
   3493 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3494 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3495 				device_xname(sc->sc_dev), __func__));
   3496 		}
   3497 		break;
   3498 	case WM_T_ICH8:
   3499 	case WM_T_ICH9:
   3500 	case WM_T_ICH10:
   3501 	case WM_T_PCH:
   3502 	case WM_T_PCH2:
   3503 	case WM_T_PCH_LPT:
   3504 	case WM_T_PCH_SPT:
   3505 		delay(10*1000);
   3506 		if (sc->sc_type >= WM_T_ICH10)
   3507 			wm_lan_init_done(sc);
   3508 		else
   3509 			wm_get_auto_rd_done(sc);
   3510 
   3511 		reg = CSR_READ(sc, WMREG_STATUS);
   3512 		if ((reg & STATUS_PHYRA) != 0)
   3513 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3514 		break;
   3515 	default:
   3516 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3517 		    __func__);
   3518 		break;
   3519 	}
   3520 }
   3521 
   3522 /* Init hardware bits */
   3523 void
   3524 wm_initialize_hardware_bits(struct wm_softc *sc)
   3525 {
   3526 	uint32_t tarc0, tarc1, reg;
   3527 
   3528 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3529 		device_xname(sc->sc_dev), __func__));
   3530 
   3531 	/* For 82571 variant, 80003 and ICHs */
   3532 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3533 	    || (sc->sc_type >= WM_T_80003)) {
   3534 
   3535 		/* Transmit Descriptor Control 0 */
   3536 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3537 		reg |= TXDCTL_COUNT_DESC;
   3538 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3539 
   3540 		/* Transmit Descriptor Control 1 */
   3541 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3542 		reg |= TXDCTL_COUNT_DESC;
   3543 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3544 
   3545 		/* TARC0 */
   3546 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3547 		switch (sc->sc_type) {
   3548 		case WM_T_82571:
   3549 		case WM_T_82572:
   3550 		case WM_T_82573:
   3551 		case WM_T_82574:
   3552 		case WM_T_82583:
   3553 		case WM_T_80003:
   3554 			/* Clear bits 30..27 */
   3555 			tarc0 &= ~__BITS(30, 27);
   3556 			break;
   3557 		default:
   3558 			break;
   3559 		}
   3560 
   3561 		switch (sc->sc_type) {
   3562 		case WM_T_82571:
   3563 		case WM_T_82572:
   3564 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3565 
   3566 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3567 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3568 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3569 			/* 8257[12] Errata No.7 */
    3570 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3571 
   3572 			/* TARC1 bit 28 */
   3573 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3574 				tarc1 &= ~__BIT(28);
   3575 			else
   3576 				tarc1 |= __BIT(28);
   3577 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3578 
   3579 			/*
   3580 			 * 8257[12] Errata No.13
    3581 			 * Disable Dynamic Clock Gating.
   3582 			 */
   3583 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3584 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3585 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3586 			break;
   3587 		case WM_T_82573:
   3588 		case WM_T_82574:
   3589 		case WM_T_82583:
   3590 			if ((sc->sc_type == WM_T_82574)
   3591 			    || (sc->sc_type == WM_T_82583))
   3592 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3593 
   3594 			/* Extended Device Control */
   3595 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3596 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3597 			reg |= __BIT(22);	/* Set bit 22 */
   3598 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3599 
   3600 			/* Device Control */
   3601 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3602 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3603 
   3604 			/* PCIe Control Register */
   3605 			/*
   3606 			 * 82573 Errata (unknown).
   3607 			 *
   3608 			 * 82574 Errata 25 and 82583 Errata 12
   3609 			 * "Dropped Rx Packets":
    3610 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3611 			 */
   3612 			reg = CSR_READ(sc, WMREG_GCR);
   3613 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3614 			CSR_WRITE(sc, WMREG_GCR, reg);
   3615 
   3616 			if ((sc->sc_type == WM_T_82574)
   3617 			    || (sc->sc_type == WM_T_82583)) {
   3618 				/*
   3619 				 * Document says this bit must be set for
   3620 				 * proper operation.
   3621 				 */
   3622 				reg = CSR_READ(sc, WMREG_GCR);
   3623 				reg |= __BIT(22);
   3624 				CSR_WRITE(sc, WMREG_GCR, reg);
   3625 
   3626 				/*
    3627 				 * Apply a workaround for the hardware errata
    3628 				 * documented in the errata docs. It fixes an
    3629 				 * issue where some error-prone or unreliable
    3630 				 * PCIe completions occur, particularly with
    3631 				 * ASPM enabled. Without the fix, the issue
    3632 				 * can cause Tx timeouts.
   3633 				 */
   3634 				reg = CSR_READ(sc, WMREG_GCR2);
   3635 				reg |= __BIT(0);
   3636 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3637 			}
   3638 			break;
   3639 		case WM_T_80003:
   3640 			/* TARC0 */
   3641 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3642 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3643 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3644 
   3645 			/* TARC1 bit 28 */
   3646 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3647 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3648 				tarc1 &= ~__BIT(28);
   3649 			else
   3650 				tarc1 |= __BIT(28);
   3651 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3652 			break;
   3653 		case WM_T_ICH8:
   3654 		case WM_T_ICH9:
   3655 		case WM_T_ICH10:
   3656 		case WM_T_PCH:
   3657 		case WM_T_PCH2:
   3658 		case WM_T_PCH_LPT:
   3659 		case WM_T_PCH_SPT:
   3660 			/* TARC0 */
   3661 			if ((sc->sc_type == WM_T_ICH8)
   3662 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3663 				/* Set TARC0 bits 29 and 28 */
   3664 				tarc0 |= __BITS(29, 28);
   3665 			}
   3666 			/* Set TARC0 bits 23,24,26,27 */
   3667 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3668 
   3669 			/* CTRL_EXT */
   3670 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3671 			reg |= __BIT(22);	/* Set bit 22 */
   3672 			/*
   3673 			 * Enable PHY low-power state when MAC is at D3
   3674 			 * w/o WoL
   3675 			 */
   3676 			if (sc->sc_type >= WM_T_PCH)
   3677 				reg |= CTRL_EXT_PHYPDEN;
   3678 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3679 
   3680 			/* TARC1 */
   3681 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3682 			/* bit 28 */
   3683 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3684 				tarc1 &= ~__BIT(28);
   3685 			else
   3686 				tarc1 |= __BIT(28);
   3687 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3688 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3689 
   3690 			/* Device Status */
   3691 			if (sc->sc_type == WM_T_ICH8) {
   3692 				reg = CSR_READ(sc, WMREG_STATUS);
   3693 				reg &= ~__BIT(31);
   3694 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3695 
   3696 			}
   3697 
   3698 			/* IOSFPC */
   3699 			if (sc->sc_type == WM_T_PCH_SPT) {
   3700 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3701 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3702 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3703 			}
   3704 			/*
   3705 			 * Work-around descriptor data corruption issue during
   3706 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3707 			 * capability.
   3708 			 */
   3709 			reg = CSR_READ(sc, WMREG_RFCTL);
   3710 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3711 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3712 			break;
   3713 		default:
   3714 			break;
   3715 		}
   3716 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3717 
   3718 		/*
   3719 		 * 8257[12] Errata No.52 and some others.
   3720 		 * Avoid RSS Hash Value bug.
   3721 		 */
   3722 		switch (sc->sc_type) {
   3723 		case WM_T_82571:
   3724 		case WM_T_82572:
   3725 		case WM_T_82573:
   3726 		case WM_T_80003:
   3727 		case WM_T_ICH8:
   3728 			reg = CSR_READ(sc, WMREG_RFCTL);
    3729 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3730 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3731 			break;
   3732 		default:
   3733 			break;
   3734 		}
   3735 	}
   3736 }
   3737 
   3738 static uint32_t
   3739 wm_rxpbs_adjust_82580(uint32_t val)
   3740 {
   3741 	uint32_t rv = 0;
   3742 
   3743 	if (val < __arraycount(wm_82580_rxpbs_table))
   3744 		rv = wm_82580_rxpbs_table[val];
   3745 
   3746 	return rv;
   3747 }
   3748 
   3749 /*
   3750  * wm_reset_phy:
   3751  *
   3752  *	generic PHY reset function.
   3753  *	Same as e1000_phy_hw_reset_generic()
   3754  */
   3755 static void
   3756 wm_reset_phy(struct wm_softc *sc)
   3757 {
   3758 	uint32_t reg;
   3759 
   3760 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3761 		device_xname(sc->sc_dev), __func__));
   3762 	if (wm_phy_resetisblocked(sc))
   3763 		return;
   3764 
   3765 	sc->phy.acquire(sc);
   3766 
   3767 	reg = CSR_READ(sc, WMREG_CTRL);
   3768 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3769 	CSR_WRITE_FLUSH(sc);
   3770 
   3771 	delay(sc->phy.reset_delay_us);
   3772 
   3773 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3774 	CSR_WRITE_FLUSH(sc);
   3775 
   3776 	delay(150);
   3777 
   3778 	sc->phy.release(sc);
   3779 
   3780 	wm_get_cfg_done(sc);
   3781 }
   3782 
   3783 static void
   3784 wm_flush_desc_rings(struct wm_softc *sc)
   3785 {
   3786 	pcireg_t preg;
   3787 	uint32_t reg;
   3788 	int nexttx;
   3789 
   3790 	/* First, disable MULR fix in FEXTNVM11 */
   3791 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3792 	reg |= FEXTNVM11_DIS_MULRFIX;
   3793 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3794 
   3795 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3796 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3797 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3798 		struct wm_txqueue *txq;
   3799 		wiseman_txdesc_t *txd;
   3800 
   3801 		/* TX */
   3802 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3803 		    device_xname(sc->sc_dev), preg, reg);
   3804 		reg = CSR_READ(sc, WMREG_TCTL);
   3805 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3806 
   3807 		txq = &sc->sc_queue[0].wmq_txq;
   3808 		nexttx = txq->txq_next;
   3809 		txd = &txq->txq_descs[nexttx];
   3810 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3811 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3812 		txd->wtx_fields.wtxu_status = 0;
   3813 		txd->wtx_fields.wtxu_options = 0;
   3814 		txd->wtx_fields.wtxu_vlan = 0;
   3815 
   3816 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3817 			BUS_SPACE_BARRIER_WRITE);
   3818 
   3819 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3820 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3821 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3822 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3823 		delay(250);
   3824 	}
   3825 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3826 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3827 		uint32_t rctl;
   3828 
   3829 		/* RX */
   3830 		printf("%s: Need RX flush (reg = %08x)\n",
   3831 		    device_xname(sc->sc_dev), preg);
   3832 		rctl = CSR_READ(sc, WMREG_RCTL);
   3833 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3834 		CSR_WRITE_FLUSH(sc);
   3835 		delay(150);
   3836 
   3837 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3838 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3839 		reg &= 0xffffc000;
   3840 		/*
   3841 		 * update thresholds: prefetch threshold to 31, host threshold
   3842 		 * to 1 and make sure the granularity is "descriptors" and not
   3843 		 * "cache lines"
   3844 		 */
   3845 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3846 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3847 
   3848 		/*
   3849 		 * momentarily enable the RX ring for the changes to take
   3850 		 * effect
   3851 		 */
   3852 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3853 		CSR_WRITE_FLUSH(sc);
   3854 		delay(150);
   3855 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3856 	}
   3857 }
   3858 
   3859 /*
   3860  * wm_reset:
   3861  *
   3862  *	Reset the i82542 chip.
   3863  */
   3864 static void
   3865 wm_reset(struct wm_softc *sc)
   3866 {
   3867 	int phy_reset = 0;
   3868 	int i, error = 0;
   3869 	uint32_t reg;
   3870 
   3871 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3872 		device_xname(sc->sc_dev), __func__));
   3873 	KASSERT(sc->sc_type != 0);
   3874 
   3875 	/*
   3876 	 * Allocate on-chip memory according to the MTU size.
   3877 	 * The Packet Buffer Allocation register must be written
   3878 	 * before the chip is reset.
   3879 	 */
   3880 	switch (sc->sc_type) {
   3881 	case WM_T_82547:
   3882 	case WM_T_82547_2:
   3883 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3884 		    PBA_22K : PBA_30K;
   3885 		for (i = 0; i < sc->sc_nqueues; i++) {
   3886 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3887 			txq->txq_fifo_head = 0;
   3888 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3889 			txq->txq_fifo_size =
   3890 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3891 			txq->txq_fifo_stall = 0;
   3892 		}
   3893 		break;
   3894 	case WM_T_82571:
   3895 	case WM_T_82572:
   3896 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3897 	case WM_T_80003:
   3898 		sc->sc_pba = PBA_32K;
   3899 		break;
   3900 	case WM_T_82573:
   3901 		sc->sc_pba = PBA_12K;
   3902 		break;
   3903 	case WM_T_82574:
   3904 	case WM_T_82583:
   3905 		sc->sc_pba = PBA_20K;
   3906 		break;
   3907 	case WM_T_82576:
   3908 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3909 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3910 		break;
   3911 	case WM_T_82580:
   3912 	case WM_T_I350:
   3913 	case WM_T_I354:
   3914 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3915 		break;
   3916 	case WM_T_I210:
   3917 	case WM_T_I211:
   3918 		sc->sc_pba = PBA_34K;
   3919 		break;
   3920 	case WM_T_ICH8:
   3921 		/* Workaround for a bit corruption issue in FIFO memory */
   3922 		sc->sc_pba = PBA_8K;
   3923 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3924 		break;
   3925 	case WM_T_ICH9:
   3926 	case WM_T_ICH10:
   3927 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3928 		    PBA_14K : PBA_10K;
   3929 		break;
   3930 	case WM_T_PCH:
   3931 	case WM_T_PCH2:
   3932 	case WM_T_PCH_LPT:
   3933 	case WM_T_PCH_SPT:
   3934 		sc->sc_pba = PBA_26K;
   3935 		break;
   3936 	default:
   3937 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3938 		    PBA_40K : PBA_48K;
   3939 		break;
   3940 	}
   3941 	/*
   3942 	 * Only old or non-multiqueue devices have the PBA register
   3943 	 * XXX Need special handling for 82575.
   3944 	 */
   3945 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3946 	    || (sc->sc_type == WM_T_82575))
   3947 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3948 
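	/*
	 * Example of the packet buffer split above (assuming the PBA_*
	 * constants carry their nominal KB values): an 82547 with an MTU
	 * over 8192 gets sc_pba = PBA_22K, so its Tx FIFO starts at the
	 * 22KB mark and spans (PBA_40K - PBA_22K) = 18KB of the 40KB
	 * on-chip buffer; with a standard MTU the split is 30KB Rx and
	 * 10KB Tx instead.
	 */
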
   3949 	/* Prevent the PCI-E bus from sticking */
   3950 	if (sc->sc_flags & WM_F_PCIE) {
   3951 		int timeout = 800;
   3952 
   3953 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3954 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3955 
   3956 		while (timeout--) {
   3957 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3958 			    == 0)
   3959 				break;
   3960 			delay(100);
   3961 		}
   3962 	}
   3963 
   3964 	/* Set the completion timeout for interface */
   3965 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3966 	    || (sc->sc_type == WM_T_82580)
   3967 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3968 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3969 		wm_set_pcie_completion_timeout(sc);
   3970 
   3971 	/* Clear interrupt */
   3972 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3973 	if (sc->sc_nintrs > 1) {
   3974 		if (sc->sc_type != WM_T_82574) {
   3975 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3976 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3977 		} else {
   3978 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3979 		}
   3980 	}
   3981 
   3982 	/* Stop the transmit and receive processes. */
   3983 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3984 	sc->sc_rctl &= ~RCTL_EN;
   3985 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3986 	CSR_WRITE_FLUSH(sc);
   3987 
   3988 	/* XXX set_tbi_sbp_82543() */
   3989 
   3990 	delay(10*1000);
   3991 
   3992 	/* Must acquire the MDIO ownership before MAC reset */
   3993 	switch (sc->sc_type) {
   3994 	case WM_T_82573:
   3995 	case WM_T_82574:
   3996 	case WM_T_82583:
   3997 		error = wm_get_hw_semaphore_82573(sc);
   3998 		break;
   3999 	default:
   4000 		break;
   4001 	}
   4002 
   4003 	/*
   4004 	 * 82541 Errata 29? & 82547 Errata 28?
   4005 	 * See also the description about PHY_RST bit in CTRL register
   4006 	 * in 8254x_GBe_SDM.pdf.
   4007 	 */
   4008 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4009 		CSR_WRITE(sc, WMREG_CTRL,
   4010 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4011 		CSR_WRITE_FLUSH(sc);
   4012 		delay(5000);
   4013 	}
   4014 
   4015 	switch (sc->sc_type) {
   4016 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4017 	case WM_T_82541:
   4018 	case WM_T_82541_2:
   4019 	case WM_T_82547:
   4020 	case WM_T_82547_2:
   4021 		/*
   4022 		 * On some chipsets, a reset through a memory-mapped write
   4023 		 * cycle can cause the chip to reset before completing the
    4024 		 * write cycle.  This causes a major headache that can be
   4025 		 * avoided by issuing the reset via indirect register writes
   4026 		 * through I/O space.
   4027 		 *
   4028 		 * So, if we successfully mapped the I/O BAR at attach time,
   4029 		 * use that.  Otherwise, try our luck with a memory-mapped
   4030 		 * reset.
   4031 		 */
   4032 		if (sc->sc_flags & WM_F_IOH_VALID)
   4033 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4034 		else
   4035 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4036 		break;
   4037 	case WM_T_82545_3:
   4038 	case WM_T_82546_3:
   4039 		/* Use the shadow control register on these chips. */
   4040 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4041 		break;
   4042 	case WM_T_80003:
   4043 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4044 		sc->phy.acquire(sc);
   4045 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4046 		sc->phy.release(sc);
   4047 		break;
   4048 	case WM_T_ICH8:
   4049 	case WM_T_ICH9:
   4050 	case WM_T_ICH10:
   4051 	case WM_T_PCH:
   4052 	case WM_T_PCH2:
   4053 	case WM_T_PCH_LPT:
   4054 	case WM_T_PCH_SPT:
   4055 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4056 		if (wm_phy_resetisblocked(sc) == false) {
   4057 			/*
   4058 			 * Gate automatic PHY configuration by hardware on
   4059 			 * non-managed 82579
   4060 			 */
   4061 			if ((sc->sc_type == WM_T_PCH2)
   4062 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4063 				== 0))
   4064 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4065 
   4066 			reg |= CTRL_PHY_RESET;
   4067 			phy_reset = 1;
   4068 		} else
   4069 			printf("XXX reset is blocked!!!\n");
   4070 		sc->phy.acquire(sc);
   4071 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4072 		/* Don't insert a completion barrier while resetting */
   4073 		delay(20*1000);
   4074 		mutex_exit(sc->sc_ich_phymtx);
   4075 		break;
   4076 	case WM_T_82580:
   4077 	case WM_T_I350:
   4078 	case WM_T_I354:
   4079 	case WM_T_I210:
   4080 	case WM_T_I211:
   4081 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4082 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4083 			CSR_WRITE_FLUSH(sc);
   4084 		delay(5000);
   4085 		break;
   4086 	case WM_T_82542_2_0:
   4087 	case WM_T_82542_2_1:
   4088 	case WM_T_82543:
   4089 	case WM_T_82540:
   4090 	case WM_T_82545:
   4091 	case WM_T_82546:
   4092 	case WM_T_82571:
   4093 	case WM_T_82572:
   4094 	case WM_T_82573:
   4095 	case WM_T_82574:
   4096 	case WM_T_82575:
   4097 	case WM_T_82576:
   4098 	case WM_T_82583:
   4099 	default:
   4100 		/* Everything else can safely use the documented method. */
   4101 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4102 		break;
   4103 	}
   4104 
   4105 	/* Must release the MDIO ownership after MAC reset */
   4106 	switch (sc->sc_type) {
   4107 	case WM_T_82573:
   4108 	case WM_T_82574:
   4109 	case WM_T_82583:
   4110 		if (error == 0)
   4111 			wm_put_hw_semaphore_82573(sc);
   4112 		break;
   4113 	default:
   4114 		break;
   4115 	}
   4116 
   4117 	if (phy_reset != 0)
   4118 		wm_get_cfg_done(sc);
   4119 
   4120 	/* reload EEPROM */
   4121 	switch (sc->sc_type) {
   4122 	case WM_T_82542_2_0:
   4123 	case WM_T_82542_2_1:
   4124 	case WM_T_82543:
   4125 	case WM_T_82544:
   4126 		delay(10);
   4127 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4128 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4129 		CSR_WRITE_FLUSH(sc);
   4130 		delay(2000);
   4131 		break;
   4132 	case WM_T_82540:
   4133 	case WM_T_82545:
   4134 	case WM_T_82545_3:
   4135 	case WM_T_82546:
   4136 	case WM_T_82546_3:
   4137 		delay(5*1000);
   4138 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4139 		break;
   4140 	case WM_T_82541:
   4141 	case WM_T_82541_2:
   4142 	case WM_T_82547:
   4143 	case WM_T_82547_2:
   4144 		delay(20000);
   4145 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4146 		break;
   4147 	case WM_T_82571:
   4148 	case WM_T_82572:
   4149 	case WM_T_82573:
   4150 	case WM_T_82574:
   4151 	case WM_T_82583:
   4152 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4153 			delay(10);
   4154 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4155 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4156 			CSR_WRITE_FLUSH(sc);
   4157 		}
   4158 		/* check EECD_EE_AUTORD */
   4159 		wm_get_auto_rd_done(sc);
   4160 		/*
   4161 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4162 		 * is set.
   4163 		 */
   4164 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4165 		    || (sc->sc_type == WM_T_82583))
   4166 			delay(25*1000);
   4167 		break;
   4168 	case WM_T_82575:
   4169 	case WM_T_82576:
   4170 	case WM_T_82580:
   4171 	case WM_T_I350:
   4172 	case WM_T_I354:
   4173 	case WM_T_I210:
   4174 	case WM_T_I211:
   4175 	case WM_T_80003:
   4176 		/* check EECD_EE_AUTORD */
   4177 		wm_get_auto_rd_done(sc);
   4178 		break;
   4179 	case WM_T_ICH8:
   4180 	case WM_T_ICH9:
   4181 	case WM_T_ICH10:
   4182 	case WM_T_PCH:
   4183 	case WM_T_PCH2:
   4184 	case WM_T_PCH_LPT:
   4185 	case WM_T_PCH_SPT:
   4186 		break;
   4187 	default:
   4188 		panic("%s: unknown type\n", __func__);
   4189 	}
   4190 
   4191 	/* Check whether EEPROM is present or not */
   4192 	switch (sc->sc_type) {
   4193 	case WM_T_82575:
   4194 	case WM_T_82576:
   4195 	case WM_T_82580:
   4196 	case WM_T_I350:
   4197 	case WM_T_I354:
   4198 	case WM_T_ICH8:
   4199 	case WM_T_ICH9:
   4200 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4201 			/* Not found */
   4202 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4203 			if (sc->sc_type == WM_T_82575)
   4204 				wm_reset_init_script_82575(sc);
   4205 		}
   4206 		break;
   4207 	default:
   4208 		break;
   4209 	}
   4210 
   4211 	if ((sc->sc_type == WM_T_82580)
   4212 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4213 		/* clear global device reset status bit */
   4214 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4215 	}
   4216 
   4217 	/* Clear any pending interrupt events. */
   4218 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4219 	reg = CSR_READ(sc, WMREG_ICR);
   4220 	if (sc->sc_nintrs > 1) {
   4221 		if (sc->sc_type != WM_T_82574) {
   4222 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4223 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4224 		} else
   4225 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4226 	}
   4227 
   4228 	/* reload sc_ctrl */
   4229 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4230 
   4231 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4232 		wm_set_eee_i350(sc);
   4233 
   4234 	/* Clear the host wakeup bit after lcd reset */
   4235 	if (sc->sc_type >= WM_T_PCH) {
   4236 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4237 		    BM_PORT_GEN_CFG);
   4238 		reg &= ~BM_WUC_HOST_WU_BIT;
   4239 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4240 		    BM_PORT_GEN_CFG, reg);
   4241 	}
   4242 
   4243 	/*
   4244 	 * For PCH, this write will make sure that any noise will be detected
   4245 	 * as a CRC error and be dropped rather than show up as a bad packet
   4246 	 * to the DMA engine
   4247 	 */
   4248 	if (sc->sc_type == WM_T_PCH)
   4249 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4250 
   4251 	if (sc->sc_type >= WM_T_82544)
   4252 		CSR_WRITE(sc, WMREG_WUC, 0);
   4253 
   4254 	wm_reset_mdicnfg_82580(sc);
   4255 
   4256 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4257 		wm_pll_workaround_i210(sc);
   4258 }
   4259 
   4260 /*
   4261  * wm_add_rxbuf:
   4262  *
    4263  *	Add a receive buffer to the indicated descriptor.
   4264  */
   4265 static int
   4266 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4267 {
   4268 	struct wm_softc *sc = rxq->rxq_sc;
   4269 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4270 	struct mbuf *m;
   4271 	int error;
   4272 
   4273 	KASSERT(mutex_owned(rxq->rxq_lock));
   4274 
   4275 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4276 	if (m == NULL)
   4277 		return ENOBUFS;
   4278 
   4279 	MCLGET(m, M_DONTWAIT);
   4280 	if ((m->m_flags & M_EXT) == 0) {
   4281 		m_freem(m);
   4282 		return ENOBUFS;
   4283 	}
   4284 
   4285 	if (rxs->rxs_mbuf != NULL)
   4286 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4287 
   4288 	rxs->rxs_mbuf = m;
   4289 
   4290 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4291 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4292 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4293 	if (error) {
   4294 		/* XXX XXX XXX */
   4295 		aprint_error_dev(sc->sc_dev,
   4296 		    "unable to load rx DMA map %d, error = %d\n",
   4297 		    idx, error);
   4298 		panic("wm_add_rxbuf");
   4299 	}
   4300 
   4301 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4302 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4303 
   4304 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4305 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4306 			wm_init_rxdesc(rxq, idx);
   4307 	} else
   4308 		wm_init_rxdesc(rxq, idx);
   4309 
   4310 	return 0;
   4311 }
   4312 
   4313 /*
   4314  * wm_rxdrain:
   4315  *
   4316  *	Drain the receive queue.
   4317  */
   4318 static void
   4319 wm_rxdrain(struct wm_rxqueue *rxq)
   4320 {
   4321 	struct wm_softc *sc = rxq->rxq_sc;
   4322 	struct wm_rxsoft *rxs;
   4323 	int i;
   4324 
   4325 	KASSERT(mutex_owned(rxq->rxq_lock));
   4326 
   4327 	for (i = 0; i < WM_NRXDESC; i++) {
   4328 		rxs = &rxq->rxq_soft[i];
   4329 		if (rxs->rxs_mbuf != NULL) {
   4330 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4331 			m_freem(rxs->rxs_mbuf);
   4332 			rxs->rxs_mbuf = NULL;
   4333 		}
   4334 	}
   4335 }
   4336 
   4337 
   4338 /*
   4339  * XXX copy from FreeBSD's sys/net/rss_config.c
   4340  */
   4341 /*
   4342  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4343  * effectiveness may be limited by algorithm choice and available entropy
   4344  * during the boot.
   4345  *
   4346  * XXXRW: And that we don't randomize it yet!
   4347  *
   4348  * This is the default Microsoft RSS specification key which is also
   4349  * the Chelsio T5 firmware default key.
   4350  */
   4351 #define RSS_KEYSIZE 40
   4352 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4353 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4354 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4355 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4356 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4357 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4358 };
   4359 
   4360 /*
    4361  * Caller must pass an array of size sizeof(wm_rss_key).
   4362  *
   4363  * XXX
    4364  * As if_ixgbe may use this function, it should not be an
    4365  * if_wm specific function.
   4366  */
   4367 static void
   4368 wm_rss_getkey(uint8_t *key)
   4369 {
   4370 
   4371 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4372 }
   4373 
   4374 /*
   4375  * Setup registers for RSS.
   4376  *
   4377  * XXX not yet VMDq support
   4378  */
   4379 static void
   4380 wm_init_rss(struct wm_softc *sc)
   4381 {
   4382 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4383 	int i;
   4384 
   4385 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4386 
   4387 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4388 		int qid, reta_ent;
   4389 
    4390 		qid = i % sc->sc_nqueues;
    4391 		switch (sc->sc_type) {
   4392 		case WM_T_82574:
   4393 			reta_ent = __SHIFTIN(qid,
   4394 			    RETA_ENT_QINDEX_MASK_82574);
   4395 			break;
   4396 		case WM_T_82575:
   4397 			reta_ent = __SHIFTIN(qid,
   4398 			    RETA_ENT_QINDEX1_MASK_82575);
   4399 			break;
   4400 		default:
   4401 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4402 			break;
   4403 		}
   4404 
   4405 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4406 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4407 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4408 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4409 	}
   4410 
   4411 	wm_rss_getkey((uint8_t *)rss_key);
   4412 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4413 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4414 
   4415 	if (sc->sc_type == WM_T_82574)
   4416 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4417 	else
   4418 		mrqc = MRQC_ENABLE_RSS_MQ;
   4419 
    4420 	/* XXX
    4421 	 * The same as FreeBSD igb.
    4422 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4423 	 */
   4424 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4425 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4426 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4427 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4428 
   4429 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4430 }
   4431 
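/*
 * Mapping example (assuming the usual 128-entry redirection table,
 * i.e. RETA_NUM_ENTRIES == 128): with sc_nqueues == 4, the loop above
 * programs RETA entry i with queue (i % 4), so a received packet
 * whose RSS (Toeplitz) hash ends in 0x05 indexes entry 5 and is
 * steered to queue 1.
 */
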
   4432 /*
    4433  * Adjust the TX and RX queue numbers which the system actually uses.
    4434  *
    4435  * The numbers are affected by the following parameters:
    4436  *     - The number of hardware queues
   4437  *     - The number of MSI-X vectors (= "nvectors" argument)
   4438  *     - ncpu
   4439  */
   4440 static void
   4441 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4442 {
   4443 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4444 
   4445 	if (nvectors < 2) {
   4446 		sc->sc_nqueues = 1;
   4447 		return;
   4448 	}
   4449 
    4450 	switch (sc->sc_type) {
   4451 	case WM_T_82572:
   4452 		hw_ntxqueues = 2;
   4453 		hw_nrxqueues = 2;
   4454 		break;
   4455 	case WM_T_82574:
   4456 		hw_ntxqueues = 2;
   4457 		hw_nrxqueues = 2;
   4458 		break;
   4459 	case WM_T_82575:
   4460 		hw_ntxqueues = 4;
   4461 		hw_nrxqueues = 4;
   4462 		break;
   4463 	case WM_T_82576:
   4464 		hw_ntxqueues = 16;
   4465 		hw_nrxqueues = 16;
   4466 		break;
   4467 	case WM_T_82580:
   4468 	case WM_T_I350:
   4469 	case WM_T_I354:
   4470 		hw_ntxqueues = 8;
   4471 		hw_nrxqueues = 8;
   4472 		break;
   4473 	case WM_T_I210:
   4474 		hw_ntxqueues = 4;
   4475 		hw_nrxqueues = 4;
   4476 		break;
   4477 	case WM_T_I211:
   4478 		hw_ntxqueues = 2;
   4479 		hw_nrxqueues = 2;
   4480 		break;
   4481 		/*
    4482 		 * As the ethernet controllers below do not support MSI-X,
    4483 		 * this driver does not use multiqueue on them.
   4484 		 *     - WM_T_80003
   4485 		 *     - WM_T_ICH8
   4486 		 *     - WM_T_ICH9
   4487 		 *     - WM_T_ICH10
   4488 		 *     - WM_T_PCH
   4489 		 *     - WM_T_PCH2
   4490 		 *     - WM_T_PCH_LPT
   4491 		 */
   4492 	default:
   4493 		hw_ntxqueues = 1;
   4494 		hw_nrxqueues = 1;
   4495 		break;
   4496 	}
   4497 
   4498 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4499 
   4500 	/*
    4501 	 * Since more queues than MSI-X vectors cannot improve scaling,
    4502 	 * limit the number of queues actually used.
   4503 	 */
   4504 	if (nvectors < hw_nqueues + 1) {
   4505 		sc->sc_nqueues = nvectors - 1;
   4506 	} else {
   4507 		sc->sc_nqueues = hw_nqueues;
   4508 	}
   4509 
   4510 	/*
    4511 	 * Since more queues than CPUs cannot improve scaling, limit
    4512 	 * the number of queues actually used.
   4513 	 */
   4514 	if (ncpu < sc->sc_nqueues)
   4515 		sc->sc_nqueues = ncpu;
   4516 }
   4517 
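/*
 * Worked example of the clamping above: an 82576 (16 hardware Tx/Rx
 * queue pairs) attached with nvectors == 5 yields
 * sc_nqueues = 5 - 1 = 4, since one vector is reserved for the link
 * interrupt; on a machine with only two CPUs the ncpu clamp then
 * reduces that to 2.
 */
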
   4518 /*
   4519  * Both single interrupt MSI and INTx can use this function.
   4520  */
   4521 static int
   4522 wm_setup_legacy(struct wm_softc *sc)
   4523 {
   4524 	pci_chipset_tag_t pc = sc->sc_pc;
   4525 	const char *intrstr = NULL;
   4526 	char intrbuf[PCI_INTRSTR_LEN];
   4527 	int error;
   4528 
   4529 	error = wm_alloc_txrx_queues(sc);
   4530 	if (error) {
   4531 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4532 		    error);
   4533 		return ENOMEM;
   4534 	}
   4535 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4536 	    sizeof(intrbuf));
   4537 #ifdef WM_MPSAFE
   4538 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4539 #endif
   4540 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4541 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4542 	if (sc->sc_ihs[0] == NULL) {
   4543 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4544 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4545 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4546 		return ENOMEM;
   4547 	}
   4548 
   4549 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4550 	sc->sc_nintrs = 1;
   4551 	return 0;
   4552 }
   4553 
   4554 static int
   4555 wm_setup_msix(struct wm_softc *sc)
   4556 {
   4557 	void *vih;
   4558 	kcpuset_t *affinity;
   4559 	int qidx, error, intr_idx, txrx_established;
   4560 	pci_chipset_tag_t pc = sc->sc_pc;
   4561 	const char *intrstr = NULL;
   4562 	char intrbuf[PCI_INTRSTR_LEN];
   4563 	char intr_xname[INTRDEVNAMEBUF];
   4564 
   4565 	if (sc->sc_nqueues < ncpu) {
   4566 		/*
   4567 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4568 		 * interrupts starts from CPU#1.
   4569 		 */
   4570 		sc->sc_affinity_offset = 1;
   4571 	} else {
   4572 		/*
    4573 		 * In this case, this device uses all CPUs, so we make the
    4574 		 * affinity cpu_index match the MSI-X vector number for
    4575 		 * readability.
   4575 		 */
   4576 		sc->sc_affinity_offset = 0;
   4577 	}
   4578 
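	/*
	 * Example: with sc_nqueues == 3 on an 8-CPU machine the offset
	 * is 1, so the loop below binds TXRX0..TXRX2 to CPU1..CPU3 via
	 * (sc_affinity_offset + intr_idx) % ncpu, while the LINK
	 * interrupt keeps the default affinity.
	 */
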
   4579 	error = wm_alloc_txrx_queues(sc);
   4580 	if (error) {
   4581 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4582 		    error);
   4583 		return ENOMEM;
   4584 	}
   4585 
   4586 	kcpuset_create(&affinity, false);
   4587 	intr_idx = 0;
   4588 
   4589 	/*
   4590 	 * TX and RX
   4591 	 */
   4592 	txrx_established = 0;
   4593 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4594 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4595 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4596 
   4597 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4598 		    sizeof(intrbuf));
   4599 #ifdef WM_MPSAFE
   4600 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4601 		    PCI_INTR_MPSAFE, true);
   4602 #endif
   4603 		memset(intr_xname, 0, sizeof(intr_xname));
   4604 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4605 		    device_xname(sc->sc_dev), qidx);
   4606 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4607 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4608 		if (vih == NULL) {
   4609 			aprint_error_dev(sc->sc_dev,
   4610 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4611 			    intrstr ? " at " : "",
   4612 			    intrstr ? intrstr : "");
   4613 
   4614 			goto fail;
   4615 		}
   4616 		kcpuset_zero(affinity);
   4617 		/* Round-robin affinity */
   4618 		kcpuset_set(affinity, affinity_to);
   4619 		error = interrupt_distribute(vih, affinity, NULL);
   4620 		if (error == 0) {
   4621 			aprint_normal_dev(sc->sc_dev,
   4622 			    "for TX and RX interrupting at %s affinity to %u\n",
   4623 			    intrstr, affinity_to);
   4624 		} else {
   4625 			aprint_normal_dev(sc->sc_dev,
   4626 			    "for TX and RX interrupting at %s\n", intrstr);
   4627 		}
   4628 		sc->sc_ihs[intr_idx] = vih;
    4629 		wmq->wmq_id = qidx;
   4630 		wmq->wmq_intr_idx = intr_idx;
   4631 
   4632 		txrx_established++;
   4633 		intr_idx++;
   4634 	}
   4635 
   4636 	/*
   4637 	 * LINK
   4638 	 */
   4639 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4640 	    sizeof(intrbuf));
   4641 #ifdef WM_MPSAFE
   4642 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4643 #endif
   4644 	memset(intr_xname, 0, sizeof(intr_xname));
   4645 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4646 	    device_xname(sc->sc_dev));
   4647 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4648 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4649 	if (vih == NULL) {
   4650 		aprint_error_dev(sc->sc_dev,
    4651 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4652 		    intrstr ? " at " : "",
   4653 		    intrstr ? intrstr : "");
   4654 
   4655 		goto fail;
   4656 	}
    4657 	/* Keep the default affinity for the LINK interrupt. */
   4658 	aprint_normal_dev(sc->sc_dev,
   4659 	    "for LINK interrupting at %s\n", intrstr);
   4660 	sc->sc_ihs[intr_idx] = vih;
   4661 	sc->sc_link_intr_idx = intr_idx;
   4662 
   4663 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4664 	kcpuset_destroy(affinity);
   4665 	return 0;
   4666 
   4667  fail:
   4668 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4669 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4670 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4671 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4672 	}
   4673 
   4674 	kcpuset_destroy(affinity);
   4675 	return ENOMEM;
   4676 }
   4677 
   4678 static void
   4679 wm_turnon(struct wm_softc *sc)
   4680 {
   4681 	int i;
   4682 
   4683 	KASSERT(WM_CORE_LOCKED(sc));
   4684 
    4685 	for (i = 0; i < sc->sc_nqueues; i++) {
   4686 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4687 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4688 
   4689 		mutex_enter(txq->txq_lock);
   4690 		txq->txq_stopping = false;
   4691 		mutex_exit(txq->txq_lock);
   4692 
   4693 		mutex_enter(rxq->rxq_lock);
   4694 		rxq->rxq_stopping = false;
   4695 		mutex_exit(rxq->rxq_lock);
   4696 	}
   4697 
   4698 	sc->sc_core_stopping = false;
   4699 }
   4700 
   4701 static void
   4702 wm_turnoff(struct wm_softc *sc)
   4703 {
   4704 	int i;
   4705 
   4706 	KASSERT(WM_CORE_LOCKED(sc));
   4707 
   4708 	sc->sc_core_stopping = true;
   4709 
    4710 	for (i = 0; i < sc->sc_nqueues; i++) {
   4711 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4712 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4713 
   4714 		mutex_enter(rxq->rxq_lock);
   4715 		rxq->rxq_stopping = true;
   4716 		mutex_exit(rxq->rxq_lock);
   4717 
   4718 		mutex_enter(txq->txq_lock);
   4719 		txq->txq_stopping = true;
   4720 		mutex_exit(txq->txq_lock);
   4721 	}
   4722 }
   4723 
   4724 /*
   4725  * wm_init:		[ifnet interface function]
   4726  *
   4727  *	Initialize the interface.
   4728  */
   4729 static int
   4730 wm_init(struct ifnet *ifp)
   4731 {
   4732 	struct wm_softc *sc = ifp->if_softc;
   4733 	int ret;
   4734 
   4735 	WM_CORE_LOCK(sc);
   4736 	ret = wm_init_locked(ifp);
   4737 	WM_CORE_UNLOCK(sc);
   4738 
   4739 	return ret;
   4740 }
   4741 
   4742 static int
   4743 wm_init_locked(struct ifnet *ifp)
   4744 {
   4745 	struct wm_softc *sc = ifp->if_softc;
   4746 	int i, j, trynum, error = 0;
   4747 	uint32_t reg;
   4748 
   4749 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4750 		device_xname(sc->sc_dev), __func__));
   4751 	KASSERT(WM_CORE_LOCKED(sc));
   4752 
   4753 	/*
    4754 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4755 	 * There is a small but measurable benefit to avoiding the adjustment
   4756 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4757 	 * on such platforms.  One possibility is that the DMA itself is
   4758 	 * slightly more efficient if the front of the entire packet (instead
   4759 	 * of the front of the headers) is aligned.
   4760 	 *
   4761 	 * Note we must always set align_tweak to 0 if we are using
   4762 	 * jumbo frames.
   4763 	 */
   4764 #ifdef __NO_STRICT_ALIGNMENT
   4765 	sc->sc_align_tweak = 0;
   4766 #else
   4767 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4768 		sc->sc_align_tweak = 0;
   4769 	else
   4770 		sc->sc_align_tweak = 2;
   4771 #endif /* __NO_STRICT_ALIGNMENT */
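
	/*
	 * For example, align_tweak = 2 advances the receive buffer start by
	 * two bytes, so the 14-byte Ethernet header ends on a 4-byte
	 * boundary and the IP header lands word-aligned.  The (MCLBYTES - 2)
	 * test above falls back to 0 when a frame needs the full cluster.
	 */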
   4772 
   4773 	/* Cancel any pending I/O. */
   4774 	wm_stop_locked(ifp, 0);
   4775 
    4776 	/* Update statistics before reset. */
   4777 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4778 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4779 
   4780 	/* PCH_SPT hardware workaround */
   4781 	if (sc->sc_type == WM_T_PCH_SPT)
   4782 		wm_flush_desc_rings(sc);
   4783 
   4784 	/* Reset the chip to a known state. */
   4785 	wm_reset(sc);
   4786 
   4787 	/* AMT based hardware can now take control from firmware */
   4788 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4789 		wm_get_hw_control(sc);
   4790 
   4791 	/* Init hardware bits */
   4792 	wm_initialize_hardware_bits(sc);
   4793 
   4794 	/* Reset the PHY. */
   4795 	if (sc->sc_flags & WM_F_HAS_MII)
   4796 		wm_gmii_reset(sc);
   4797 
   4798 	/* Calculate (E)ITR value */
   4799 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4800 		sc->sc_itr = 450;	/* For EITR */
   4801 	} else if (sc->sc_type >= WM_T_82543) {
   4802 		/*
   4803 		 * Set up the interrupt throttling register (units of 256ns)
    4804 		 * Set up the interrupt throttling register (units of 256ns).
    4805 		 * Note that a footnote in Intel's documentation says this
    4806 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit or
    4807 		 * 10Mbit mode.  Empirically, this also appears to hold for the
    4808 		 * 1024ns units of the other interrupt-related timer registers,
    4809 		 * so we really ought to divide this value by 4 at low link speeds.
   4810 		 *
   4811 		 * XXX implement this division at link speed change!
   4812 		 */
   4813 
   4814 		/*
   4815 		 * For N interrupts/sec, set this value to:
   4816 		 * 1000000000 / (N * 256).  Note that we set the
   4817 		 * absolute and packet timer values to this value
   4818 		 * divided by 4 to get "simple timer" behavior.
   4819 		 */
   4820 
   4821 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4822 	}
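
	/*
	 * Worked example: with sc_itr = 1500, the target rate is
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching the
	 * comment above.  The "simple timer" registers (TIDV/TADV and
	 * RDTR/RADV) are later programmed with sc_itr / 4, i.e. 375 of
	 * their 1024ns units.
	 */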
   4823 
   4824 	error = wm_init_txrx_queues(sc);
   4825 	if (error)
   4826 		goto out;
   4827 
   4828 	/*
   4829 	 * Clear out the VLAN table -- we don't use it (yet).
   4830 	 */
   4831 	CSR_WRITE(sc, WMREG_VET, 0);
   4832 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4833 		trynum = 10; /* Due to hw errata */
   4834 	else
   4835 		trynum = 1;
   4836 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4837 		for (j = 0; j < trynum; j++)
   4838 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4839 
   4840 	/*
   4841 	 * Set up flow-control parameters.
   4842 	 *
   4843 	 * XXX Values could probably stand some tuning.
   4844 	 */
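	/*
	 * FCAL/FCAH hold the 802.3x pause-frame destination address
	 * (01:80:c2:00:00:01) and FCT its ethertype (0x8808).  FCRTL and
	 * FCRTH, written below, are the receive-FIFO low and high
	 * watermarks that govern XON/XOFF transmission.
	 */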
   4845 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4846 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4847 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4848 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4849 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4850 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4851 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4852 	}
   4853 
   4854 	sc->sc_fcrtl = FCRTL_DFLT;
   4855 	if (sc->sc_type < WM_T_82543) {
   4856 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4857 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4858 	} else {
   4859 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4860 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4861 	}
   4862 
   4863 	if (sc->sc_type == WM_T_80003)
   4864 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4865 	else
   4866 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4867 
   4868 	/* Writes the control register. */
   4869 	wm_set_vlan(sc);
   4870 
   4871 	if (sc->sc_flags & WM_F_HAS_MII) {
   4872 		int val;
   4873 
   4874 		switch (sc->sc_type) {
   4875 		case WM_T_80003:
   4876 		case WM_T_ICH8:
   4877 		case WM_T_ICH9:
   4878 		case WM_T_ICH10:
   4879 		case WM_T_PCH:
   4880 		case WM_T_PCH2:
   4881 		case WM_T_PCH_LPT:
   4882 		case WM_T_PCH_SPT:
   4883 			/*
    4884 			 * Set the MAC to wait the maximum time between each
    4885 			 * iteration and increase the max iterations when
    4886 			 * polling the PHY; this fixes erroneous timeouts at
    4887 			 * 10Mbps.
   4888 			 */
   4889 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4890 			    0xFFFF);
   4891 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4892 			val |= 0x3F;
   4893 			wm_kmrn_writereg(sc,
   4894 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4895 			break;
   4896 		default:
   4897 			break;
   4898 		}
   4899 
   4900 		if (sc->sc_type == WM_T_80003) {
   4901 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4902 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4903 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4904 
   4905 			/* Bypass RX and TX FIFO's */
   4906 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4907 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4908 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4909 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4910 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4911 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4912 		}
   4913 	}
   4914 #if 0
   4915 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4916 #endif
   4917 
   4918 	/* Set up checksum offload parameters. */
   4919 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4920 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4921 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4922 		reg |= RXCSUM_IPOFL;
   4923 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4924 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4925 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4926 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4927 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4928 
   4929 	/* Set up MSI-X */
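	/*
	 * Each queue's Tx and Rx interrupt causes are steered to an MSI-X
	 * vector below.  The 82575 uses per-vector MSIXBM cause bitmaps,
	 * the 82574 packs vector numbers into its single IVAR register,
	 * and the 82576 and later use per-queue IVAR registers; the IVAR
	 * encodings flag each entry with a "valid" bit.
	 */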
   4930 	if (sc->sc_nintrs > 1) {
   4931 		uint32_t ivar;
   4932 		struct wm_queue *wmq;
   4933 		int qid, qintr_idx;
   4934 
   4935 		if (sc->sc_type == WM_T_82575) {
   4936 			/* Interrupt control */
   4937 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4938 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4939 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4940 
   4941 			/* TX and RX */
   4942 			for (i = 0; i < sc->sc_nqueues; i++) {
   4943 				wmq = &sc->sc_queue[i];
   4944 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4945 				    EITR_TX_QUEUE(wmq->wmq_id)
   4946 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4947 			}
   4948 			/* Link status */
   4949 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4950 			    EITR_OTHER);
   4951 		} else if (sc->sc_type == WM_T_82574) {
   4952 			/* Interrupt control */
   4953 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4954 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4955 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4956 
   4957 			ivar = 0;
   4958 			/* TX and RX */
   4959 			for (i = 0; i < sc->sc_nqueues; i++) {
   4960 				wmq = &sc->sc_queue[i];
   4961 				qid = wmq->wmq_id;
   4962 				qintr_idx = wmq->wmq_intr_idx;
   4963 
   4964 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4965 				    IVAR_TX_MASK_Q_82574(qid));
   4966 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4967 				    IVAR_RX_MASK_Q_82574(qid));
   4968 			}
   4969 			/* Link status */
   4970 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4971 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4972 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4973 		} else {
   4974 			/* Interrupt control */
   4975 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4976 			    | GPIE_EIAME | GPIE_PBA);
   4977 
   4978 			switch (sc->sc_type) {
   4979 			case WM_T_82580:
   4980 			case WM_T_I350:
   4981 			case WM_T_I354:
   4982 			case WM_T_I210:
   4983 			case WM_T_I211:
   4984 				/* TX and RX */
   4985 				for (i = 0; i < sc->sc_nqueues; i++) {
   4986 					wmq = &sc->sc_queue[i];
   4987 					qid = wmq->wmq_id;
   4988 					qintr_idx = wmq->wmq_intr_idx;
   4989 
   4990 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4991 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4992 					ivar |= __SHIFTIN((qintr_idx
   4993 						| IVAR_VALID),
   4994 					    IVAR_TX_MASK_Q(qid));
   4995 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4996 					ivar |= __SHIFTIN((qintr_idx
   4997 						| IVAR_VALID),
   4998 					    IVAR_RX_MASK_Q(qid));
   4999 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5000 				}
   5001 				break;
   5002 			case WM_T_82576:
   5003 				/* TX and RX */
   5004 				for (i = 0; i < sc->sc_nqueues; i++) {
   5005 					wmq = &sc->sc_queue[i];
   5006 					qid = wmq->wmq_id;
   5007 					qintr_idx = wmq->wmq_intr_idx;
   5008 
   5009 					ivar = CSR_READ(sc,
   5010 					    WMREG_IVAR_Q_82576(qid));
   5011 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5012 					ivar |= __SHIFTIN((qintr_idx
   5013 						| IVAR_VALID),
   5014 					    IVAR_TX_MASK_Q_82576(qid));
   5015 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5016 					ivar |= __SHIFTIN((qintr_idx
   5017 						| IVAR_VALID),
   5018 					    IVAR_RX_MASK_Q_82576(qid));
   5019 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5020 					    ivar);
   5021 				}
   5022 				break;
   5023 			default:
   5024 				break;
   5025 			}
   5026 
   5027 			/* Link status */
   5028 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5029 			    IVAR_MISC_OTHER);
   5030 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5031 		}
   5032 
   5033 		if (sc->sc_nqueues > 1) {
   5034 			wm_init_rss(sc);
   5035 
    5036 			/*
    5037 			 * NOTE: Receive Full-Packet Checksum Offload is
    5038 			 * mutually exclusive with Multiqueue.  However, this
    5039 			 * is not the same as the per-protocol TCP/IP checksum
    5040 			 * offloads, which still work.
    5041 			 */
   5042 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5043 			reg |= RXCSUM_PCSD;
   5044 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5045 		}
   5046 	}
   5047 
   5048 	/* Set up the interrupt registers. */
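	/*
	 * IMC masks every legacy interrupt cause before IMS re-enables the
	 * ones we want.  With MSI-X, the extended registers take over:
	 * EIAC marks causes that auto-clear when their vector is delivered,
	 * while EIAM and EIMS control the auto-mask and enable state.
	 */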
   5049 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5050 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5051 	    ICR_RXO | ICR_RXT0;
   5052 	if (sc->sc_nintrs > 1) {
   5053 		uint32_t mask;
   5054 		struct wm_queue *wmq;
   5055 
   5056 		switch (sc->sc_type) {
   5057 		case WM_T_82574:
   5058 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5059 			    WMREG_EIAC_82574_MSIX_MASK);
   5060 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5061 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5062 			break;
   5063 		default:
   5064 			if (sc->sc_type == WM_T_82575) {
   5065 				mask = 0;
   5066 				for (i = 0; i < sc->sc_nqueues; i++) {
   5067 					wmq = &sc->sc_queue[i];
   5068 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5069 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5070 				}
   5071 				mask |= EITR_OTHER;
   5072 			} else {
   5073 				mask = 0;
   5074 				for (i = 0; i < sc->sc_nqueues; i++) {
   5075 					wmq = &sc->sc_queue[i];
   5076 					mask |= 1 << wmq->wmq_intr_idx;
   5077 				}
   5078 				mask |= 1 << sc->sc_link_intr_idx;
   5079 			}
   5080 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5081 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5082 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5083 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5084 			break;
   5085 		}
   5086 	} else
   5087 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5088 
   5089 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5090 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5091 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5092 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5093 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5094 		reg |= KABGTXD_BGSQLBIAS;
   5095 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5096 	}
   5097 
   5098 	/* Set up the inter-packet gap. */
   5099 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5100 
   5101 	if (sc->sc_type >= WM_T_82543) {
    5102 		/*
    5103 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
    5104 		 * the multiqueue function with MSI-X.
    5105 		 */
   5106 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5107 			int qidx;
   5108 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5109 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5110 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5111 				    sc->sc_itr);
   5112 			}
    5113 			/*
    5114 			 * Link interrupts occur much less frequently than
    5115 			 * Tx and Rx interrupts, so we don't tune the
    5116 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5117 			 * FreeBSD's if_igb does.
    5118 			 */
   5119 		} else
   5120 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5121 	}
   5122 
   5123 	/* Set the VLAN ethernetype. */
   5124 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5125 
    5126 	/*
    5127 	 * Set up the transmit control register; we start out with
    5128 	 * a collision distance suitable for FDX, but update it when
    5129 	 * we resolve the media type.
    5130 	 */
   5131 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5132 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5133 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5134 	if (sc->sc_type >= WM_T_82571)
   5135 		sc->sc_tctl |= TCTL_MULR;
   5136 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5137 
   5138 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5139 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5140 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5141 	}
   5142 
   5143 	if (sc->sc_type == WM_T_80003) {
   5144 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5145 		reg &= ~TCTL_EXT_GCEX_MASK;
   5146 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5147 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5148 	}
   5149 
   5150 	/* Set the media. */
   5151 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5152 		goto out;
   5153 
   5154 	/* Configure for OS presence */
   5155 	wm_init_manageability(sc);
   5156 
   5157 	/*
   5158 	 * Set up the receive control register; we actually program
   5159 	 * the register when we set the receive filter.  Use multicast
   5160 	 * address offset type 0.
   5161 	 *
   5162 	 * Only the i82544 has the ability to strip the incoming
   5163 	 * CRC, so we don't enable that feature.
   5164 	 */
   5165 	sc->sc_mchash_type = 0;
   5166 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5167 	    | RCTL_MO(sc->sc_mchash_type);
   5168 
   5169 	/*
   5170 	 * The I350 has a bug where it always strips the CRC whether
    5171 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
   5172 	 */
   5173 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5174 	    || (sc->sc_type == WM_T_I210))
   5175 		sc->sc_rctl |= RCTL_SECRC;
   5176 
   5177 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5178 	    && (ifp->if_mtu > ETHERMTU)) {
   5179 		sc->sc_rctl |= RCTL_LPE;
   5180 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5181 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5182 	}
   5183 
   5184 	if (MCLBYTES == 2048) {
   5185 		sc->sc_rctl |= RCTL_2k;
   5186 	} else {
   5187 		if (sc->sc_type >= WM_T_82543) {
   5188 			switch (MCLBYTES) {
   5189 			case 4096:
   5190 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5191 				break;
   5192 			case 8192:
   5193 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5194 				break;
   5195 			case 16384:
   5196 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5197 				break;
   5198 			default:
   5199 				panic("wm_init: MCLBYTES %d unsupported",
   5200 				    MCLBYTES);
   5201 				break;
   5202 			}
    5203 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5204 	}
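
	/*
	 * To recap the sizing above: 2K clusters use the default RCTL
	 * buffer size, while 4K/8K/16K clusters need the buffer size
	 * extension bit (RCTL_BSEX) plus the matching size code, and the
	 * pre-82543 chips support only the 2K setting, hence the panic.
	 */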
   5205 
   5206 	/* Set the receive filter. */
   5207 	wm_set_filter(sc);
   5208 
   5209 	/* Enable ECC */
   5210 	switch (sc->sc_type) {
   5211 	case WM_T_82571:
   5212 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5213 		reg |= PBA_ECC_CORR_EN;
   5214 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5215 		break;
   5216 	case WM_T_PCH_LPT:
   5217 	case WM_T_PCH_SPT:
   5218 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5219 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5220 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5221 
   5222 		sc->sc_ctrl |= CTRL_MEHE;
   5223 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5224 		break;
   5225 	default:
   5226 		break;
   5227 	}
   5228 
    5229 	/* On 82575 and later, set RDT only if RX is enabled. */
   5230 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5231 		int qidx;
   5232 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5233 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5234 			for (i = 0; i < WM_NRXDESC; i++) {
   5235 				mutex_enter(rxq->rxq_lock);
   5236 				wm_init_rxdesc(rxq, i);
   5237 				mutex_exit(rxq->rxq_lock);
   5239 			}
   5240 		}
   5241 	}
   5242 
   5243 	wm_turnon(sc);
   5244 
   5245 	/* Start the one second link check clock. */
   5246 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5247 
   5248 	/* ...all done! */
   5249 	ifp->if_flags |= IFF_RUNNING;
   5250 	ifp->if_flags &= ~IFF_OACTIVE;
   5251 
   5252  out:
   5253 	sc->sc_if_flags = ifp->if_flags;
   5254 	if (error)
   5255 		log(LOG_ERR, "%s: interface not running\n",
   5256 		    device_xname(sc->sc_dev));
   5257 	return error;
   5258 }
   5259 
   5260 /*
   5261  * wm_stop:		[ifnet interface function]
   5262  *
   5263  *	Stop transmission on the interface.
   5264  */
   5265 static void
   5266 wm_stop(struct ifnet *ifp, int disable)
   5267 {
   5268 	struct wm_softc *sc = ifp->if_softc;
   5269 
   5270 	WM_CORE_LOCK(sc);
   5271 	wm_stop_locked(ifp, disable);
   5272 	WM_CORE_UNLOCK(sc);
   5273 }
   5274 
   5275 static void
   5276 wm_stop_locked(struct ifnet *ifp, int disable)
   5277 {
   5278 	struct wm_softc *sc = ifp->if_softc;
   5279 	struct wm_txsoft *txs;
   5280 	int i, qidx;
   5281 
   5282 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5283 		device_xname(sc->sc_dev), __func__));
   5284 	KASSERT(WM_CORE_LOCKED(sc));
   5285 
   5286 	wm_turnoff(sc);
   5287 
   5288 	/* Stop the one second clock. */
   5289 	callout_stop(&sc->sc_tick_ch);
   5290 
   5291 	/* Stop the 82547 Tx FIFO stall check timer. */
   5292 	if (sc->sc_type == WM_T_82547)
   5293 		callout_stop(&sc->sc_txfifo_ch);
   5294 
   5295 	if (sc->sc_flags & WM_F_HAS_MII) {
   5296 		/* Down the MII. */
   5297 		mii_down(&sc->sc_mii);
   5298 	} else {
   5299 #if 0
   5300 		/* Should we clear PHY's status properly? */
   5301 		wm_reset(sc);
   5302 #endif
   5303 	}
   5304 
   5305 	/* Stop the transmit and receive processes. */
   5306 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5307 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5308 	sc->sc_rctl &= ~RCTL_EN;
   5309 
   5310 	/*
   5311 	 * Clear the interrupt mask to ensure the device cannot assert its
   5312 	 * interrupt line.
   5313 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5314 	 * service any currently pending or shared interrupt.
   5315 	 */
   5316 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5317 	sc->sc_icr = 0;
   5318 	if (sc->sc_nintrs > 1) {
   5319 		if (sc->sc_type != WM_T_82574) {
   5320 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5321 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5322 		} else
   5323 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5324 	}
   5325 
   5326 	/* Release any queued transmit buffers. */
   5327 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5328 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5329 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5330 		mutex_enter(txq->txq_lock);
   5331 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5332 			txs = &txq->txq_soft[i];
   5333 			if (txs->txs_mbuf != NULL) {
    5334 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5335 				m_freem(txs->txs_mbuf);
   5336 				txs->txs_mbuf = NULL;
   5337 			}
   5338 		}
   5339 		mutex_exit(txq->txq_lock);
   5340 	}
   5341 
   5342 	/* Mark the interface as down and cancel the watchdog timer. */
   5343 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5344 	ifp->if_timer = 0;
   5345 
   5346 	if (disable) {
   5347 		for (i = 0; i < sc->sc_nqueues; i++) {
   5348 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5349 			mutex_enter(rxq->rxq_lock);
   5350 			wm_rxdrain(rxq);
   5351 			mutex_exit(rxq->rxq_lock);
   5352 		}
   5353 	}
   5354 
   5355 #if 0 /* notyet */
   5356 	if (sc->sc_type >= WM_T_82544)
   5357 		CSR_WRITE(sc, WMREG_WUC, 0);
   5358 #endif
   5359 }
   5360 
   5361 static void
   5362 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5363 {
   5364 	struct mbuf *m;
   5365 	int i;
   5366 
   5367 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5368 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5369 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5370 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5371 		    m->m_data, m->m_len, m->m_flags);
   5372 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5373 	    i, i == 1 ? "" : "s");
   5374 }
   5375 
   5376 /*
   5377  * wm_82547_txfifo_stall:
   5378  *
   5379  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5380  *	reset the FIFO pointers, and restart packet transmission.
   5381  */
   5382 static void
   5383 wm_82547_txfifo_stall(void *arg)
   5384 {
   5385 	struct wm_softc *sc = arg;
   5386 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5387 
   5388 	mutex_enter(txq->txq_lock);
   5389 
   5390 	if (txq->txq_stopping)
   5391 		goto out;
   5392 
   5393 	if (txq->txq_fifo_stall) {
   5394 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5395 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5396 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5397 			/*
   5398 			 * Packets have drained.  Stop transmitter, reset
   5399 			 * FIFO pointers, restart transmitter, and kick
   5400 			 * the packet queue.
   5401 			 */
   5402 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5403 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5404 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5405 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5406 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5407 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5408 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5409 			CSR_WRITE_FLUSH(sc);
   5410 
   5411 			txq->txq_fifo_head = 0;
   5412 			txq->txq_fifo_stall = 0;
   5413 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5414 		} else {
   5415 			/*
   5416 			 * Still waiting for packets to drain; try again in
   5417 			 * another tick.
   5418 			 */
   5419 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5420 		}
   5421 	}
   5422 
   5423 out:
   5424 	mutex_exit(txq->txq_lock);
   5425 }
   5426 
   5427 /*
   5428  * wm_82547_txfifo_bugchk:
   5429  *
   5430  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5431  *	prevent enqueueing a packet that would wrap around the end
    5432  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5433  *
   5434  *	We do this by checking the amount of space before the end
   5435  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5436  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5437  *	the internal FIFO pointers to the beginning, and restart
   5438  *	transmission on the interface.
   5439  */
   5440 #define	WM_FIFO_HDR		0x10
   5441 #define	WM_82547_PAD_LEN	0x3e0
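
/*
 * Worked example: a 1518-byte frame occupies
 * roundup(1518 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 FIFO bytes.  If only
 * space = 256 bytes remain before the end of the FIFO, then
 * len >= WM_82547_PAD_LEN + space (1536 >= 992 + 256), so the check
 * below stalls the FIFO until it drains rather than risk a wrap.
 */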
   5442 static int
   5443 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5444 {
   5445 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5446 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5447 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5448 
   5449 	/* Just return if already stalled. */
   5450 	if (txq->txq_fifo_stall)
   5451 		return 1;
   5452 
   5453 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5454 		/* Stall only occurs in half-duplex mode. */
   5455 		goto send_packet;
   5456 	}
   5457 
   5458 	if (len >= WM_82547_PAD_LEN + space) {
   5459 		txq->txq_fifo_stall = 1;
   5460 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5461 		return 1;
   5462 	}
   5463 
   5464  send_packet:
   5465 	txq->txq_fifo_head += len;
   5466 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5467 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5468 
   5469 	return 0;
   5470 }
   5471 
   5472 static int
   5473 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5474 {
   5475 	int error;
   5476 
   5477 	/*
   5478 	 * Allocate the control data structures, and create and load the
   5479 	 * DMA map for it.
   5480 	 *
   5481 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5482 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5483 	 * both sets within the same 4G segment.
   5484 	 */
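	/*
	 * The 4G constraint is enforced via the bus_dmamem_alloc()
	 * boundary argument of 0x100000000ULL below, which prevents the
	 * allocation from crossing a 4GB boundary; the ring base address
	 * is later split across the 32-bit TDBAL/TDBAH register pair.
	 */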
   5485 	if (sc->sc_type < WM_T_82544)
   5486 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5487 	else
   5488 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5489 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5490 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5491 	else
   5492 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5493 
   5494 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5495 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5496 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5497 		aprint_error_dev(sc->sc_dev,
   5498 		    "unable to allocate TX control data, error = %d\n",
   5499 		    error);
   5500 		goto fail_0;
   5501 	}
   5502 
   5503 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5504 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5505 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5506 		aprint_error_dev(sc->sc_dev,
   5507 		    "unable to map TX control data, error = %d\n", error);
   5508 		goto fail_1;
   5509 	}
   5510 
   5511 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5512 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5513 		aprint_error_dev(sc->sc_dev,
   5514 		    "unable to create TX control data DMA map, error = %d\n",
   5515 		    error);
   5516 		goto fail_2;
   5517 	}
   5518 
   5519 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5520 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5521 		aprint_error_dev(sc->sc_dev,
   5522 		    "unable to load TX control data DMA map, error = %d\n",
   5523 		    error);
   5524 		goto fail_3;
   5525 	}
   5526 
   5527 	return 0;
   5528 
   5529  fail_3:
   5530 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5531  fail_2:
   5532 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5533 	    WM_TXDESCS_SIZE(txq));
   5534  fail_1:
   5535 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5536  fail_0:
   5537 	return error;
   5538 }
   5539 
   5540 static void
   5541 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5542 {
   5543 
   5544 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5545 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5546 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5547 	    WM_TXDESCS_SIZE(txq));
   5548 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5549 }
   5550 
   5551 static int
   5552 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5553 {
   5554 	int error;
   5555 
   5556 	/*
   5557 	 * Allocate the control data structures, and create and load the
   5558 	 * DMA map for it.
   5559 	 *
   5560 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5561 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5562 	 * both sets within the same 4G segment.
   5563 	 */
   5564 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5565 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5566 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5567 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5568 		aprint_error_dev(sc->sc_dev,
   5569 		    "unable to allocate RX control data, error = %d\n",
   5570 		    error);
   5571 		goto fail_0;
   5572 	}
   5573 
   5574 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5575 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5576 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5577 		aprint_error_dev(sc->sc_dev,
   5578 		    "unable to map RX control data, error = %d\n", error);
   5579 		goto fail_1;
   5580 	}
   5581 
   5582 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5583 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5584 		aprint_error_dev(sc->sc_dev,
   5585 		    "unable to create RX control data DMA map, error = %d\n",
   5586 		    error);
   5587 		goto fail_2;
   5588 	}
   5589 
   5590 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5591 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5592 		aprint_error_dev(sc->sc_dev,
   5593 		    "unable to load RX control data DMA map, error = %d\n",
   5594 		    error);
   5595 		goto fail_3;
   5596 	}
   5597 
   5598 	return 0;
   5599 
   5600  fail_3:
   5601 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5602  fail_2:
   5603 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5604 	    rxq->rxq_desc_size);
   5605  fail_1:
   5606 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5607  fail_0:
   5608 	return error;
   5609 }
   5610 
   5611 static void
   5612 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5613 {
   5614 
   5615 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5616 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5617 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5618 	    rxq->rxq_desc_size);
   5619 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5620 }
   5621 
   5622 
   5623 static int
   5624 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5625 {
   5626 	int i, error;
   5627 
   5628 	/* Create the transmit buffer DMA maps. */
   5629 	WM_TXQUEUELEN(txq) =
   5630 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5631 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5632 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5633 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5634 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5635 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5636 			aprint_error_dev(sc->sc_dev,
   5637 			    "unable to create Tx DMA map %d, error = %d\n",
   5638 			    i, error);
   5639 			goto fail;
   5640 		}
   5641 	}
   5642 
   5643 	return 0;
   5644 
   5645  fail:
   5646 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5647 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5648 			bus_dmamap_destroy(sc->sc_dmat,
   5649 			    txq->txq_soft[i].txs_dmamap);
   5650 	}
   5651 	return error;
   5652 }
   5653 
   5654 static void
   5655 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5656 {
   5657 	int i;
   5658 
   5659 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5660 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5661 			bus_dmamap_destroy(sc->sc_dmat,
   5662 			    txq->txq_soft[i].txs_dmamap);
   5663 	}
   5664 }
   5665 
   5666 static int
   5667 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5668 {
   5669 	int i, error;
   5670 
   5671 	/* Create the receive buffer DMA maps. */
   5672 	for (i = 0; i < WM_NRXDESC; i++) {
   5673 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5674 			    MCLBYTES, 0, 0,
   5675 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5676 			aprint_error_dev(sc->sc_dev,
   5677 			    "unable to create Rx DMA map %d error = %d\n",
   5678 			    i, error);
   5679 			goto fail;
   5680 		}
   5681 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5682 	}
   5683 
   5684 	return 0;
   5685 
   5686  fail:
   5687 	for (i = 0; i < WM_NRXDESC; i++) {
   5688 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5689 			bus_dmamap_destroy(sc->sc_dmat,
   5690 			    rxq->rxq_soft[i].rxs_dmamap);
   5691 	}
   5692 	return error;
   5693 }
   5694 
   5695 static void
   5696 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5697 {
   5698 	int i;
   5699 
   5700 	for (i = 0; i < WM_NRXDESC; i++) {
   5701 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5702 			bus_dmamap_destroy(sc->sc_dmat,
   5703 			    rxq->rxq_soft[i].rxs_dmamap);
   5704 	}
   5705 }
   5706 
   5707 /*
    5708  * wm_alloc_txrx_queues:
    5709  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5710  */
   5711 static int
   5712 wm_alloc_txrx_queues(struct wm_softc *sc)
   5713 {
   5714 	int i, error, tx_done, rx_done;
   5715 
   5716 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5717 	    KM_SLEEP);
   5718 	if (sc->sc_queue == NULL) {
    5719 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5720 		error = ENOMEM;
   5721 		goto fail_0;
   5722 	}
   5723 
   5724 	/*
   5725 	 * For transmission
   5726 	 */
   5727 	error = 0;
   5728 	tx_done = 0;
   5729 	for (i = 0; i < sc->sc_nqueues; i++) {
   5730 #ifdef WM_EVENT_COUNTERS
   5731 		int j;
   5732 		const char *xname;
   5733 #endif
   5734 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5735 		txq->txq_sc = sc;
   5736 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5737 
   5738 		error = wm_alloc_tx_descs(sc, txq);
   5739 		if (error)
   5740 			break;
   5741 		error = wm_alloc_tx_buffer(sc, txq);
   5742 		if (error) {
   5743 			wm_free_tx_descs(sc, txq);
   5744 			break;
   5745 		}
   5746 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5747 		if (txq->txq_interq == NULL) {
   5748 			wm_free_tx_descs(sc, txq);
   5749 			wm_free_tx_buffer(sc, txq);
   5750 			error = ENOMEM;
   5751 			break;
   5752 		}
   5753 
   5754 #ifdef WM_EVENT_COUNTERS
   5755 		xname = device_xname(sc->sc_dev);
   5756 
   5757 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5758 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5759 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5760 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5761 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5762 
   5763 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5764 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5765 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5766 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5767 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5768 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5769 
   5770 		for (j = 0; j < WM_NTXSEGS; j++) {
   5771 			snprintf(txq->txq_txseg_evcnt_names[j],
   5772 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5773 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5774 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5775 		}
   5776 
   5777 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5778 
   5779 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5780 #endif /* WM_EVENT_COUNTERS */
   5781 
   5782 		tx_done++;
   5783 	}
   5784 	if (error)
   5785 		goto fail_1;
   5786 
   5787 	/*
    5788 	 * For receive
   5789 	 */
   5790 	error = 0;
   5791 	rx_done = 0;
   5792 	for (i = 0; i < sc->sc_nqueues; i++) {
   5793 #ifdef WM_EVENT_COUNTERS
   5794 		const char *xname;
   5795 #endif
   5796 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5797 		rxq->rxq_sc = sc;
   5798 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5799 
   5800 		error = wm_alloc_rx_descs(sc, rxq);
   5801 		if (error)
   5802 			break;
   5803 
   5804 		error = wm_alloc_rx_buffer(sc, rxq);
   5805 		if (error) {
   5806 			wm_free_rx_descs(sc, rxq);
   5807 			break;
   5808 		}
   5809 
   5810 #ifdef WM_EVENT_COUNTERS
   5811 		xname = device_xname(sc->sc_dev);
   5812 
   5813 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5814 
   5815 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5816 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5817 #endif /* WM_EVENT_COUNTERS */
   5818 
   5819 		rx_done++;
   5820 	}
   5821 	if (error)
   5822 		goto fail_2;
   5823 
   5824 	return 0;
   5825 
   5826  fail_2:
   5827 	for (i = 0; i < rx_done; i++) {
   5828 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5829 		wm_free_rx_buffer(sc, rxq);
   5830 		wm_free_rx_descs(sc, rxq);
   5831 		if (rxq->rxq_lock)
   5832 			mutex_obj_free(rxq->rxq_lock);
   5833 	}
   5834  fail_1:
   5835 	for (i = 0; i < tx_done; i++) {
   5836 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5837 		pcq_destroy(txq->txq_interq);
   5838 		wm_free_tx_buffer(sc, txq);
   5839 		wm_free_tx_descs(sc, txq);
   5840 		if (txq->txq_lock)
   5841 			mutex_obj_free(txq->txq_lock);
   5842 	}
   5843 
   5844 	kmem_free(sc->sc_queue,
   5845 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5846  fail_0:
   5847 	return error;
   5848 }
   5849 
   5850 /*
    5851  * wm_free_txrx_queues:
    5852  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   5853  */
   5854 static void
   5855 wm_free_txrx_queues(struct wm_softc *sc)
   5856 {
   5857 	int i;
   5858 
   5859 	for (i = 0; i < sc->sc_nqueues; i++) {
   5860 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5861 		wm_free_rx_buffer(sc, rxq);
   5862 		wm_free_rx_descs(sc, rxq);
   5863 		if (rxq->rxq_lock)
   5864 			mutex_obj_free(rxq->rxq_lock);
   5865 	}
   5866 
   5867 	for (i = 0; i < sc->sc_nqueues; i++) {
   5868 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5869 		wm_free_tx_buffer(sc, txq);
   5870 		wm_free_tx_descs(sc, txq);
   5871 		if (txq->txq_lock)
   5872 			mutex_obj_free(txq->txq_lock);
   5873 	}
   5874 
   5875 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5876 }
   5877 
   5878 static void
   5879 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5880 {
   5881 
   5882 	KASSERT(mutex_owned(txq->txq_lock));
   5883 
   5884 	/* Initialize the transmit descriptor ring. */
   5885 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5886 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5887 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5888 	txq->txq_free = WM_NTXDESC(txq);
   5889 	txq->txq_next = 0;
   5890 }
   5891 
   5892 static void
   5893 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5894     struct wm_txqueue *txq)
   5895 {
   5896 
   5897 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5898 		device_xname(sc->sc_dev), __func__));
   5899 	KASSERT(mutex_owned(txq->txq_lock));
   5900 
   5901 	if (sc->sc_type < WM_T_82543) {
   5902 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5903 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5904 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5905 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5906 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5907 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5908 	} else {
   5909 		int qid = wmq->wmq_id;
   5910 
   5911 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5912 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5913 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5914 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5915 
   5916 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5917 			/*
   5918 			 * Don't write TDT before TCTL.EN is set.
    5919 			 * See the documentation.
   5920 			 */
   5921 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5922 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5923 			    | TXDCTL_WTHRESH(0));
   5924 		else {
   5925 			/* ITR / 4 */
   5926 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5927 			if (sc->sc_type >= WM_T_82540) {
   5928 				/* should be same */
   5929 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5930 			}
   5931 
   5932 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5933 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5934 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5935 		}
   5936 	}
   5937 }
   5938 
   5939 static void
   5940 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5941 {
   5942 	int i;
   5943 
   5944 	KASSERT(mutex_owned(txq->txq_lock));
   5945 
   5946 	/* Initialize the transmit job descriptors. */
   5947 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5948 		txq->txq_soft[i].txs_mbuf = NULL;
   5949 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5950 	txq->txq_snext = 0;
   5951 	txq->txq_sdirty = 0;
   5952 }
   5953 
   5954 static void
   5955 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5956     struct wm_txqueue *txq)
   5957 {
   5958 
   5959 	KASSERT(mutex_owned(txq->txq_lock));
   5960 
   5961 	/*
   5962 	 * Set up some register offsets that are different between
   5963 	 * the i82542 and the i82543 and later chips.
   5964 	 */
   5965 	if (sc->sc_type < WM_T_82543)
   5966 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5967 	else
   5968 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5969 
   5970 	wm_init_tx_descs(sc, txq);
   5971 	wm_init_tx_regs(sc, wmq, txq);
   5972 	wm_init_tx_buffer(sc, txq);
   5973 }
   5974 
   5975 static void
   5976 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5977     struct wm_rxqueue *rxq)
   5978 {
   5979 
   5980 	KASSERT(mutex_owned(rxq->rxq_lock));
   5981 
   5982 	/*
   5983 	 * Initialize the receive descriptor and receive job
   5984 	 * descriptor rings.
   5985 	 */
   5986 	if (sc->sc_type < WM_T_82543) {
   5987 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5988 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5989 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5990 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5991 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5992 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5993 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5994 
   5995 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5996 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5997 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5998 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5999 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6000 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6001 	} else {
   6002 		int qid = wmq->wmq_id;
   6003 
   6004 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6005 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6006 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   6007 
   6008 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6009 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6010 				panic("%s: MCLBYTES %d unsupported for i82575 "
				    "or higher\n", __func__, MCLBYTES);
   6011 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   6012 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6013 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6014 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6015 			    | RXDCTL_WTHRESH(1));
   6016 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6017 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6018 		} else {
   6019 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6020 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6021 			/* ITR / 4 */
   6022 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6023 			/* MUST be same */
   6024 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6025 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6026 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6027 		}
   6028 	}
   6029 }
   6030 
   6031 static int
   6032 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6033 {
   6034 	struct wm_rxsoft *rxs;
   6035 	int error, i;
   6036 
   6037 	KASSERT(mutex_owned(rxq->rxq_lock));
   6038 
   6039 	for (i = 0; i < WM_NRXDESC; i++) {
   6040 		rxs = &rxq->rxq_soft[i];
   6041 		if (rxs->rxs_mbuf == NULL) {
   6042 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6043 				log(LOG_ERR, "%s: unable to allocate or map "
   6044 				    "rx buffer %d, error = %d\n",
   6045 				    device_xname(sc->sc_dev), i, error);
   6046 				/*
   6047 				 * XXX Should attempt to run with fewer receive
   6048 				 * XXX buffers instead of just failing.
   6049 				 */
   6050 				wm_rxdrain(rxq);
   6051 				return ENOMEM;
   6052 			}
   6053 		} else {
   6054 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6055 				wm_init_rxdesc(rxq, i);
   6056 			/*
    6057 			 * For 82575 and newer devices, the RX descriptors
    6058 			 * must be initialized after the setting of RCTL.EN in
    6059 			 * wm_set_filter().
   6060 			 */
   6061 		}
   6062 	}
   6063 	rxq->rxq_ptr = 0;
   6064 	rxq->rxq_discard = 0;
   6065 	WM_RXCHAIN_RESET(rxq);
   6066 
   6067 	return 0;
   6068 }
   6069 
   6070 static int
   6071 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6072     struct wm_rxqueue *rxq)
   6073 {
   6074 
   6075 	KASSERT(mutex_owned(rxq->rxq_lock));
   6076 
   6077 	/*
   6078 	 * Set up some register offsets that are different between
   6079 	 * the i82542 and the i82543 and later chips.
   6080 	 */
   6081 	if (sc->sc_type < WM_T_82543)
   6082 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6083 	else
   6084 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6085 
   6086 	wm_init_rx_regs(sc, wmq, rxq);
   6087 	return wm_init_rx_buffer(sc, rxq);
   6088 }
   6089 
   6090 /*
    6091  * wm_init_txrx_queues:
    6092  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6093  */
   6094 static int
   6095 wm_init_txrx_queues(struct wm_softc *sc)
   6096 {
   6097 	int i, error = 0;
   6098 
   6099 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6100 		device_xname(sc->sc_dev), __func__));
   6101 
   6102 	for (i = 0; i < sc->sc_nqueues; i++) {
   6103 		struct wm_queue *wmq = &sc->sc_queue[i];
   6104 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6105 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6106 
   6107 		mutex_enter(txq->txq_lock);
   6108 		wm_init_tx_queue(sc, wmq, txq);
   6109 		mutex_exit(txq->txq_lock);
   6110 
   6111 		mutex_enter(rxq->rxq_lock);
   6112 		error = wm_init_rx_queue(sc, wmq, rxq);
   6113 		mutex_exit(rxq->rxq_lock);
   6114 		if (error)
   6115 			break;
   6116 	}
   6117 
   6118 	return error;
   6119 }
   6120 
   6121 /*
   6122  * wm_tx_offload:
   6123  *
   6124  *	Set up TCP/IP checksumming parameters for the
   6125  *	specified packet.
   6126  */
   6127 static int
   6128 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6129     uint8_t *fieldsp)
   6130 {
   6131 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6132 	struct mbuf *m0 = txs->txs_mbuf;
   6133 	struct livengood_tcpip_ctxdesc *t;
   6134 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6135 	uint32_t ipcse;
   6136 	struct ether_header *eh;
   6137 	int offset, iphl;
   6138 	uint8_t fields;
   6139 
   6140 	/*
   6141 	 * XXX It would be nice if the mbuf pkthdr had offset
   6142 	 * fields for the protocol headers.
   6143 	 */
   6144 
   6145 	eh = mtod(m0, struct ether_header *);
   6146 	switch (htons(eh->ether_type)) {
   6147 	case ETHERTYPE_IP:
   6148 	case ETHERTYPE_IPV6:
   6149 		offset = ETHER_HDR_LEN;
   6150 		break;
   6151 
   6152 	case ETHERTYPE_VLAN:
   6153 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6154 		break;
   6155 
   6156 	default:
   6157 		/*
   6158 		 * Don't support this protocol or encapsulation.
   6159 		 */
   6160 		*fieldsp = 0;
   6161 		*cmdp = 0;
   6162 		return 0;
   6163 	}
   6164 
   6165 	if ((m0->m_pkthdr.csum_flags &
   6166 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6167 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6168 	} else {
   6169 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6170 	}
   6171 	ipcse = offset + iphl - 1;
   6172 
   6173 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6174 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6175 	seg = 0;
   6176 	fields = 0;
   6177 
   6178 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6179 		int hlen = offset + iphl;
   6180 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6181 
   6182 		if (__predict_false(m0->m_len <
   6183 				    (hlen + sizeof(struct tcphdr)))) {
   6184 			/*
   6185 			 * TCP/IP headers are not in the first mbuf; we need
   6186 			 * to do this the slow and painful way.  Let's just
   6187 			 * hope this doesn't happen very often.
   6188 			 */
   6189 			struct tcphdr th;
   6190 
   6191 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6192 
   6193 			m_copydata(m0, hlen, sizeof(th), &th);
   6194 			if (v4) {
   6195 				struct ip ip;
   6196 
   6197 				m_copydata(m0, offset, sizeof(ip), &ip);
   6198 				ip.ip_len = 0;
   6199 				m_copyback(m0,
   6200 				    offset + offsetof(struct ip, ip_len),
   6201 				    sizeof(ip.ip_len), &ip.ip_len);
   6202 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6203 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6204 			} else {
   6205 				struct ip6_hdr ip6;
   6206 
   6207 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6208 				ip6.ip6_plen = 0;
   6209 				m_copyback(m0,
   6210 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6211 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6212 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6213 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6214 			}
   6215 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6216 			    sizeof(th.th_sum), &th.th_sum);
   6217 
   6218 			hlen += th.th_off << 2;
   6219 		} else {
   6220 			/*
   6221 			 * TCP/IP headers are in the first mbuf; we can do
   6222 			 * this the easy way.
   6223 			 */
   6224 			struct tcphdr *th;
   6225 
   6226 			if (v4) {
   6227 				struct ip *ip =
   6228 				    (void *)(mtod(m0, char *) + offset);
   6229 				th = (void *)(mtod(m0, char *) + hlen);
   6230 
   6231 				ip->ip_len = 0;
   6232 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6233 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6234 			} else {
   6235 				struct ip6_hdr *ip6 =
   6236 				    (void *)(mtod(m0, char *) + offset);
   6237 				th = (void *)(mtod(m0, char *) + hlen);
   6238 
   6239 				ip6->ip6_plen = 0;
   6240 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6241 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6242 			}
   6243 			hlen += th->th_off << 2;
   6244 		}
   6245 
   6246 		if (v4) {
   6247 			WM_Q_EVCNT_INCR(txq, txtso);
   6248 			cmdlen |= WTX_TCPIP_CMD_IP;
   6249 		} else {
   6250 			WM_Q_EVCNT_INCR(txq, txtso6);
   6251 			ipcse = 0;
   6252 		}
   6253 		cmd |= WTX_TCPIP_CMD_TSE;
   6254 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6255 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6256 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6257 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6258 	}
   6259 
   6260 	/*
   6261 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6262 	 * offload feature, if we load the context descriptor, we
   6263 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6264 	 */
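	/*
	 * For example, for an untagged IPv4/TCP frame with a 20-byte IP
	 * header: IPCSS = 14 (start of the IP header), IPCSO = 14 + 10 = 24
	 * (offsetof(struct ip, ip_sum)) and IPCSE = 33 (last byte covered);
	 * TUCSS = 34, TUCSO = 34 + 16 = 50 (offsetof(struct tcphdr,
	 * th_sum)), and TUCSE = 0 means "through the end of the packet".
	 */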
   6265 
   6266 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6267 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6268 	    WTX_TCPIP_IPCSE(ipcse);
   6269 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6270 		WM_Q_EVCNT_INCR(txq, txipsum);
   6271 		fields |= WTX_IXSM;
   6272 	}
   6273 
   6274 	offset += iphl;
   6275 
   6276 	if (m0->m_pkthdr.csum_flags &
   6277 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6278 		WM_Q_EVCNT_INCR(txq, txtusum);
   6279 		fields |= WTX_TXSM;
   6280 		tucs = WTX_TCPIP_TUCSS(offset) |
   6281 		    WTX_TCPIP_TUCSO(offset +
   6282 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6283 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6284 	} else if ((m0->m_pkthdr.csum_flags &
   6285 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6286 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6287 		fields |= WTX_TXSM;
   6288 		tucs = WTX_TCPIP_TUCSS(offset) |
   6289 		    WTX_TCPIP_TUCSO(offset +
   6290 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6291 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6292 	} else {
   6293 		/* Just initialize it to a valid TCP context. */
   6294 		tucs = WTX_TCPIP_TUCSS(offset) |
   6295 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6296 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6297 	}
   6298 
   6299 	/* Fill in the context descriptor. */
   6300 	t = (struct livengood_tcpip_ctxdesc *)
   6301 	    &txq->txq_descs[txq->txq_next];
   6302 	t->tcpip_ipcs = htole32(ipcs);
   6303 	t->tcpip_tucs = htole32(tucs);
   6304 	t->tcpip_cmdlen = htole32(cmdlen);
   6305 	t->tcpip_seg = htole32(seg);
   6306 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6307 
   6308 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6309 	txs->txs_ndesc++;
   6310 
   6311 	*cmdp = cmd;
   6312 	*fieldsp = fields;
   6313 
   6314 	return 0;
   6315 }
   6316 
   6317 static inline int
   6318 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6319 {
   6320 	struct wm_softc *sc = ifp->if_softc;
   6321 	u_int cpuid = cpu_index(curcpu());
   6322 
   6323 	/*
    6324 	 * Currently, a simple distribution strategy.
    6325 	 * TODO:
    6326 	 * Distribute by flowid (the RSS hash value).
   6327 	 */
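         	/*
         	 * A flowid-based sketch might look like the following
         	 * (hypothetical; assumes an RSS hash has been stashed in
         	 * the packet header as "rsshash"):
         	 *	return rsshash % sc->sc_nqueues;
         	 */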
   6328 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6329 }
   6330 
   6331 /*
   6332  * wm_start:		[ifnet interface function]
   6333  *
   6334  *	Start packet transmission on the interface.
   6335  */
   6336 static void
   6337 wm_start(struct ifnet *ifp)
   6338 {
   6339 	struct wm_softc *sc = ifp->if_softc;
   6340 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6341 
   6342 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6343 
   6344 	/*
   6345 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6346 	 */
   6347 
   6348 	mutex_enter(txq->txq_lock);
   6349 	if (!txq->txq_stopping)
   6350 		wm_start_locked(ifp);
   6351 	mutex_exit(txq->txq_lock);
   6352 }
   6353 
   6354 static void
   6355 wm_start_locked(struct ifnet *ifp)
   6356 {
   6357 	struct wm_softc *sc = ifp->if_softc;
   6358 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6359 
   6360 	wm_send_common_locked(ifp, txq, false);
   6361 }
   6362 
   6363 static int
   6364 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6365 {
   6366 	int qid;
   6367 	struct wm_softc *sc = ifp->if_softc;
   6368 	struct wm_txqueue *txq;
   6369 
   6370 	qid = wm_select_txqueue(ifp, m);
   6371 	txq = &sc->sc_queue[qid].wmq_txq;
   6372 
   6373 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6374 		m_freem(m);
   6375 		WM_Q_EVCNT_INCR(txq, txdrop);
   6376 		return ENOBUFS;
   6377 	}
   6378 
   6379 	/*
   6380 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6381 	 */
   6382 	ifp->if_obytes += m->m_pkthdr.len;
   6383 	if (m->m_flags & M_MCAST)
   6384 		ifp->if_omcasts++;
   6385 
   6386 	if (mutex_tryenter(txq->txq_lock)) {
   6387 		if (!txq->txq_stopping)
   6388 			wm_transmit_locked(ifp, txq);
   6389 		mutex_exit(txq->txq_lock);
   6390 	}
   6391 
   6392 	return 0;
   6393 }
   6394 
   6395 static void
   6396 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6397 {
   6398 
   6399 	wm_send_common_locked(ifp, txq, true);
   6400 }
   6401 
   6402 static void
   6403 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6404     bool is_transmit)
   6405 {
   6406 	struct wm_softc *sc = ifp->if_softc;
   6407 	struct mbuf *m0;
   6408 	struct m_tag *mtag;
   6409 	struct wm_txsoft *txs;
   6410 	bus_dmamap_t dmamap;
   6411 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6412 	bus_addr_t curaddr;
   6413 	bus_size_t seglen, curlen;
   6414 	uint32_t cksumcmd;
   6415 	uint8_t cksumfields;
   6416 
   6417 	KASSERT(mutex_owned(txq->txq_lock));
   6418 
   6419 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6420 		return;
   6421 
   6422 	/* Remember the previous number of free descriptors. */
   6423 	ofree = txq->txq_free;
   6424 
   6425 	/*
   6426 	 * Loop through the send queue, setting up transmit descriptors
   6427 	 * until we drain the queue, or use up all available transmit
   6428 	 * descriptors.
   6429 	 */
   6430 	for (;;) {
   6431 		m0 = NULL;
   6432 
   6433 		/* Get a work queue entry. */
   6434 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6435 			wm_txeof(sc, txq);
   6436 			if (txq->txq_sfree == 0) {
   6437 				DPRINTF(WM_DEBUG_TX,
   6438 				    ("%s: TX: no free job descriptors\n",
   6439 					device_xname(sc->sc_dev)));
   6440 				WM_Q_EVCNT_INCR(txq, txsstall);
   6441 				break;
   6442 			}
   6443 		}
   6444 
   6445 		/* Grab a packet off the queue. */
   6446 		if (is_transmit)
   6447 			m0 = pcq_get(txq->txq_interq);
   6448 		else
   6449 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6450 		if (m0 == NULL)
   6451 			break;
   6452 
   6453 		DPRINTF(WM_DEBUG_TX,
   6454 		    ("%s: TX: have packet to transmit: %p\n",
   6455 		    device_xname(sc->sc_dev), m0));
   6456 
   6457 		txs = &txq->txq_soft[txq->txq_snext];
   6458 		dmamap = txs->txs_dmamap;
   6459 
   6460 		use_tso = (m0->m_pkthdr.csum_flags &
   6461 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6462 
   6463 		/*
   6464 		 * So says the Linux driver:
   6465 		 * The controller does a simple calculation to make sure
   6466 		 * there is enough room in the FIFO before initiating the
   6467 		 * DMA for each buffer.  The calc is:
   6468 		 *	4 = ceil(buffer len / MSS)
   6469 		 * To make sure we don't overrun the FIFO, adjust the max
   6470 		 * buffer len if the MSS drops.
   6471 		 */
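         		/*
         		 * E.g. with an MSS of 1460 this clamps dm_maxsegsz to
         		 * at most 4 * 1460 = 5840 bytes, so each buffer spans
         		 * no more than 4 MSS-sized chunks.
         		 */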
   6472 		dmamap->dm_maxsegsz =
   6473 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6474 		    ? m0->m_pkthdr.segsz << 2
   6475 		    : WTX_MAX_LEN;
   6476 
   6477 		/*
   6478 		 * Load the DMA map.  If this fails, the packet either
   6479 		 * didn't fit in the allotted number of segments, or we
   6480 		 * were short on resources.  For the too-many-segments
   6481 		 * case, we simply report an error and drop the packet,
   6482 		 * since we can't sanely copy a jumbo packet to a single
   6483 		 * buffer.
   6484 		 */
   6485 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6486 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6487 		if (error) {
   6488 			if (error == EFBIG) {
   6489 				WM_Q_EVCNT_INCR(txq, txdrop);
   6490 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6491 				    "DMA segments, dropping...\n",
   6492 				    device_xname(sc->sc_dev));
   6493 				wm_dump_mbuf_chain(sc, m0);
   6494 				m_freem(m0);
   6495 				continue;
   6496 			}
    6497 			/* Short on resources, just stop for now. */
   6498 			DPRINTF(WM_DEBUG_TX,
   6499 			    ("%s: TX: dmamap load failed: %d\n",
   6500 			    device_xname(sc->sc_dev), error));
   6501 			break;
   6502 		}
   6503 
   6504 		segs_needed = dmamap->dm_nsegs;
   6505 		if (use_tso) {
   6506 			/* For sentinel descriptor; see below. */
   6507 			segs_needed++;
   6508 		}
   6509 
   6510 		/*
   6511 		 * Ensure we have enough descriptors free to describe
   6512 		 * the packet.  Note, we always reserve one descriptor
   6513 		 * at the end of the ring due to the semantics of the
   6514 		 * TDT register, plus one more in the event we need
   6515 		 * to load offload context.
   6516 		 */
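         		/*
         		 * E.g. a packet needing 8 descriptors (plus a TSO
         		 * sentinel, 9) only proceeds when at least 11 are free.
         		 */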
   6517 		if (segs_needed > txq->txq_free - 2) {
   6518 			/*
   6519 			 * Not enough free descriptors to transmit this
   6520 			 * packet.  We haven't committed anything yet,
    6521 			 * so just unload the DMA map and punt; the
    6522 			 * packet is dropped below.  Notify the upper
   6523 			 * layer that there are no more slots left.
   6524 			 */
   6525 			DPRINTF(WM_DEBUG_TX,
   6526 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6527 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6528 			    segs_needed, txq->txq_free - 1));
   6529 			ifp->if_flags |= IFF_OACTIVE;
   6530 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6531 			WM_Q_EVCNT_INCR(txq, txdstall);
   6532 			break;
   6533 		}
   6534 
   6535 		/*
   6536 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6537 		 * once we know we can transmit the packet, since we
   6538 		 * do some internal FIFO space accounting here.
   6539 		 */
   6540 		if (sc->sc_type == WM_T_82547 &&
   6541 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6542 			DPRINTF(WM_DEBUG_TX,
   6543 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6544 			    device_xname(sc->sc_dev)));
   6545 			ifp->if_flags |= IFF_OACTIVE;
   6546 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6547 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6548 			break;
   6549 		}
   6550 
   6551 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6552 
   6553 		DPRINTF(WM_DEBUG_TX,
   6554 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6555 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6556 
   6557 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6558 
   6559 		/*
   6560 		 * Store a pointer to the packet so that we can free it
   6561 		 * later.
   6562 		 *
    6563 		 * Initially, we take the number of descriptors the
    6564 		 * packet uses to be its number of DMA segments.  This may be
   6565 		 * incremented by 1 if we do checksum offload (a descriptor
   6566 		 * is used to set the checksum context).
   6567 		 */
   6568 		txs->txs_mbuf = m0;
   6569 		txs->txs_firstdesc = txq->txq_next;
   6570 		txs->txs_ndesc = segs_needed;
   6571 
   6572 		/* Set up offload parameters for this packet. */
   6573 		if (m0->m_pkthdr.csum_flags &
   6574 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6575 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6576 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6577 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6578 					  &cksumfields) != 0) {
   6579 				/* Error message already displayed. */
   6580 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6581 				continue;
   6582 			}
   6583 		} else {
   6584 			cksumcmd = 0;
   6585 			cksumfields = 0;
   6586 		}
   6587 
   6588 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6589 
   6590 		/* Sync the DMA map. */
   6591 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6592 		    BUS_DMASYNC_PREWRITE);
   6593 
   6594 		/* Initialize the transmit descriptor. */
   6595 		for (nexttx = txq->txq_next, seg = 0;
   6596 		     seg < dmamap->dm_nsegs; seg++) {
   6597 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6598 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6599 			     seglen != 0;
   6600 			     curaddr += curlen, seglen -= curlen,
   6601 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6602 				curlen = seglen;
   6603 
   6604 				/*
   6605 				 * So says the Linux driver:
   6606 				 * Work around for premature descriptor
   6607 				 * write-backs in TSO mode.  Append a
   6608 				 * 4-byte sentinel descriptor.
   6609 				 */
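         				/*
         				 * Trimming 4 bytes here leaves a
         				 * 4-byte remainder in seglen, so the
         				 * loop emits one extra (sentinel)
         				 * descriptor; that is the slot added
         				 * to segs_needed above.
         				 */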
   6610 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6611 				    curlen > 8)
   6612 					curlen -= 4;
   6613 
   6614 				wm_set_dma_addr(
   6615 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6616 				txq->txq_descs[nexttx].wtx_cmdlen
   6617 				    = htole32(cksumcmd | curlen);
   6618 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6619 				    = 0;
   6620 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6621 				    = cksumfields;
   6622 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6623 				lasttx = nexttx;
   6624 
   6625 				DPRINTF(WM_DEBUG_TX,
   6626 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6627 				     "len %#04zx\n",
   6628 				    device_xname(sc->sc_dev), nexttx,
   6629 				    (uint64_t)curaddr, curlen));
   6630 			}
   6631 		}
   6632 
   6633 		KASSERT(lasttx != -1);
   6634 
   6635 		/*
   6636 		 * Set up the command byte on the last descriptor of
   6637 		 * the packet.  If we're in the interrupt delay window,
   6638 		 * delay the interrupt.
   6639 		 */
   6640 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6641 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6642 
   6643 		/*
   6644 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6645 		 * up the descriptor to encapsulate the packet for us.
   6646 		 *
   6647 		 * This is only valid on the last descriptor of the packet.
   6648 		 */
   6649 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6650 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6651 			    htole32(WTX_CMD_VLE);
   6652 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6653 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6654 		}
   6655 
   6656 		txs->txs_lastdesc = lasttx;
   6657 
   6658 		DPRINTF(WM_DEBUG_TX,
   6659 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6660 		    device_xname(sc->sc_dev),
   6661 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6662 
   6663 		/* Sync the descriptors we're using. */
   6664 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6665 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6666 
   6667 		/* Give the packet to the chip. */
   6668 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6669 
   6670 		DPRINTF(WM_DEBUG_TX,
   6671 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6672 
   6673 		DPRINTF(WM_DEBUG_TX,
   6674 		    ("%s: TX: finished transmitting packet, job %d\n",
   6675 		    device_xname(sc->sc_dev), txq->txq_snext));
   6676 
   6677 		/* Advance the tx pointer. */
   6678 		txq->txq_free -= txs->txs_ndesc;
   6679 		txq->txq_next = nexttx;
   6680 
   6681 		txq->txq_sfree--;
   6682 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6683 
   6684 		/* Pass the packet to any BPF listeners. */
   6685 		bpf_mtap(ifp, m0);
   6686 	}
   6687 
   6688 	if (m0 != NULL) {
   6689 		ifp->if_flags |= IFF_OACTIVE;
   6690 		WM_Q_EVCNT_INCR(txq, txdrop);
   6691 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6692 			__func__));
   6693 		m_freem(m0);
   6694 	}
   6695 
   6696 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6697 		/* No more slots; notify upper layer. */
   6698 		ifp->if_flags |= IFF_OACTIVE;
   6699 	}
   6700 
   6701 	if (txq->txq_free != ofree) {
   6702 		/* Set a watchdog timer in case the chip flakes out. */
   6703 		ifp->if_timer = 5;
   6704 	}
   6705 }
   6706 
   6707 /*
   6708  * wm_nq_tx_offload:
   6709  *
   6710  *	Set up TCP/IP checksumming parameters for the
   6711  *	specified packet, for NEWQUEUE devices
   6712  */
   6713 static int
   6714 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6715     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6716 {
   6717 	struct mbuf *m0 = txs->txs_mbuf;
   6718 	struct m_tag *mtag;
   6719 	uint32_t vl_len, mssidx, cmdc;
   6720 	struct ether_header *eh;
   6721 	int offset, iphl;
   6722 
   6723 	/*
   6724 	 * XXX It would be nice if the mbuf pkthdr had offset
   6725 	 * fields for the protocol headers.
   6726 	 */
   6727 	*cmdlenp = 0;
   6728 	*fieldsp = 0;
   6729 
   6730 	eh = mtod(m0, struct ether_header *);
   6731 	switch (htons(eh->ether_type)) {
   6732 	case ETHERTYPE_IP:
   6733 	case ETHERTYPE_IPV6:
   6734 		offset = ETHER_HDR_LEN;
   6735 		break;
   6736 
   6737 	case ETHERTYPE_VLAN:
   6738 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6739 		break;
   6740 
   6741 	default:
   6742 		/* Don't support this protocol or encapsulation. */
   6743 		*do_csum = false;
   6744 		return 0;
   6745 	}
   6746 	*do_csum = true;
   6747 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6748 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6749 
   6750 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6751 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6752 
   6753 	if ((m0->m_pkthdr.csum_flags &
   6754 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6755 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6756 	} else {
   6757 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6758 	}
   6759 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6760 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6761 
   6762 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6763 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6764 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6765 		*cmdlenp |= NQTX_CMD_VLE;
   6766 	}
   6767 
   6768 	mssidx = 0;
   6769 
   6770 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6771 		int hlen = offset + iphl;
   6772 		int tcp_hlen;
   6773 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6774 
   6775 		if (__predict_false(m0->m_len <
   6776 				    (hlen + sizeof(struct tcphdr)))) {
   6777 			/*
   6778 			 * TCP/IP headers are not in the first mbuf; we need
   6779 			 * to do this the slow and painful way.  Let's just
   6780 			 * hope this doesn't happen very often.
   6781 			 */
   6782 			struct tcphdr th;
   6783 
   6784 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6785 
   6786 			m_copydata(m0, hlen, sizeof(th), &th);
   6787 			if (v4) {
   6788 				struct ip ip;
   6789 
   6790 				m_copydata(m0, offset, sizeof(ip), &ip);
   6791 				ip.ip_len = 0;
   6792 				m_copyback(m0,
   6793 				    offset + offsetof(struct ip, ip_len),
   6794 				    sizeof(ip.ip_len), &ip.ip_len);
   6795 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6796 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6797 			} else {
   6798 				struct ip6_hdr ip6;
   6799 
   6800 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6801 				ip6.ip6_plen = 0;
   6802 				m_copyback(m0,
   6803 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6804 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6805 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6806 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6807 			}
   6808 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6809 			    sizeof(th.th_sum), &th.th_sum);
   6810 
   6811 			tcp_hlen = th.th_off << 2;
   6812 		} else {
   6813 			/*
   6814 			 * TCP/IP headers are in the first mbuf; we can do
   6815 			 * this the easy way.
   6816 			 */
   6817 			struct tcphdr *th;
   6818 
   6819 			if (v4) {
   6820 				struct ip *ip =
   6821 				    (void *)(mtod(m0, char *) + offset);
   6822 				th = (void *)(mtod(m0, char *) + hlen);
   6823 
   6824 				ip->ip_len = 0;
   6825 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6826 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6827 			} else {
   6828 				struct ip6_hdr *ip6 =
   6829 				    (void *)(mtod(m0, char *) + offset);
   6830 				th = (void *)(mtod(m0, char *) + hlen);
   6831 
   6832 				ip6->ip6_plen = 0;
   6833 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6834 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6835 			}
   6836 			tcp_hlen = th->th_off << 2;
   6837 		}
   6838 		hlen += tcp_hlen;
   6839 		*cmdlenp |= NQTX_CMD_TSE;
   6840 
   6841 		if (v4) {
   6842 			WM_Q_EVCNT_INCR(txq, txtso);
   6843 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6844 		} else {
   6845 			WM_Q_EVCNT_INCR(txq, txtso6);
   6846 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6847 		}
   6848 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6849 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6850 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6851 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6852 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6853 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6854 	} else {
   6855 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6856 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6857 	}
   6858 
   6859 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6860 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6861 		cmdc |= NQTXC_CMD_IP4;
   6862 	}
   6863 
   6864 	if (m0->m_pkthdr.csum_flags &
   6865 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6866 		WM_Q_EVCNT_INCR(txq, txtusum);
   6867 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6868 			cmdc |= NQTXC_CMD_TCP;
   6869 		} else {
   6870 			cmdc |= NQTXC_CMD_UDP;
   6871 		}
   6872 		cmdc |= NQTXC_CMD_IP4;
   6873 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6874 	}
   6875 	if (m0->m_pkthdr.csum_flags &
   6876 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6877 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6878 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6879 			cmdc |= NQTXC_CMD_TCP;
   6880 		} else {
   6881 			cmdc |= NQTXC_CMD_UDP;
   6882 		}
   6883 		cmdc |= NQTXC_CMD_IP6;
   6884 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6885 	}
   6886 
   6887 	/* Fill in the context descriptor. */
   6888 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6889 	    htole32(vl_len);
   6890 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6891 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6892 	    htole32(cmdc);
   6893 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6894 	    htole32(mssidx);
   6895 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6896 	DPRINTF(WM_DEBUG_TX,
   6897 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6898 	    txq->txq_next, 0, vl_len));
   6899 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6900 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6901 	txs->txs_ndesc++;
   6902 	return 0;
   6903 }
   6904 
   6905 /*
   6906  * wm_nq_start:		[ifnet interface function]
   6907  *
   6908  *	Start packet transmission on the interface for NEWQUEUE devices
   6909  */
   6910 static void
   6911 wm_nq_start(struct ifnet *ifp)
   6912 {
   6913 	struct wm_softc *sc = ifp->if_softc;
   6914 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6915 
   6916 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6917 
   6918 	/*
   6919 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6920 	 */
   6921 
   6922 	mutex_enter(txq->txq_lock);
   6923 	if (!txq->txq_stopping)
   6924 		wm_nq_start_locked(ifp);
   6925 	mutex_exit(txq->txq_lock);
   6926 }
   6927 
   6928 static void
   6929 wm_nq_start_locked(struct ifnet *ifp)
   6930 {
   6931 	struct wm_softc *sc = ifp->if_softc;
   6932 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6933 
   6934 	wm_nq_send_common_locked(ifp, txq, false);
   6935 }
   6936 
   6937 static int
   6938 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6939 {
   6940 	int qid;
   6941 	struct wm_softc *sc = ifp->if_softc;
   6942 	struct wm_txqueue *txq;
   6943 
   6944 	qid = wm_select_txqueue(ifp, m);
   6945 	txq = &sc->sc_queue[qid].wmq_txq;
   6946 
   6947 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6948 		m_freem(m);
   6949 		WM_Q_EVCNT_INCR(txq, txdrop);
   6950 		return ENOBUFS;
   6951 	}
   6952 
   6953 	/*
   6954 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6955 	 */
   6956 	ifp->if_obytes += m->m_pkthdr.len;
   6957 	if (m->m_flags & M_MCAST)
   6958 		ifp->if_omcasts++;
   6959 
   6960 	if (mutex_tryenter(txq->txq_lock)) {
   6961 		if (!txq->txq_stopping)
   6962 			wm_nq_transmit_locked(ifp, txq);
   6963 		mutex_exit(txq->txq_lock);
   6964 	}
   6965 
   6966 	return 0;
   6967 }
   6968 
   6969 static void
   6970 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6971 {
   6972 
   6973 	wm_nq_send_common_locked(ifp, txq, true);
   6974 }
   6975 
   6976 static void
   6977 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6978     bool is_transmit)
   6979 {
   6980 	struct wm_softc *sc = ifp->if_softc;
   6981 	struct mbuf *m0;
   6982 	struct m_tag *mtag;
   6983 	struct wm_txsoft *txs;
   6984 	bus_dmamap_t dmamap;
   6985 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6986 	bool do_csum, sent;
   6987 
   6988 	KASSERT(mutex_owned(txq->txq_lock));
   6989 
   6990 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6991 		return;
   6992 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6993 		return;
   6994 
   6995 	sent = false;
   6996 
   6997 	/*
   6998 	 * Loop through the send queue, setting up transmit descriptors
   6999 	 * until we drain the queue, or use up all available transmit
   7000 	 * descriptors.
   7001 	 */
   7002 	for (;;) {
   7003 		m0 = NULL;
   7004 
   7005 		/* Get a work queue entry. */
   7006 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7007 			wm_txeof(sc, txq);
   7008 			if (txq->txq_sfree == 0) {
   7009 				DPRINTF(WM_DEBUG_TX,
   7010 				    ("%s: TX: no free job descriptors\n",
   7011 					device_xname(sc->sc_dev)));
   7012 				WM_Q_EVCNT_INCR(txq, txsstall);
   7013 				break;
   7014 			}
   7015 		}
   7016 
   7017 		/* Grab a packet off the queue. */
   7018 		if (is_transmit)
   7019 			m0 = pcq_get(txq->txq_interq);
   7020 		else
   7021 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7022 		if (m0 == NULL)
   7023 			break;
   7024 
   7025 		DPRINTF(WM_DEBUG_TX,
   7026 		    ("%s: TX: have packet to transmit: %p\n",
   7027 		    device_xname(sc->sc_dev), m0));
   7028 
   7029 		txs = &txq->txq_soft[txq->txq_snext];
   7030 		dmamap = txs->txs_dmamap;
   7031 
   7032 		/*
   7033 		 * Load the DMA map.  If this fails, the packet either
   7034 		 * didn't fit in the allotted number of segments, or we
   7035 		 * were short on resources.  For the too-many-segments
   7036 		 * case, we simply report an error and drop the packet,
   7037 		 * since we can't sanely copy a jumbo packet to a single
   7038 		 * buffer.
   7039 		 */
   7040 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7041 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7042 		if (error) {
   7043 			if (error == EFBIG) {
   7044 				WM_Q_EVCNT_INCR(txq, txdrop);
   7045 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7046 				    "DMA segments, dropping...\n",
   7047 				    device_xname(sc->sc_dev));
   7048 				wm_dump_mbuf_chain(sc, m0);
   7049 				m_freem(m0);
   7050 				continue;
   7051 			}
   7052 			/* Short on resources, just stop for now. */
   7053 			DPRINTF(WM_DEBUG_TX,
   7054 			    ("%s: TX: dmamap load failed: %d\n",
   7055 			    device_xname(sc->sc_dev), error));
   7056 			break;
   7057 		}
   7058 
   7059 		segs_needed = dmamap->dm_nsegs;
   7060 
   7061 		/*
   7062 		 * Ensure we have enough descriptors free to describe
   7063 		 * the packet.  Note, we always reserve one descriptor
   7064 		 * at the end of the ring due to the semantics of the
   7065 		 * TDT register, plus one more in the event we need
   7066 		 * to load offload context.
   7067 		 */
   7068 		if (segs_needed > txq->txq_free - 2) {
   7069 			/*
   7070 			 * Not enough free descriptors to transmit this
   7071 			 * packet.  We haven't committed anything yet,
    7072 			 * so just unload the DMA map and punt; the
    7073 			 * packet is dropped below.  Notify the upper
   7074 			 * layer that there are no more slots left.
   7075 			 */
   7076 			DPRINTF(WM_DEBUG_TX,
   7077 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7078 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7079 			    segs_needed, txq->txq_free - 1));
   7080 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7081 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7082 			WM_Q_EVCNT_INCR(txq, txdstall);
   7083 			break;
   7084 		}
   7085 
   7086 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7087 
   7088 		DPRINTF(WM_DEBUG_TX,
   7089 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7090 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7091 
   7092 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7093 
   7094 		/*
   7095 		 * Store a pointer to the packet so that we can free it
   7096 		 * later.
   7097 		 *
    7098 		 * Initially, we take the number of descriptors the
    7099 		 * packet uses to be its number of DMA segments.  This may be
   7100 		 * incremented by 1 if we do checksum offload (a descriptor
   7101 		 * is used to set the checksum context).
   7102 		 */
   7103 		txs->txs_mbuf = m0;
   7104 		txs->txs_firstdesc = txq->txq_next;
   7105 		txs->txs_ndesc = segs_needed;
   7106 
   7107 		/* Set up offload parameters for this packet. */
   7108 		uint32_t cmdlen, fields, dcmdlen;
   7109 		if (m0->m_pkthdr.csum_flags &
   7110 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7111 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7112 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7113 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7114 			    &do_csum) != 0) {
   7115 				/* Error message already displayed. */
   7116 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7117 				continue;
   7118 			}
   7119 		} else {
   7120 			do_csum = false;
   7121 			cmdlen = 0;
   7122 			fields = 0;
   7123 		}
   7124 
   7125 		/* Sync the DMA map. */
   7126 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7127 		    BUS_DMASYNC_PREWRITE);
   7128 
   7129 		/* Initialize the first transmit descriptor. */
   7130 		nexttx = txq->txq_next;
   7131 		if (!do_csum) {
   7132 			/* setup a legacy descriptor */
   7133 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7134 			    dmamap->dm_segs[0].ds_addr);
   7135 			txq->txq_descs[nexttx].wtx_cmdlen =
   7136 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7137 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7138 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7139 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7140 			    NULL) {
   7141 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7142 				    htole32(WTX_CMD_VLE);
   7143 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7144 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7145 			} else {
   7146 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7147 			}
   7148 			dcmdlen = 0;
   7149 		} else {
   7150 			/* setup an advanced data descriptor */
   7151 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7152 			    htole64(dmamap->dm_segs[0].ds_addr);
   7153 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7154 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7155 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7156 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7157 			    htole32(fields);
   7158 			DPRINTF(WM_DEBUG_TX,
   7159 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7160 			    device_xname(sc->sc_dev), nexttx,
   7161 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7162 			DPRINTF(WM_DEBUG_TX,
   7163 			    ("\t 0x%08x%08x\n", fields,
   7164 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7165 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7166 		}
   7167 
   7168 		lasttx = nexttx;
   7169 		nexttx = WM_NEXTTX(txq, nexttx);
   7170 		/*
    7171 		 * Fill in the next descriptors.  The legacy and
    7172 		 * advanced formats are the same here.
   7173 		 */
   7174 		for (seg = 1; seg < dmamap->dm_nsegs;
   7175 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7176 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7177 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7178 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7179 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7180 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7181 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7182 			lasttx = nexttx;
   7183 
   7184 			DPRINTF(WM_DEBUG_TX,
   7185 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7186 			     "len %#04zx\n",
   7187 			    device_xname(sc->sc_dev), nexttx,
   7188 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7189 			    dmamap->dm_segs[seg].ds_len));
   7190 		}
   7191 
   7192 		KASSERT(lasttx != -1);
   7193 
   7194 		/*
   7195 		 * Set up the command byte on the last descriptor of
   7196 		 * the packet.  If we're in the interrupt delay window,
   7197 		 * delay the interrupt.
   7198 		 */
   7199 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7200 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
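         		/*
         		 * The EOP/RS bits occupy the same positions in the
         		 * legacy and advanced formats (asserted above), so the
         		 * legacy wtx_cmdlen overlay is safe for both.
         		 */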
   7201 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7202 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7203 
   7204 		txs->txs_lastdesc = lasttx;
   7205 
   7206 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7207 		    device_xname(sc->sc_dev),
   7208 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7209 
   7210 		/* Sync the descriptors we're using. */
   7211 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7212 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7213 
   7214 		/* Give the packet to the chip. */
   7215 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7216 		sent = true;
   7217 
   7218 		DPRINTF(WM_DEBUG_TX,
   7219 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7220 
   7221 		DPRINTF(WM_DEBUG_TX,
   7222 		    ("%s: TX: finished transmitting packet, job %d\n",
   7223 		    device_xname(sc->sc_dev), txq->txq_snext));
   7224 
   7225 		/* Advance the tx pointer. */
   7226 		txq->txq_free -= txs->txs_ndesc;
   7227 		txq->txq_next = nexttx;
   7228 
   7229 		txq->txq_sfree--;
   7230 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7231 
   7232 		/* Pass the packet to any BPF listeners. */
   7233 		bpf_mtap(ifp, m0);
   7234 	}
   7235 
   7236 	if (m0 != NULL) {
   7237 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7238 		WM_Q_EVCNT_INCR(txq, txdrop);
   7239 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7240 			__func__));
   7241 		m_freem(m0);
   7242 	}
   7243 
   7244 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7245 		/* No more slots; notify upper layer. */
   7246 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7247 	}
   7248 
   7249 	if (sent) {
   7250 		/* Set a watchdog timer in case the chip flakes out. */
   7251 		ifp->if_timer = 5;
   7252 	}
   7253 }
   7254 
   7255 static void
   7256 wm_deferred_start(struct ifnet *ifp)
   7257 {
   7258 	struct wm_softc *sc = ifp->if_softc;
   7259 	int qid = 0;
   7260 
   7261 	/*
    7262 	 * Try to transmit on all Tx queues.  It might be better to
    7263 	 * pass in a specific txq somehow and transmit only on that queue.
   7264 	 */
   7265 restart:
   7266 	WM_CORE_LOCK(sc);
   7267 	if (sc->sc_core_stopping)
   7268 		goto out;
   7269 
   7270 	for (; qid < sc->sc_nqueues; qid++) {
   7271 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7272 
   7273 		if (!mutex_tryenter(txq->txq_lock))
   7274 			continue;
   7275 
   7276 		if (txq->txq_stopping) {
   7277 			mutex_exit(txq->txq_lock);
   7278 			continue;
   7279 		}
   7280 		WM_CORE_UNLOCK(sc);
   7281 
   7282 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7283 			/* XXX need for ALTQ */
   7284 			if (qid == 0)
   7285 				wm_nq_start_locked(ifp);
   7286 			wm_nq_transmit_locked(ifp, txq);
   7287 		} else {
   7288 			/* XXX need for ALTQ */
   7289 			if (qid == 0)
   7290 				wm_start_locked(ifp);
   7291 			wm_transmit_locked(ifp, txq);
   7292 		}
   7293 		mutex_exit(txq->txq_lock);
   7294 
   7295 		qid++;
   7296 		goto restart;
   7297 	}
   7298 out:
   7299 	WM_CORE_UNLOCK(sc);
   7300 }
   7301 
   7302 /* Interrupt */
   7303 
   7304 /*
   7305  * wm_txeof:
   7306  *
   7307  *	Helper; handle transmit interrupts.
   7308  */
   7309 static int
   7310 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7311 {
   7312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7313 	struct wm_txsoft *txs;
   7314 	bool processed = false;
   7315 	int count = 0;
   7316 	int i;
   7317 	uint8_t status;
   7318 
   7319 	KASSERT(mutex_owned(txq->txq_lock));
   7320 
   7321 	if (txq->txq_stopping)
   7322 		return 0;
   7323 
   7324 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7325 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7326 	else
   7327 		ifp->if_flags &= ~IFF_OACTIVE;
   7328 
   7329 	/*
   7330 	 * Go through the Tx list and free mbufs for those
   7331 	 * frames which have been transmitted.
   7332 	 */
   7333 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7334 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7335 		txs = &txq->txq_soft[i];
   7336 
   7337 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7338 			device_xname(sc->sc_dev), i));
   7339 
   7340 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7341 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7342 
   7343 		status =
   7344 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7345 		if ((status & WTX_ST_DD) == 0) {
   7346 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7347 			    BUS_DMASYNC_PREREAD);
   7348 			break;
   7349 		}
   7350 
   7351 		processed = true;
   7352 		count++;
   7353 		DPRINTF(WM_DEBUG_TX,
   7354 		    ("%s: TX: job %d done: descs %d..%d\n",
   7355 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7356 		    txs->txs_lastdesc));
   7357 
   7358 		/*
   7359 		 * XXX We should probably be using the statistics
   7360 		 * XXX registers, but I don't know if they exist
   7361 		 * XXX on chips before the i82544.
   7362 		 */
   7363 
   7364 #ifdef WM_EVENT_COUNTERS
   7365 		if (status & WTX_ST_TU)
   7366 			WM_Q_EVCNT_INCR(txq, tu);
   7367 #endif /* WM_EVENT_COUNTERS */
   7368 
   7369 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7370 			ifp->if_oerrors++;
   7371 			if (status & WTX_ST_LC)
   7372 				log(LOG_WARNING, "%s: late collision\n",
   7373 				    device_xname(sc->sc_dev));
   7374 			else if (status & WTX_ST_EC) {
   7375 				ifp->if_collisions += 16;
   7376 				log(LOG_WARNING, "%s: excessive collisions\n",
   7377 				    device_xname(sc->sc_dev));
   7378 			}
   7379 		} else
   7380 			ifp->if_opackets++;
   7381 
   7382 		txq->txq_free += txs->txs_ndesc;
   7383 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7384 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7385 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7386 		m_freem(txs->txs_mbuf);
   7387 		txs->txs_mbuf = NULL;
   7388 	}
   7389 
   7390 	/* Update the dirty transmit buffer pointer. */
   7391 	txq->txq_sdirty = i;
   7392 	DPRINTF(WM_DEBUG_TX,
   7393 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7394 
   7395 	if (count != 0)
   7396 		rnd_add_uint32(&sc->rnd_source, count);
   7397 
   7398 	/*
   7399 	 * If there are no more pending transmissions, cancel the watchdog
   7400 	 * timer.
   7401 	 */
   7402 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7403 		ifp->if_timer = 0;
   7404 
   7405 	return processed;
   7406 }
   7407 
   7408 /*
   7409  * wm_rxeof:
   7410  *
   7411  *	Helper; handle receive interrupts.
   7412  */
   7413 static void
   7414 wm_rxeof(struct wm_rxqueue *rxq)
   7415 {
   7416 	struct wm_softc *sc = rxq->rxq_sc;
   7417 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7418 	struct wm_rxsoft *rxs;
   7419 	struct mbuf *m;
   7420 	int i, len;
   7421 	int count = 0;
   7422 	uint8_t status, errors;
   7423 	uint16_t vlantag;
   7424 
   7425 	KASSERT(mutex_owned(rxq->rxq_lock));
   7426 
   7427 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7428 		rxs = &rxq->rxq_soft[i];
   7429 
   7430 		DPRINTF(WM_DEBUG_RX,
   7431 		    ("%s: RX: checking descriptor %d\n",
   7432 		    device_xname(sc->sc_dev), i));
   7433 
   7434 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7435 
   7436 		status = rxq->rxq_descs[i].wrx_status;
   7437 		errors = rxq->rxq_descs[i].wrx_errors;
   7438 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7439 		vlantag = rxq->rxq_descs[i].wrx_special;
   7440 
   7441 		if ((status & WRX_ST_DD) == 0) {
   7442 			/* We have processed all of the receive descriptors. */
   7443 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7444 			break;
   7445 		}
   7446 
   7447 		count++;
   7448 		if (__predict_false(rxq->rxq_discard)) {
   7449 			DPRINTF(WM_DEBUG_RX,
   7450 			    ("%s: RX: discarding contents of descriptor %d\n",
   7451 			    device_xname(sc->sc_dev), i));
   7452 			wm_init_rxdesc(rxq, i);
   7453 			if (status & WRX_ST_EOP) {
   7454 				/* Reset our state. */
   7455 				DPRINTF(WM_DEBUG_RX,
   7456 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7457 				    device_xname(sc->sc_dev)));
   7458 				rxq->rxq_discard = 0;
   7459 			}
   7460 			continue;
   7461 		}
   7462 
   7463 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7464 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7465 
   7466 		m = rxs->rxs_mbuf;
   7467 
   7468 		/*
   7469 		 * Add a new receive buffer to the ring, unless of
   7470 		 * course the length is zero. Treat the latter as a
   7471 		 * failed mapping.
   7472 		 */
   7473 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7474 			/*
   7475 			 * Failed, throw away what we've done so
   7476 			 * far, and discard the rest of the packet.
   7477 			 */
   7478 			ifp->if_ierrors++;
   7479 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7480 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7481 			wm_init_rxdesc(rxq, i);
   7482 			if ((status & WRX_ST_EOP) == 0)
   7483 				rxq->rxq_discard = 1;
   7484 			if (rxq->rxq_head != NULL)
   7485 				m_freem(rxq->rxq_head);
   7486 			WM_RXCHAIN_RESET(rxq);
   7487 			DPRINTF(WM_DEBUG_RX,
   7488 			    ("%s: RX: Rx buffer allocation failed, "
   7489 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7490 			    rxq->rxq_discard ? " (discard)" : ""));
   7491 			continue;
   7492 		}
   7493 
   7494 		m->m_len = len;
   7495 		rxq->rxq_len += len;
   7496 		DPRINTF(WM_DEBUG_RX,
   7497 		    ("%s: RX: buffer at %p len %d\n",
   7498 		    device_xname(sc->sc_dev), m->m_data, len));
   7499 
   7500 		/* If this is not the end of the packet, keep looking. */
   7501 		if ((status & WRX_ST_EOP) == 0) {
   7502 			WM_RXCHAIN_LINK(rxq, m);
   7503 			DPRINTF(WM_DEBUG_RX,
   7504 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7505 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7506 			continue;
   7507 		}
   7508 
   7509 		/*
    7510 		 * Okay, we have the entire packet now.  The chip
    7511 		 * includes the FCS except on I350, I354 and I21[01]
    7512 		 * (not all chips can be configured to strip it), so
    7513 		 * we need to trim it.  We may also need to adjust the
    7514 		 * length of the previous mbuf in the chain if the
    7515 		 * current mbuf is too short.  Due to an erratum, the
    7516 		 * RCTL_SECRC bit in the RCTL register is always set
    7517 		 * on I350, so we don't trim there.
   7518 		 */
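         		/*
         		 * E.g. if the last mbuf holds only 2 of the 4 FCS
         		 * bytes, it is emptied and the other 2 bytes are
         		 * trimmed from the previous mbuf in the chain.
         		 */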
   7519 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7520 		    && (sc->sc_type != WM_T_I210)
   7521 		    && (sc->sc_type != WM_T_I211)) {
   7522 			if (m->m_len < ETHER_CRC_LEN) {
   7523 				rxq->rxq_tail->m_len
   7524 				    -= (ETHER_CRC_LEN - m->m_len);
   7525 				m->m_len = 0;
   7526 			} else
   7527 				m->m_len -= ETHER_CRC_LEN;
   7528 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7529 		} else
   7530 			len = rxq->rxq_len;
   7531 
   7532 		WM_RXCHAIN_LINK(rxq, m);
   7533 
   7534 		*rxq->rxq_tailp = NULL;
   7535 		m = rxq->rxq_head;
   7536 
   7537 		WM_RXCHAIN_RESET(rxq);
   7538 
   7539 		DPRINTF(WM_DEBUG_RX,
   7540 		    ("%s: RX: have entire packet, len -> %d\n",
   7541 		    device_xname(sc->sc_dev), len));
   7542 
   7543 		/* If an error occurred, update stats and drop the packet. */
   7544 		if (errors &
   7545 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7546 			if (errors & WRX_ER_SE)
   7547 				log(LOG_WARNING, "%s: symbol error\n",
   7548 				    device_xname(sc->sc_dev));
   7549 			else if (errors & WRX_ER_SEQ)
   7550 				log(LOG_WARNING, "%s: receive sequence error\n",
   7551 				    device_xname(sc->sc_dev));
   7552 			else if (errors & WRX_ER_CE)
   7553 				log(LOG_WARNING, "%s: CRC error\n",
   7554 				    device_xname(sc->sc_dev));
   7555 			m_freem(m);
   7556 			continue;
   7557 		}
   7558 
   7559 		/* No errors.  Receive the packet. */
   7560 		m_set_rcvif(m, ifp);
   7561 		m->m_pkthdr.len = len;
   7562 
   7563 		/*
   7564 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7565 		 * for us.  Associate the tag with the packet.
   7566 		 */
   7567 		/* XXXX should check for i350 and i354 */
   7568 		if ((status & WRX_ST_VP) != 0) {
   7569 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7570 		}
   7571 
   7572 		/* Set up checksum info for this packet. */
   7573 		if ((status & WRX_ST_IXSM) == 0) {
   7574 			if (status & WRX_ST_IPCS) {
   7575 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7576 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7577 				if (errors & WRX_ER_IPE)
   7578 					m->m_pkthdr.csum_flags |=
   7579 					    M_CSUM_IPv4_BAD;
   7580 			}
   7581 			if (status & WRX_ST_TCPCS) {
   7582 				/*
   7583 				 * Note: we don't know if this was TCP or UDP,
   7584 				 * so we just set both bits, and expect the
   7585 				 * upper layers to deal.
   7586 				 */
   7587 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7588 				m->m_pkthdr.csum_flags |=
   7589 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7590 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7591 				if (errors & WRX_ER_TCPE)
   7592 					m->m_pkthdr.csum_flags |=
   7593 					    M_CSUM_TCP_UDP_BAD;
   7594 			}
   7595 		}
   7596 
   7597 		ifp->if_ipackets++;
   7598 
   7599 		mutex_exit(rxq->rxq_lock);
   7600 
   7601 		/* Pass this up to any BPF listeners. */
   7602 		bpf_mtap(ifp, m);
   7603 
   7604 		/* Pass it on. */
   7605 		if_percpuq_enqueue(sc->sc_ipq, m);
   7606 
   7607 		mutex_enter(rxq->rxq_lock);
   7608 
   7609 		if (rxq->rxq_stopping)
   7610 			break;
   7611 	}
   7612 
   7613 	/* Update the receive pointer. */
   7614 	rxq->rxq_ptr = i;
   7615 	if (count != 0)
   7616 		rnd_add_uint32(&sc->rnd_source, count);
   7617 
   7618 	DPRINTF(WM_DEBUG_RX,
   7619 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7620 }
   7621 
   7622 /*
   7623  * wm_linkintr_gmii:
   7624  *
   7625  *	Helper; handle link interrupts for GMII.
   7626  */
   7627 static void
   7628 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7629 {
   7630 
   7631 	KASSERT(WM_CORE_LOCKED(sc));
   7632 
   7633 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7634 		__func__));
   7635 
   7636 	if (icr & ICR_LSC) {
   7637 		uint32_t reg;
   7638 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7639 
   7640 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7641 			wm_gig_downshift_workaround_ich8lan(sc);
   7642 
   7643 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7644 			device_xname(sc->sc_dev)));
   7645 		mii_pollstat(&sc->sc_mii);
   7646 		if (sc->sc_type == WM_T_82543) {
   7647 			int miistatus, active;
   7648 
   7649 			/*
   7650 			 * With 82543, we need to force speed and
   7651 			 * duplex on the MAC equal to what the PHY
   7652 			 * speed and duplex configuration is.
   7653 			 */
   7654 			miistatus = sc->sc_mii.mii_media_status;
   7655 
   7656 			if (miistatus & IFM_ACTIVE) {
   7657 				active = sc->sc_mii.mii_media_active;
   7658 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7659 				switch (IFM_SUBTYPE(active)) {
   7660 				case IFM_10_T:
   7661 					sc->sc_ctrl |= CTRL_SPEED_10;
   7662 					break;
   7663 				case IFM_100_TX:
   7664 					sc->sc_ctrl |= CTRL_SPEED_100;
   7665 					break;
   7666 				case IFM_1000_T:
   7667 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7668 					break;
   7669 				default:
   7670 					/*
   7671 					 * fiber?
    7672 					 * Should not enter here.
   7673 					 */
   7674 					printf("unknown media (%x)\n", active);
   7675 					break;
   7676 				}
   7677 				if (active & IFM_FDX)
   7678 					sc->sc_ctrl |= CTRL_FD;
   7679 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7680 			}
   7681 		} else if ((sc->sc_type == WM_T_ICH8)
   7682 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7683 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7684 		} else if (sc->sc_type == WM_T_PCH) {
   7685 			wm_k1_gig_workaround_hv(sc,
   7686 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7687 		}
   7688 
   7689 		if ((sc->sc_phytype == WMPHY_82578)
   7690 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7691 			== IFM_1000_T)) {
   7692 
   7693 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7694 				delay(200*1000); /* XXX too big */
   7695 
   7696 				/* Link stall fix for link up */
   7697 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7698 				    HV_MUX_DATA_CTRL,
   7699 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7700 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7701 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7702 				    HV_MUX_DATA_CTRL,
   7703 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7704 			}
   7705 		}
   7706 		/*
   7707 		 * I217 Packet Loss issue:
   7708 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   7709 		 * on power up.
   7710 		 * Set the Beacon Duration for I217 to 8 usec
   7711 		 */
   7712 		if ((sc->sc_type == WM_T_PCH_LPT)
   7713 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7714 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7715 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7716 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7717 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7718 		}
   7719 
   7720 		/* XXX Work-around I218 hang issue */
   7721 		/* e1000_k1_workaround_lpt_lp() */
   7722 
   7723 		if ((sc->sc_type == WM_T_PCH_LPT)
   7724 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7725 			/*
   7726 			 * Set platform power management values for Latency
   7727 			 * Tolerance Reporting (LTR)
   7728 			 */
   7729 			wm_platform_pm_pch_lpt(sc,
   7730 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7731 				    != 0));
   7732 		}
   7733 
   7734 		/* FEXTNVM6 K1-off workaround */
   7735 		if (sc->sc_type == WM_T_PCH_SPT) {
   7736 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7737 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7738 			    & FEXTNVM6_K1_OFF_ENABLE)
   7739 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7740 			else
   7741 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7742 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7743 		}
   7744 	} else if (icr & ICR_RXSEQ) {
   7745 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7746 			device_xname(sc->sc_dev)));
   7747 	}
   7748 }
   7749 
   7750 /*
   7751  * wm_linkintr_tbi:
   7752  *
   7753  *	Helper; handle link interrupts for TBI mode.
   7754  */
   7755 static void
   7756 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7757 {
   7758 	uint32_t status;
   7759 
   7760 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7761 		__func__));
   7762 
   7763 	status = CSR_READ(sc, WMREG_STATUS);
   7764 	if (icr & ICR_LSC) {
   7765 		if (status & STATUS_LU) {
   7766 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7767 			    device_xname(sc->sc_dev),
   7768 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7769 			/*
   7770 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7771 			 * so we should update sc->sc_ctrl
   7772 			 */
   7773 
   7774 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7775 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7776 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7777 			if (status & STATUS_FD)
   7778 				sc->sc_tctl |=
   7779 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7780 			else
   7781 				sc->sc_tctl |=
   7782 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7783 			if (sc->sc_ctrl & CTRL_TFCE)
   7784 				sc->sc_fcrtl |= FCRTL_XONE;
   7785 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7786 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7787 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7788 				      sc->sc_fcrtl);
   7789 			sc->sc_tbi_linkup = 1;
   7790 		} else {
   7791 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7792 			    device_xname(sc->sc_dev)));
   7793 			sc->sc_tbi_linkup = 0;
   7794 		}
   7795 		/* Update LED */
   7796 		wm_tbi_serdes_set_linkled(sc);
   7797 	} else if (icr & ICR_RXSEQ) {
   7798 		DPRINTF(WM_DEBUG_LINK,
   7799 		    ("%s: LINK: Receive sequence error\n",
   7800 		    device_xname(sc->sc_dev)));
   7801 	}
   7802 }
   7803 
   7804 /*
   7805  * wm_linkintr_serdes:
   7806  *
    7807  *	Helper; handle link interrupts for SERDES mode.
   7808  */
   7809 static void
   7810 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7811 {
   7812 	struct mii_data *mii = &sc->sc_mii;
   7813 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7814 	uint32_t pcs_adv, pcs_lpab, reg;
   7815 
   7816 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7817 		__func__));
   7818 
   7819 	if (icr & ICR_LSC) {
   7820 		/* Check PCS */
   7821 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7822 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7823 			mii->mii_media_status |= IFM_ACTIVE;
   7824 			sc->sc_tbi_linkup = 1;
   7825 		} else {
   7826 			mii->mii_media_status |= IFM_NONE;
   7827 			sc->sc_tbi_linkup = 0;
   7828 			wm_tbi_serdes_set_linkled(sc);
   7829 			return;
   7830 		}
   7831 		mii->mii_media_active |= IFM_1000_SX;
   7832 		if ((reg & PCS_LSTS_FDX) != 0)
   7833 			mii->mii_media_active |= IFM_FDX;
   7834 		else
   7835 			mii->mii_media_active |= IFM_HDX;
   7836 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7837 			/* Check flow */
   7838 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7839 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7840 				DPRINTF(WM_DEBUG_LINK,
   7841 				    ("XXX LINKOK but not ACOMP\n"));
   7842 				return;
   7843 			}
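         			/*
         			 * Resolve flow control per IEEE 802.3 Annex
         			 * 28B: symmetric pause on both ends enables Tx
         			 * and Rx pause; otherwise asymmetric pause is
         			 * enabled only in the direction both link
         			 * partners agree on.
         			 */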
   7844 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7845 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7846 			DPRINTF(WM_DEBUG_LINK,
   7847 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7848 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7849 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7850 				mii->mii_media_active |= IFM_FLOW
   7851 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7852 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7853 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7854 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7855 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7856 				mii->mii_media_active |= IFM_FLOW
   7857 				    | IFM_ETH_TXPAUSE;
   7858 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7859 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7860 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7861 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7862 				mii->mii_media_active |= IFM_FLOW
   7863 				    | IFM_ETH_RXPAUSE;
   7864 		}
   7865 		/* Update LED */
   7866 		wm_tbi_serdes_set_linkled(sc);
   7867 	} else {
   7868 		DPRINTF(WM_DEBUG_LINK,
   7869 		    ("%s: LINK: Receive sequence error\n",
   7870 		    device_xname(sc->sc_dev)));
   7871 	}
   7872 }
   7873 
   7874 /*
   7875  * wm_linkintr:
   7876  *
   7877  *	Helper; handle link interrupts.
   7878  */
   7879 static void
   7880 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7881 {
   7882 
   7883 	KASSERT(WM_CORE_LOCKED(sc));
   7884 
   7885 	if (sc->sc_flags & WM_F_HAS_MII)
   7886 		wm_linkintr_gmii(sc, icr);
   7887 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7888 	    && (sc->sc_type >= WM_T_82575))
   7889 		wm_linkintr_serdes(sc, icr);
   7890 	else
   7891 		wm_linkintr_tbi(sc, icr);
   7892 }
   7893 
   7894 /*
   7895  * wm_intr_legacy:
   7896  *
   7897  *	Interrupt service routine for INTx and MSI.
   7898  */
   7899 static int
   7900 wm_intr_legacy(void *arg)
   7901 {
   7902 	struct wm_softc *sc = arg;
   7903 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7904 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7905 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7906 	uint32_t icr, rndval = 0;
   7907 	int handled = 0;
   7908 
   7909 	DPRINTF(WM_DEBUG_TX,
   7910 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
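         	/*
         	 * Reading ICR acknowledges (clears) the asserted causes,
         	 * so keep looping until no cause we care about remains.
         	 */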
   7911 	while (1 /* CONSTCOND */) {
   7912 		icr = CSR_READ(sc, WMREG_ICR);
   7913 		if ((icr & sc->sc_icr) == 0)
   7914 			break;
   7915 		if (rndval == 0)
   7916 			rndval = icr;
   7917 
   7918 		mutex_enter(rxq->rxq_lock);
   7919 
   7920 		if (rxq->rxq_stopping) {
   7921 			mutex_exit(rxq->rxq_lock);
   7922 			break;
   7923 		}
   7924 
   7925 		handled = 1;
   7926 
   7927 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7928 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7929 			DPRINTF(WM_DEBUG_RX,
   7930 			    ("%s: RX: got Rx intr 0x%08x\n",
   7931 			    device_xname(sc->sc_dev),
   7932 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7933 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7934 		}
   7935 #endif
   7936 		wm_rxeof(rxq);
   7937 
   7938 		mutex_exit(rxq->rxq_lock);
   7939 		mutex_enter(txq->txq_lock);
   7940 
   7941 		if (txq->txq_stopping) {
   7942 			mutex_exit(txq->txq_lock);
   7943 			break;
   7944 		}
   7945 
   7946 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7947 		if (icr & ICR_TXDW) {
   7948 			DPRINTF(WM_DEBUG_TX,
   7949 			    ("%s: TX: got TXDW interrupt\n",
   7950 			    device_xname(sc->sc_dev)));
   7951 			WM_Q_EVCNT_INCR(txq, txdw);
   7952 		}
   7953 #endif
   7954 		wm_txeof(sc, txq);
   7955 
   7956 		mutex_exit(txq->txq_lock);
   7957 		WM_CORE_LOCK(sc);
   7958 
   7959 		if (sc->sc_core_stopping) {
   7960 			WM_CORE_UNLOCK(sc);
   7961 			break;
   7962 		}
   7963 
   7964 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7965 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7966 			wm_linkintr(sc, icr);
   7967 		}
   7968 
   7969 		WM_CORE_UNLOCK(sc);
   7970 
   7971 		if (icr & ICR_RXO) {
   7972 #if defined(WM_DEBUG)
   7973 			log(LOG_WARNING, "%s: Receive overrun\n",
   7974 			    device_xname(sc->sc_dev));
   7975 #endif /* defined(WM_DEBUG) */
   7976 		}
   7977 	}
   7978 
   7979 	rnd_add_uint32(&sc->rnd_source, rndval);
   7980 
   7981 	if (handled) {
   7982 		/* Try to get more packets going. */
   7983 		if_schedule_deferred_start(ifp);
   7984 	}
   7985 
   7986 	return handled;
   7987 }
   7988 
   7989 static int
   7990 wm_txrxintr_msix(void *arg)
   7991 {
   7992 	struct wm_queue *wmq = arg;
   7993 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7994 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7995 	struct wm_softc *sc = txq->txq_sc;
   7996 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7997 
   7998 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7999 
   8000 	DPRINTF(WM_DEBUG_TX,
   8001 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8002 
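         	/*
         	 * Mask this queue's interrupt sources while they are being
         	 * serviced; they are re-enabled at the bottom of this
         	 * handler.  The 82574 masks per-queue causes via IMC, the
         	 * 82575 via EIMC queue bits, and later chips via the MSI-X
         	 * vector bit in EIMC.
         	 */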
   8003 	if (sc->sc_type == WM_T_82574)
   8004 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8005 	else if (sc->sc_type == WM_T_82575)
   8006 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8007 	else
   8008 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8009 
   8010 	mutex_enter(txq->txq_lock);
   8011 
   8012 	if (txq->txq_stopping) {
   8013 		mutex_exit(txq->txq_lock);
   8014 		return 0;
   8015 	}
   8016 
   8017 	WM_Q_EVCNT_INCR(txq, txdw);
   8018 	wm_txeof(sc, txq);
   8019 
   8020 	/* Try to get more packets going. */
   8021 	if (pcq_peek(txq->txq_interq) != NULL)
   8022 		if_schedule_deferred_start(ifp);
    8023 	/*
    8024 	 * Some upper-layer code, e.g. ALTQ, still calls
    8025 	 * ifp->if_start() directly.
    8026 	 */
   8027 	if (wmq->wmq_id == 0)
   8028 		if_schedule_deferred_start(ifp);
   8029 
   8030 	mutex_exit(txq->txq_lock);
   8031 
   8032 	DPRINTF(WM_DEBUG_RX,
   8033 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8034 	mutex_enter(rxq->rxq_lock);
   8035 
   8036 	if (rxq->rxq_stopping) {
   8037 		mutex_exit(rxq->rxq_lock);
   8038 		return 0;
   8039 	}
   8040 
   8041 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8042 	wm_rxeof(rxq);
   8043 	mutex_exit(rxq->rxq_lock);
   8044 
   8045 	if (sc->sc_type == WM_T_82574)
   8046 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8047 	else if (sc->sc_type == WM_T_82575)
   8048 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8049 	else
   8050 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8051 
   8052 	return 1;
   8053 }
   8054 
   8055 /*
   8056  * wm_linkintr_msix:
   8057  *
   8058  *	Interrupt service routine for link status change for MSI-X.
   8059  */
   8060 static int
   8061 wm_linkintr_msix(void *arg)
   8062 {
   8063 	struct wm_softc *sc = arg;
   8064 	uint32_t reg;
   8065 
   8066 	DPRINTF(WM_DEBUG_LINK,
   8067 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8068 
   8069 	reg = CSR_READ(sc, WMREG_ICR);
   8070 	WM_CORE_LOCK(sc);
   8071 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8072 		goto out;
   8073 
   8074 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8075 	wm_linkintr(sc, ICR_LSC);
   8076 
   8077 out:
   8078 	WM_CORE_UNLOCK(sc);
   8079 
   8080 	if (sc->sc_type == WM_T_82574)
   8081 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8082 	else if (sc->sc_type == WM_T_82575)
   8083 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8084 	else
   8085 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8086 
   8087 	return 1;
   8088 }
   8089 
   8090 /*
   8091  * Media related.
   8092  * GMII, SGMII, TBI (and SERDES)
   8093  */
   8094 
   8095 /* Common */
   8096 
   8097 /*
   8098  * wm_tbi_serdes_set_linkled:
   8099  *
   8100  *	Update the link LED on TBI and SERDES devices.
   8101  */
   8102 static void
   8103 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8104 {
   8105 
   8106 	if (sc->sc_tbi_linkup)
   8107 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8108 	else
   8109 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8110 
    8111 	/* On 82540 or newer devices, the LED is active-low */
   8112 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8113 
   8114 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8115 }
   8116 
   8117 /* GMII related */
   8118 
   8119 /*
   8120  * wm_gmii_reset:
   8121  *
   8122  *	Reset the PHY.
   8123  */
   8124 static void
   8125 wm_gmii_reset(struct wm_softc *sc)
   8126 {
   8127 	uint32_t reg;
   8128 	int rv;
   8129 
   8130 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8131 		device_xname(sc->sc_dev), __func__));
   8132 
   8133 	rv = sc->phy.acquire(sc);
   8134 	if (rv != 0) {
   8135 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8136 		    __func__);
   8137 		return;
   8138 	}
   8139 
   8140 	switch (sc->sc_type) {
   8141 	case WM_T_82542_2_0:
   8142 	case WM_T_82542_2_1:
   8143 		/* null */
   8144 		break;
   8145 	case WM_T_82543:
   8146 		/*
    8147 		 * With the 82543, we need to force the MAC's speed and
    8148 		 * duplex to match the PHY's speed and duplex configuration.
    8149 		 * In addition, we need to toggle the PHY's hardware reset
    8150 		 * pin to take the PHY out of reset.
   8151 		 */
   8152 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8153 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8154 
   8155 		/* The PHY reset pin is active-low. */
   8156 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8157 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8158 		    CTRL_EXT_SWDPIN(4));
   8159 		reg |= CTRL_EXT_SWDPIO(4);
   8160 
   8161 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8162 		CSR_WRITE_FLUSH(sc);
   8163 		delay(10*1000);
   8164 
   8165 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8166 		CSR_WRITE_FLUSH(sc);
   8167 		delay(150);
   8168 #if 0
   8169 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8170 #endif
   8171 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8172 		break;
   8173 	case WM_T_82544:	/* reset 10000us */
   8174 	case WM_T_82540:
   8175 	case WM_T_82545:
   8176 	case WM_T_82545_3:
   8177 	case WM_T_82546:
   8178 	case WM_T_82546_3:
   8179 	case WM_T_82541:
   8180 	case WM_T_82541_2:
   8181 	case WM_T_82547:
   8182 	case WM_T_82547_2:
   8183 	case WM_T_82571:	/* reset 100us */
   8184 	case WM_T_82572:
   8185 	case WM_T_82573:
   8186 	case WM_T_82574:
   8187 	case WM_T_82575:
   8188 	case WM_T_82576:
   8189 	case WM_T_82580:
   8190 	case WM_T_I350:
   8191 	case WM_T_I354:
   8192 	case WM_T_I210:
   8193 	case WM_T_I211:
   8194 	case WM_T_82583:
   8195 	case WM_T_80003:
   8196 		/* generic reset */
   8197 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8198 		CSR_WRITE_FLUSH(sc);
   8199 		delay(20000);
   8200 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8201 		CSR_WRITE_FLUSH(sc);
   8202 		delay(20000);
   8203 
   8204 		if ((sc->sc_type == WM_T_82541)
   8205 		    || (sc->sc_type == WM_T_82541_2)
   8206 		    || (sc->sc_type == WM_T_82547)
   8207 		    || (sc->sc_type == WM_T_82547_2)) {
    8208 			/* Workarounds for IGP PHYs are done in igp_reset() */
    8209 			/* XXX add code to set the LED after PHY reset */
   8210 		}
   8211 		break;
   8212 	case WM_T_ICH8:
   8213 	case WM_T_ICH9:
   8214 	case WM_T_ICH10:
   8215 	case WM_T_PCH:
   8216 	case WM_T_PCH2:
   8217 	case WM_T_PCH_LPT:
   8218 	case WM_T_PCH_SPT:
   8219 		/* generic reset */
   8220 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8221 		CSR_WRITE_FLUSH(sc);
   8222 		delay(100);
   8223 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8224 		CSR_WRITE_FLUSH(sc);
   8225 		delay(150);
   8226 		break;
   8227 	default:
   8228 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8229 		    __func__);
   8230 		break;
   8231 	}
   8232 
   8233 	sc->phy.release(sc);
   8234 
   8235 	/* get_cfg_done */
   8236 	wm_get_cfg_done(sc);
   8237 
   8238 	/* extra setup */
   8239 	switch (sc->sc_type) {
   8240 	case WM_T_82542_2_0:
   8241 	case WM_T_82542_2_1:
   8242 	case WM_T_82543:
   8243 	case WM_T_82544:
   8244 	case WM_T_82540:
   8245 	case WM_T_82545:
   8246 	case WM_T_82545_3:
   8247 	case WM_T_82546:
   8248 	case WM_T_82546_3:
   8249 	case WM_T_82541_2:
   8250 	case WM_T_82547_2:
   8251 	case WM_T_82571:
   8252 	case WM_T_82572:
   8253 	case WM_T_82573:
   8254 	case WM_T_82575:
   8255 	case WM_T_82576:
   8256 	case WM_T_82580:
   8257 	case WM_T_I350:
   8258 	case WM_T_I354:
   8259 	case WM_T_I210:
   8260 	case WM_T_I211:
   8261 	case WM_T_80003:
   8262 		/* null */
   8263 		break;
   8264 	case WM_T_82574:
   8265 	case WM_T_82583:
   8266 		wm_lplu_d0_disable(sc);
   8267 		break;
   8268 	case WM_T_82541:
   8269 	case WM_T_82547:
    8270 		/* XXX Actively configure the LED after PHY reset */
   8271 		break;
   8272 	case WM_T_ICH8:
   8273 	case WM_T_ICH9:
   8274 	case WM_T_ICH10:
   8275 	case WM_T_PCH:
   8276 	case WM_T_PCH2:
   8277 	case WM_T_PCH_LPT:
   8278 	case WM_T_PCH_SPT:
    8279 		/* Allow time for h/w to get to a quiescent state after reset */
   8280 		delay(10*1000);
   8281 
   8282 		if (sc->sc_type == WM_T_PCH)
   8283 			wm_hv_phy_workaround_ich8lan(sc);
   8284 
   8285 		if (sc->sc_type == WM_T_PCH2)
   8286 			wm_lv_phy_workaround_ich8lan(sc);
   8287 
    8288 		/* Clear the host wakeup bit after LCD reset */
   8289 		if (sc->sc_type >= WM_T_PCH) {
   8290 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8291 			    BM_PORT_GEN_CFG);
   8292 			reg &= ~BM_WUC_HOST_WU_BIT;
   8293 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8294 			    BM_PORT_GEN_CFG, reg);
   8295 		}
   8296 
   8297 		/*
    8298 		 * XXX Configure the LCD with the extended configuration region
   8299 		 * in NVM
   8300 		 */
   8301 
   8302 		/* Disable D0 LPLU. */
   8303 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8304 			wm_lplu_d0_disable_pch(sc);
   8305 		else
   8306 			wm_lplu_d0_disable(sc);	/* ICH* */
   8307 		break;
   8308 	default:
   8309 		panic("%s: unknown type\n", __func__);
   8310 		break;
   8311 	}
   8312 }
   8313 
   8314 /*
   8315  * wm_get_phy_id_82575:
   8316  *
    8317  * Return the PHY ID, or -1 on failure.
   8318  */
   8319 static int
   8320 wm_get_phy_id_82575(struct wm_softc *sc)
   8321 {
   8322 	uint32_t reg;
   8323 	int phyid = -1;
   8324 
   8325 	/* XXX */
   8326 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8327 		return -1;
   8328 
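         	/*
         	 * On the 82575 and 82576 the PHY address is read from MDIC;
         	 * on the 82580 and later it is read from MDICNFG.
         	 */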
   8329 	if (wm_sgmii_uses_mdio(sc)) {
   8330 		switch (sc->sc_type) {
   8331 		case WM_T_82575:
   8332 		case WM_T_82576:
   8333 			reg = CSR_READ(sc, WMREG_MDIC);
   8334 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8335 			break;
   8336 		case WM_T_82580:
   8337 		case WM_T_I350:
   8338 		case WM_T_I354:
   8339 		case WM_T_I210:
   8340 		case WM_T_I211:
   8341 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8342 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8343 			break;
   8344 		default:
   8345 			return -1;
   8346 		}
   8347 	}
   8348 
   8349 	return phyid;
   8350 }
   8351 
   8352 
   8353 /*
   8354  * wm_gmii_mediainit:
   8355  *
   8356  *	Initialize media for use on 1000BASE-T devices.
   8357  */
   8358 static void
   8359 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8360 {
   8361 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8362 	struct mii_data *mii = &sc->sc_mii;
   8363 	uint32_t reg;
   8364 
   8365 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8366 		device_xname(sc->sc_dev), __func__));
   8367 
   8368 	/* We have GMII. */
   8369 	sc->sc_flags |= WM_F_HAS_MII;
   8370 
   8371 	if (sc->sc_type == WM_T_80003)
   8372 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8373 	else
   8374 		sc->sc_tipg = TIPG_1000T_DFLT;
   8375 
   8376 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8377 	if ((sc->sc_type == WM_T_82580)
   8378 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8379 	    || (sc->sc_type == WM_T_I211)) {
   8380 		reg = CSR_READ(sc, WMREG_PHPM);
   8381 		reg &= ~PHPM_GO_LINK_D;
   8382 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8383 	}
   8384 
   8385 	/*
   8386 	 * Let the chip set speed/duplex on its own based on
   8387 	 * signals from the PHY.
   8388 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8389 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8390 	 */
   8391 	sc->sc_ctrl |= CTRL_SLU;
   8392 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8393 
   8394 	/* Initialize our media structures and probe the GMII. */
   8395 	mii->mii_ifp = ifp;
   8396 
   8397 	/*
   8398 	 * Determine the PHY access method.
   8399 	 *
    8400 	 *  For SGMII, use the SGMII-specific method.
    8401 	 *
    8402 	 *  For some devices, we can determine the PHY access method
    8403 	 * from sc_type.
    8404 	 *
    8405 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8406 	 * access method from sc_type alone, so use the PCI product ID for
    8407 	 * some devices.
    8408 	 *  For other ICH8 variants, try the IGP method first. If the PHY
    8409 	 * can't be detected, fall back to the BM method.
   8410 	 */
   8411 	switch (prodid) {
   8412 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8413 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8414 		/* 82577 */
   8415 		sc->sc_phytype = WMPHY_82577;
   8416 		break;
   8417 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8418 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8419 		/* 82578 */
   8420 		sc->sc_phytype = WMPHY_82578;
   8421 		break;
   8422 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8423 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8424 		/* 82579 */
   8425 		sc->sc_phytype = WMPHY_82579;
   8426 		break;
   8427 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8428 	case PCI_PRODUCT_INTEL_82801I_BM:
   8429 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8430 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8431 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8432 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8433 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8434 		/* ICH8, 9, 10 with 82567 */
   8435 		sc->sc_phytype = WMPHY_BM;
   8436 		mii->mii_readreg = wm_gmii_bm_readreg;
   8437 		mii->mii_writereg = wm_gmii_bm_writereg;
   8438 		break;
   8439 	default:
   8440 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8441 		    && !wm_sgmii_uses_mdio(sc)){
   8442 			/* SGMII */
   8443 			mii->mii_readreg = wm_sgmii_readreg;
   8444 			mii->mii_writereg = wm_sgmii_writereg;
   8445 		} else if ((sc->sc_type == WM_T_82574)
   8446 		    || (sc->sc_type == WM_T_82583)) {
   8447 			/* BM2 (phyaddr == 1) */
   8448 			sc->sc_phytype = WMPHY_BM;
   8449 			mii->mii_readreg = wm_gmii_bm_readreg;
   8450 			mii->mii_writereg = wm_gmii_bm_writereg;
   8451 		} else if (sc->sc_type >= WM_T_ICH8) {
   8452 			/* non-82567 ICH8, 9 and 10 */
   8453 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8454 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8455 		} else if (sc->sc_type >= WM_T_80003) {
   8456 			/* 80003 */
   8457 			sc->sc_phytype = WMPHY_GG82563;
   8458 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8459 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8460 		} else if (sc->sc_type >= WM_T_I210) {
   8461 			/* I210 and I211 */
   8462 			sc->sc_phytype = WMPHY_210;
   8463 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8464 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8465 		} else if (sc->sc_type >= WM_T_82580) {
   8466 			/* 82580, I350 and I354 */
   8467 			sc->sc_phytype = WMPHY_82580;
   8468 			mii->mii_readreg = wm_gmii_82580_readreg;
   8469 			mii->mii_writereg = wm_gmii_82580_writereg;
   8470 		} else if (sc->sc_type >= WM_T_82544) {
    8471 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8472 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8473 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8474 		} else {
   8475 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8476 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8477 		}
   8478 		break;
   8479 	}
   8480 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8481 		/* All PCH* use _hv_ */
   8482 		mii->mii_readreg = wm_gmii_hv_readreg;
   8483 		mii->mii_writereg = wm_gmii_hv_writereg;
   8484 	}
   8485 	mii->mii_statchg = wm_gmii_statchg;
   8486 
   8487 	/* get PHY control from SMBus to PCIe */
   8488 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8489 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8490 		wm_smbustopci(sc);
   8491 
   8492 	wm_gmii_reset(sc);
   8493 
   8494 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8495 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8496 	    wm_gmii_mediastatus);
   8497 
   8498 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8499 	    || (sc->sc_type == WM_T_82580)
   8500 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8501 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8502 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8503 			/* Attach only one port */
   8504 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8505 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8506 		} else {
   8507 			int i, id;
   8508 			uint32_t ctrl_ext;
   8509 
   8510 			id = wm_get_phy_id_82575(sc);
   8511 			if (id != -1) {
   8512 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8513 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8514 			}
   8515 			if ((id == -1)
   8516 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    8517 				/* Power on the SGMII PHY if it is disabled */
   8518 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8519 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8520 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8521 				CSR_WRITE_FLUSH(sc);
   8522 				delay(300*1000); /* XXX too long */
   8523 
    8524 				/* Try PHY addresses 1 through 7 */
   8525 				for (i = 1; i < 8; i++)
   8526 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8527 					    0xffffffff, i, MII_OFFSET_ANY,
   8528 					    MIIF_DOPAUSE);
   8529 
    8530 				/* Restore the previous SFP cage power state */
   8531 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8532 			}
   8533 		}
   8534 	} else {
   8535 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8536 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8537 	}
   8538 
   8539 	/*
    8540 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
   8541 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8542 	 */
   8543 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8544 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8545 		wm_set_mdio_slow_mode_hv(sc);
   8546 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8547 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8548 	}
   8549 
   8550 	/*
   8551 	 * (For ICH8 variants)
    8552  * If PHY detection failed, use the BM read/write functions and retry.
   8553 	 */
   8554 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8555 		/* if failed, retry with *_bm_* */
   8556 		mii->mii_readreg = wm_gmii_bm_readreg;
   8557 		mii->mii_writereg = wm_gmii_bm_writereg;
   8558 
   8559 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8560 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8561 	}
   8562 
   8563 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8564 		/* No PHY was found */
   8565 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8566 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8567 		sc->sc_phytype = WMPHY_NONE;
   8568 	} else {
   8569 		/*
   8570 		 * PHY Found!
   8571 		 * Check PHY type.
   8572 		 */
   8573 		uint32_t model;
   8574 		struct mii_softc *child;
   8575 
   8576 		child = LIST_FIRST(&mii->mii_phys);
   8577 		model = child->mii_mpd_model;
   8578 		if (model == MII_MODEL_yyINTEL_I82566)
   8579 			sc->sc_phytype = WMPHY_IGP_3;
   8580 
   8581 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8582 	}
   8583 }
   8584 
   8585 /*
   8586  * wm_gmii_mediachange:	[ifmedia interface function]
   8587  *
   8588  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8589  */
   8590 static int
   8591 wm_gmii_mediachange(struct ifnet *ifp)
   8592 {
   8593 	struct wm_softc *sc = ifp->if_softc;
   8594 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8595 	int rc;
   8596 
   8597 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8598 		device_xname(sc->sc_dev), __func__));
   8599 	if ((ifp->if_flags & IFF_UP) == 0)
   8600 		return 0;
   8601 
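         	/*
         	 * For autonegotiation, or on chips newer than the 82543, let
         	 * the MAC pick up speed/duplex from the PHY; otherwise force
         	 * the MAC's speed and duplex to the selected media.
         	 */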
   8602 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8603 	sc->sc_ctrl |= CTRL_SLU;
   8604 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8605 	    || (sc->sc_type > WM_T_82543)) {
   8606 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8607 	} else {
   8608 		sc->sc_ctrl &= ~CTRL_ASDE;
   8609 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8610 		if (ife->ifm_media & IFM_FDX)
   8611 			sc->sc_ctrl |= CTRL_FD;
   8612 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8613 		case IFM_10_T:
   8614 			sc->sc_ctrl |= CTRL_SPEED_10;
   8615 			break;
   8616 		case IFM_100_TX:
   8617 			sc->sc_ctrl |= CTRL_SPEED_100;
   8618 			break;
   8619 		case IFM_1000_T:
   8620 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8621 			break;
   8622 		default:
   8623 			panic("wm_gmii_mediachange: bad media 0x%x",
   8624 			    ife->ifm_media);
   8625 		}
   8626 	}
   8627 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8628 	if (sc->sc_type <= WM_T_82543)
   8629 		wm_gmii_reset(sc);
   8630 
   8631 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8632 		return 0;
   8633 	return rc;
   8634 }
   8635 
   8636 /*
   8637  * wm_gmii_mediastatus:	[ifmedia interface function]
   8638  *
   8639  *	Get the current interface media status on a 1000BASE-T device.
   8640  */
   8641 static void
   8642 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8643 {
   8644 	struct wm_softc *sc = ifp->if_softc;
   8645 
   8646 	ether_mediastatus(ifp, ifmr);
   8647 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8648 	    | sc->sc_flowflags;
   8649 }
   8650 
   8651 #define	MDI_IO		CTRL_SWDPIN(2)
   8652 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8653 #define	MDI_CLK		CTRL_SWDPIN(3)
   8654 
   8655 static void
   8656 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8657 {
   8658 	uint32_t i, v;
   8659 
   8660 	v = CSR_READ(sc, WMREG_CTRL);
   8661 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8662 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8663 
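         	/*
         	 * Bit-bang the bits out MSB first through the software-
         	 * definable pins: place each bit on MDI_IO, then pulse
         	 * MDI_CLK high and low with 10us hold times.
         	 */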
   8664 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8665 		if (data & i)
   8666 			v |= MDI_IO;
   8667 		else
   8668 			v &= ~MDI_IO;
   8669 		CSR_WRITE(sc, WMREG_CTRL, v);
   8670 		CSR_WRITE_FLUSH(sc);
   8671 		delay(10);
   8672 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8673 		CSR_WRITE_FLUSH(sc);
   8674 		delay(10);
   8675 		CSR_WRITE(sc, WMREG_CTRL, v);
   8676 		CSR_WRITE_FLUSH(sc);
   8677 		delay(10);
   8678 	}
   8679 }
   8680 
   8681 static uint32_t
   8682 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8683 {
   8684 	uint32_t v, i, data = 0;
   8685 
   8686 	v = CSR_READ(sc, WMREG_CTRL);
   8687 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8688 	v |= CTRL_SWDPIO(3);
   8689 
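         	/*
         	 * Clock through the turnaround cycle, then shift in the 16
         	 * data bits MSB first, sampling MDI_IO while MDI_CLK is high.
         	 */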
   8690 	CSR_WRITE(sc, WMREG_CTRL, v);
   8691 	CSR_WRITE_FLUSH(sc);
   8692 	delay(10);
   8693 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8694 	CSR_WRITE_FLUSH(sc);
   8695 	delay(10);
   8696 	CSR_WRITE(sc, WMREG_CTRL, v);
   8697 	CSR_WRITE_FLUSH(sc);
   8698 	delay(10);
   8699 
   8700 	for (i = 0; i < 16; i++) {
   8701 		data <<= 1;
   8702 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8703 		CSR_WRITE_FLUSH(sc);
   8704 		delay(10);
   8705 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8706 			data |= 1;
   8707 		CSR_WRITE(sc, WMREG_CTRL, v);
   8708 		CSR_WRITE_FLUSH(sc);
   8709 		delay(10);
   8710 	}
   8711 
   8712 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8713 	CSR_WRITE_FLUSH(sc);
   8714 	delay(10);
   8715 	CSR_WRITE(sc, WMREG_CTRL, v);
   8716 	CSR_WRITE_FLUSH(sc);
   8717 	delay(10);
   8718 
   8719 	return data;
   8720 }
   8721 
   8722 #undef MDI_IO
   8723 #undef MDI_DIR
   8724 #undef MDI_CLK
   8725 
   8726 /*
   8727  * wm_gmii_i82543_readreg:	[mii interface function]
   8728  *
   8729  *	Read a PHY register on the GMII (i82543 version).
   8730  */
   8731 static int
   8732 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8733 {
   8734 	struct wm_softc *sc = device_private(self);
   8735 	int rv;
   8736 
   8737 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8738 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8739 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8740 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8741 
   8742 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8743 	    device_xname(sc->sc_dev), phy, reg, rv));
   8744 
   8745 	return rv;
   8746 }
   8747 
   8748 /*
   8749  * wm_gmii_i82543_writereg:	[mii interface function]
   8750  *
   8751  *	Write a PHY register on the GMII (i82543 version).
   8752  */
   8753 static void
   8754 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8755 {
   8756 	struct wm_softc *sc = device_private(self);
   8757 
   8758 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8759 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8760 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8761 	    (MII_COMMAND_START << 30), 32);
   8762 }
   8763 
   8764 /*
   8765  * wm_gmii_mdic_readreg:	[mii interface function]
   8766  *
   8767  *	Read a PHY register on the GMII.
   8768  */
   8769 static int
   8770 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8771 {
   8772 	struct wm_softc *sc = device_private(self);
   8773 	uint32_t mdic = 0;
   8774 	int i, rv;
   8775 
   8776 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8777 	    MDIC_REGADD(reg));
   8778 
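         	/* Poll MDIC_READY in 50us steps until the read completes. */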
   8779 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8780 		mdic = CSR_READ(sc, WMREG_MDIC);
   8781 		if (mdic & MDIC_READY)
   8782 			break;
   8783 		delay(50);
   8784 	}
   8785 
   8786 	if ((mdic & MDIC_READY) == 0) {
   8787 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8788 		    device_xname(sc->sc_dev), phy, reg);
   8789 		rv = 0;
   8790 	} else if (mdic & MDIC_E) {
   8791 #if 0 /* This is normal if no PHY is present. */
   8792 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8793 		    device_xname(sc->sc_dev), phy, reg);
   8794 #endif
   8795 		rv = 0;
   8796 	} else {
   8797 		rv = MDIC_DATA(mdic);
   8798 		if (rv == 0xffff)
   8799 			rv = 0;
   8800 	}
   8801 
   8802 	return rv;
   8803 }
   8804 
   8805 /*
   8806  * wm_gmii_mdic_writereg:	[mii interface function]
   8807  *
   8808  *	Write a PHY register on the GMII.
   8809  */
   8810 static void
   8811 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8812 {
   8813 	struct wm_softc *sc = device_private(self);
   8814 	uint32_t mdic = 0;
   8815 	int i;
   8816 
   8817 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8818 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8819 
   8820 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8821 		mdic = CSR_READ(sc, WMREG_MDIC);
   8822 		if (mdic & MDIC_READY)
   8823 			break;
   8824 		delay(50);
   8825 	}
   8826 
   8827 	if ((mdic & MDIC_READY) == 0)
   8828 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8829 		    device_xname(sc->sc_dev), phy, reg);
   8830 	else if (mdic & MDIC_E)
   8831 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8832 		    device_xname(sc->sc_dev), phy, reg);
   8833 }
   8834 
   8835 /*
   8836  * wm_gmii_i82544_readreg:	[mii interface function]
   8837  *
   8838  *	Read a PHY register on the GMII.
   8839  */
   8840 static int
   8841 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8842 {
   8843 	struct wm_softc *sc = device_private(self);
   8844 	int rv;
   8845 
   8846 	if (sc->phy.acquire(sc)) {
   8847 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8848 		    __func__);
   8849 		return 0;
   8850 	}
   8851 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8852 	sc->phy.release(sc);
   8853 
   8854 	return rv;
   8855 }
   8856 
   8857 /*
   8858  * wm_gmii_i82544_writereg:	[mii interface function]
   8859  *
   8860  *	Write a PHY register on the GMII.
   8861  */
   8862 static void
   8863 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8864 {
   8865 	struct wm_softc *sc = device_private(self);
   8866 
   8867 	if (sc->phy.acquire(sc)) {
    8868 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    8869 		    __func__);
         		/* Don't touch the PHY, or release an unheld lock, on failure */
         		return;
    8870 	}
   8871 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8872 	sc->phy.release(sc);
   8873 }
   8874 
   8875 /*
   8876  * wm_gmii_i80003_readreg:	[mii interface function]
   8877  *
    8878  *	Read a PHY register on the kumeran bus.
    8879  * This could be handled by the PHY layer if we didn't have to lock the
    8880  * resource ...
   8881  */
   8882 static int
   8883 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8884 {
   8885 	struct wm_softc *sc = device_private(self);
   8886 	int rv;
   8887 
   8888 	if (phy != 1) /* only one PHY on kumeran bus */
   8889 		return 0;
   8890 
   8891 	if (sc->phy.acquire(sc)) {
   8892 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8893 		    __func__);
   8894 		return 0;
   8895 	}
   8896 
   8897 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8898 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8899 		    reg >> GG82563_PAGE_SHIFT);
   8900 	} else {
   8901 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8902 		    reg >> GG82563_PAGE_SHIFT);
   8903 	}
    8904 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8905 	delay(200);
   8906 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8907 	delay(200);
   8908 	sc->phy.release(sc);
   8909 
   8910 	return rv;
   8911 }
   8912 
   8913 /*
   8914  * wm_gmii_i80003_writereg:	[mii interface function]
   8915  *
    8916  *	Write a PHY register on the kumeran bus.
    8917  * This could be handled by the PHY layer if we didn't have to lock the
    8918  * resource ...
   8919  */
   8920 static void
   8921 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8922 {
   8923 	struct wm_softc *sc = device_private(self);
   8924 
   8925 	if (phy != 1) /* only one PHY on kumeran bus */
   8926 		return;
   8927 
   8928 	if (sc->phy.acquire(sc)) {
   8929 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8930 		    __func__);
   8931 		return;
   8932 	}
   8933 
   8934 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8935 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8936 		    reg >> GG82563_PAGE_SHIFT);
   8937 	} else {
   8938 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8939 		    reg >> GG82563_PAGE_SHIFT);
   8940 	}
    8941 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8942 	delay(200);
   8943 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8944 	delay(200);
   8945 
   8946 	sc->phy.release(sc);
   8947 }
   8948 
   8949 /*
   8950  * wm_gmii_bm_readreg:	[mii interface function]
   8951  *
    8952  *	Read a PHY register on the BM PHY.
    8953  * This could be handled by the PHY layer if we didn't have to lock the
    8954  * resource ...
   8955  */
   8956 static int
   8957 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8958 {
   8959 	struct wm_softc *sc = device_private(self);
   8960 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8961 	uint16_t val;
   8962 	int rv;
   8963 
   8964 	if (sc->phy.acquire(sc)) {
   8965 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8966 		    __func__);
   8967 		return 0;
   8968 	}
   8969 
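         	/*
         	 * Registers on pages >= 768, page 0 register 25 and register
         	 * 31 are only reachable at PHY address 1 on these chips.
         	 */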
   8970 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8971 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8972 		    || (reg == 31)) ? 1 : phy;
   8973 	/* Page 800 works differently than the rest so it has its own func */
   8974 	if (page == BM_WUC_PAGE) {
   8975 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8976 		rv = val;
   8977 		goto release;
   8978 	}
   8979 
   8980 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8981 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8982 		    && (sc->sc_type != WM_T_82583))
   8983 			wm_gmii_mdic_writereg(self, phy,
   8984 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8985 		else
   8986 			wm_gmii_mdic_writereg(self, phy,
   8987 			    BME1000_PHY_PAGE_SELECT, page);
   8988 	}
   8989 
   8990 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8991 
   8992 release:
   8993 	sc->phy.release(sc);
   8994 	return rv;
   8995 }
   8996 
   8997 /*
   8998  * wm_gmii_bm_writereg:	[mii interface function]
   8999  *
    9000  *	Write a PHY register on the BM PHY.
    9001  * This could be handled by the PHY layer if we didn't have to lock the
    9002  * resource ...
   9003  */
   9004 static void
   9005 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9006 {
   9007 	struct wm_softc *sc = device_private(self);
   9008 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9009 
   9010 	if (sc->phy.acquire(sc)) {
   9011 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9012 		    __func__);
   9013 		return;
   9014 	}
   9015 
   9016 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9017 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9018 		    || (reg == 31)) ? 1 : phy;
   9019 	/* Page 800 works differently than the rest so it has its own func */
   9020 	if (page == BM_WUC_PAGE) {
   9021 		uint16_t tmp;
   9022 
   9023 		tmp = val;
   9024 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9025 		goto release;
   9026 	}
   9027 
   9028 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9029 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9030 		    && (sc->sc_type != WM_T_82583))
   9031 			wm_gmii_mdic_writereg(self, phy,
   9032 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9033 		else
   9034 			wm_gmii_mdic_writereg(self, phy,
   9035 			    BME1000_PHY_PAGE_SELECT, page);
   9036 	}
   9037 
   9038 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9039 
   9040 release:
   9041 	sc->phy.release(sc);
   9042 }
   9043 
   9044 static void
   9045 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9046 {
   9047 	struct wm_softc *sc = device_private(self);
   9048 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9049 	uint16_t wuce, reg;
   9050 
   9051 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9052 		device_xname(sc->sc_dev), __func__));
   9053 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9054 	if (sc->sc_type == WM_T_PCH) {
    9055 		/* XXX the e1000 driver does nothing here... why? */
   9056 	}
   9057 
   9058 	/*
   9059 	 * 1) Enable PHY wakeup register first.
   9060 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9061 	 */
   9062 
   9063 	/* Set page 769 */
   9064 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9065 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9066 
   9067 	/* Read WUCE and save it */
   9068 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9069 
   9070 	reg = wuce | BM_WUC_ENABLE_BIT;
   9071 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9072 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9073 
   9074 	/* Select page 800 */
   9075 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9076 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9077 
   9078 	/*
   9079 	 * 2) Access PHY wakeup register.
   9080 	 * See e1000_access_phy_wakeup_reg_bm.
   9081 	 */
   9082 
   9083 	/* Write page 800 */
   9084 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9085 
   9086 	if (rd)
   9087 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9088 	else
   9089 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9090 
   9091 	/*
   9092 	 * 3) Disable PHY wakeup register.
   9093 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9094 	 */
   9095 	/* Set page 769 */
   9096 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9097 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9098 
   9099 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9100 }
   9101 
   9102 /*
   9103  * wm_gmii_hv_readreg:	[mii interface function]
   9104  *
    9105  *	Read a PHY register on the HV (PCH family) PHY.
    9106  * This could be handled by the PHY layer if we didn't have to lock the
    9107  * resource ...
   9108  */
   9109 static int
   9110 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9111 {
   9112 	struct wm_softc *sc = device_private(self);
   9113 	int rv;
   9114 
   9115 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9116 		device_xname(sc->sc_dev), __func__));
   9117 	if (sc->phy.acquire(sc)) {
   9118 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9119 		    __func__);
   9120 		return 0;
   9121 	}
   9122 
   9123 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9124 	sc->phy.release(sc);
   9125 	return rv;
   9126 }
   9127 
   9128 static int
   9129 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9130 {
   9131 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9132 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9133 	uint16_t val;
   9134 	int rv;
   9135 
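         	/* Pages at or above HV_INTC_FC_PAGE_START live at PHY address 1. */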
   9136 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9137 
   9138 	/* Page 800 works differently than the rest so it has its own func */
   9139 	if (page == BM_WUC_PAGE) {
   9140 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9141 		return val;
   9142 	}
   9143 
   9144 	/*
    9145 	 * Pages lower than 768 work differently from the rest and would
    9146 	 * need their own function; not handled here.
   9147 	 */
   9148 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9149 		printf("gmii_hv_readreg!!!\n");
   9150 		return 0;
   9151 	}
   9152 
   9153 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9154 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9155 		    page << BME1000_PAGE_SHIFT);
   9156 	}
   9157 
   9158 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9159 	return rv;
   9160 }
   9161 
   9162 /*
   9163  * wm_gmii_hv_writereg:	[mii interface function]
   9164  *
   9165  *	Write a PHY register on the kumeran.
    9166  *	Write a PHY register on the HV (PCH family) PHY.
    9167  * This could be handled by the PHY layer if we didn't have to lock the
    9168  * resource ...
   9169 static void
   9170 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9171 {
   9172 	struct wm_softc *sc = device_private(self);
   9173 
   9174 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9175 		device_xname(sc->sc_dev), __func__));
   9176 
   9177 	if (sc->phy.acquire(sc)) {
   9178 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9179 		    __func__);
   9180 		return;
   9181 	}
   9182 
   9183 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9184 	sc->phy.release(sc);
   9185 }
   9186 
   9187 static void
   9188 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9189 {
   9190 	struct wm_softc *sc = device_private(self);
   9191 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9192 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9193 
   9194 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9195 
   9196 	/* Page 800 works differently than the rest so it has its own func */
   9197 	if (page == BM_WUC_PAGE) {
   9198 		uint16_t tmp;
   9199 
   9200 		tmp = val;
   9201 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9202 		return;
   9203 	}
   9204 
   9205 	/*
    9206 	 * Pages lower than 768 work differently from the rest and would
    9207 	 * need their own function; not handled here.
   9208 	 */
   9209 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9210 		printf("gmii_hv_writereg!!!\n");
   9211 		return;
   9212 	}
   9213 
   9214 	{
   9215 		/*
   9216 		 * XXX Workaround MDIO accesses being disabled after entering
   9217 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9218 		 * register is set)
   9219 		 */
   9220 		if (sc->sc_phytype == WMPHY_82578) {
   9221 			struct mii_softc *child;
   9222 
   9223 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9224 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9225 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9226 			    && ((val & (1 << 11)) != 0)) {
   9227 				printf("XXX need workaround\n");
   9228 			}
   9229 		}
   9230 
   9231 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9232 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9233 			    page << BME1000_PAGE_SHIFT);
   9234 		}
   9235 	}
   9236 
   9237 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9238 }
   9239 
   9240 /*
   9241  * wm_gmii_82580_readreg:	[mii interface function]
   9242  *
   9243  *	Read a PHY register on the 82580 and I350.
   9244  * This could be handled by the PHY layer if we didn't have to lock the
    9245  * resource ...
   9246  */
   9247 static int
   9248 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9249 {
   9250 	struct wm_softc *sc = device_private(self);
   9251 	int rv;
   9252 
   9253 	if (sc->phy.acquire(sc) != 0) {
   9254 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9255 		    __func__);
   9256 		return 0;
   9257 	}
   9258 
   9259 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9260 
   9261 	sc->phy.release(sc);
   9262 	return rv;
   9263 }
   9264 
   9265 /*
   9266  * wm_gmii_82580_writereg:	[mii interface function]
   9267  *
   9268  *	Write a PHY register on the 82580 and I350.
   9269  * This could be handled by the PHY layer if we didn't have to lock the
    9270  * resource ...
   9271  */
   9272 static void
   9273 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9274 {
   9275 	struct wm_softc *sc = device_private(self);
   9276 
   9277 	if (sc->phy.acquire(sc) != 0) {
   9278 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9279 		    __func__);
   9280 		return;
   9281 	}
   9282 
   9283 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9284 
   9285 	sc->phy.release(sc);
   9286 }
   9287 
   9288 /*
   9289  * wm_gmii_gs40g_readreg:	[mii interface function]
   9290  *
    9291  *	Read a PHY register on the I210 and I211.
    9292  * This could be handled by the PHY layer if we didn't have to lock the
    9293  * resource ...
   9294  */
   9295 static int
   9296 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9297 {
   9298 	struct wm_softc *sc = device_private(self);
   9299 	int page, offset;
   9300 	int rv;
   9301 
   9302 	/* Acquire semaphore */
   9303 	if (sc->phy.acquire(sc)) {
   9304 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9305 		    __func__);
   9306 		return 0;
   9307 	}
   9308 
   9309 	/* Page select */
   9310 	page = reg >> GS40G_PAGE_SHIFT;
   9311 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9312 
   9313 	/* Read reg */
   9314 	offset = reg & GS40G_OFFSET_MASK;
   9315 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9316 
   9317 	sc->phy.release(sc);
   9318 	return rv;
   9319 }
   9320 
   9321 /*
   9322  * wm_gmii_gs40g_writereg:	[mii interface function]
   9323  *
   9324  *	Write a PHY register on the I210 and I211.
   9325  * This could be handled by the PHY layer if we didn't have to lock the
    9326  * resource ...
   9327  */
   9328 static void
   9329 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9330 {
   9331 	struct wm_softc *sc = device_private(self);
   9332 	int page, offset;
   9333 
   9334 	/* Acquire semaphore */
   9335 	if (sc->phy.acquire(sc)) {
   9336 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9337 		    __func__);
   9338 		return;
   9339 	}
   9340 
   9341 	/* Page select */
   9342 	page = reg >> GS40G_PAGE_SHIFT;
   9343 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9344 
   9345 	/* Write reg */
   9346 	offset = reg & GS40G_OFFSET_MASK;
   9347 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9348 
   9349 	/* Release semaphore */
   9350 	sc->phy.release(sc);
   9351 }
   9352 
   9353 /*
   9354  * wm_gmii_statchg:	[mii interface function]
   9355  *
   9356  *	Callback from MII layer when media changes.
   9357  */
   9358 static void
   9359 wm_gmii_statchg(struct ifnet *ifp)
   9360 {
   9361 	struct wm_softc *sc = ifp->if_softc;
   9362 	struct mii_data *mii = &sc->sc_mii;
   9363 
   9364 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9365 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9366 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9367 
   9368 	/*
   9369 	 * Get flow control negotiation result.
   9370 	 */
   9371 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9372 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9373 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9374 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9375 	}
   9376 
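         	/*
         	 * Apply the negotiated pause settings: TX pause enables
         	 * CTRL_TFCE and arms XON, RX pause enables CTRL_RFCE.
         	 */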
   9377 	if (sc->sc_flowflags & IFM_FLOW) {
   9378 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9379 			sc->sc_ctrl |= CTRL_TFCE;
   9380 			sc->sc_fcrtl |= FCRTL_XONE;
   9381 		}
   9382 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9383 			sc->sc_ctrl |= CTRL_RFCE;
   9384 	}
   9385 
   9386 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9387 		DPRINTF(WM_DEBUG_LINK,
   9388 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9389 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9390 	} else {
   9391 		DPRINTF(WM_DEBUG_LINK,
   9392 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9393 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9394 	}
   9395 
   9396 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9397 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9398 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9399 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9400 	if (sc->sc_type == WM_T_80003) {
   9401 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9402 		case IFM_1000_T:
   9403 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9404 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9405 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9406 			break;
   9407 		default:
   9408 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9409 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9410 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9411 			break;
   9412 		}
   9413 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9414 	}
   9415 }
   9416 
   9417 /* kumeran related (80003, ICH* and PCH*) */
   9418 
   9419 /*
   9420  * wm_kmrn_readreg:
   9421  *
   9422  *	Read a kumeran register
   9423  */
   9424 static int
   9425 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9426 {
   9427 	int rv;
   9428 
   9429 	if (sc->sc_type == WM_T_80003)
   9430 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9431 	else
   9432 		rv = sc->phy.acquire(sc);
   9433 	if (rv != 0) {
   9434 		aprint_error_dev(sc->sc_dev,
   9435 		    "%s: failed to get semaphore\n", __func__);
   9436 		return 0;
   9437 	}
   9438 
   9439 	rv = wm_kmrn_readreg_locked(sc, reg);
   9440 
   9441 	if (sc->sc_type == WM_T_80003)
   9442 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9443 	else
   9444 		sc->phy.release(sc);
   9445 
   9446 	return rv;
   9447 }
   9448 
   9449 static int
   9450 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9451 {
   9452 	int rv;
   9453 
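         	/*
         	 * Kumeran access is indirect: write the register offset with
         	 * the read-enable bit set, wait briefly, then read the data
         	 * back from the same KUMCTRLSTA register.
         	 */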
   9454 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9455 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9456 	    KUMCTRLSTA_REN);
   9457 	CSR_WRITE_FLUSH(sc);
   9458 	delay(2);
   9459 
   9460 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9461 
   9462 	return rv;
   9463 }
   9464 
   9465 /*
   9466  * wm_kmrn_writereg:
   9467  *
   9468  *	Write a kumeran register
   9469  */
   9470 static void
   9471 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9472 {
   9473 	int rv;
   9474 
   9475 	if (sc->sc_type == WM_T_80003)
   9476 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9477 	else
   9478 		rv = sc->phy.acquire(sc);
   9479 	if (rv != 0) {
   9480 		aprint_error_dev(sc->sc_dev,
   9481 		    "%s: failed to get semaphore\n", __func__);
   9482 		return;
   9483 	}
   9484 
   9485 	wm_kmrn_writereg_locked(sc, reg, val);
   9486 
   9487 	if (sc->sc_type == WM_T_80003)
   9488 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9489 	else
   9490 		sc->phy.release(sc);
   9491 }
   9492 
   9493 static void
   9494 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9495 {
   9496 
   9497 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9498 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9499 	    (val & KUMCTRLSTA_MASK));
   9500 }
   9501 
   9502 /* SGMII related */
   9503 
   9504 /*
   9505  * wm_sgmii_uses_mdio
   9506  *
   9507  * Check whether the transaction is to the internal PHY or the external
   9508  * MDIO interface. Return true if it's MDIO.
   9509  */
   9510 static bool
   9511 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9512 {
   9513 	uint32_t reg;
   9514 	bool ismdio = false;
   9515 
   9516 	switch (sc->sc_type) {
   9517 	case WM_T_82575:
   9518 	case WM_T_82576:
   9519 		reg = CSR_READ(sc, WMREG_MDIC);
   9520 		ismdio = ((reg & MDIC_DEST) != 0);
   9521 		break;
   9522 	case WM_T_82580:
   9523 	case WM_T_I350:
   9524 	case WM_T_I354:
   9525 	case WM_T_I210:
   9526 	case WM_T_I211:
   9527 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9528 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9529 		break;
   9530 	default:
   9531 		break;
   9532 	}
   9533 
   9534 	return ismdio;
   9535 }
   9536 
   9537 /*
   9538  * wm_sgmii_readreg:	[mii interface function]
   9539  *
    9540  *	Read a PHY register on the SGMII.
    9541  * This could be handled by the PHY layer if we didn't have to lock the
    9542  * resource ...
   9543  */
   9544 static int
   9545 wm_sgmii_readreg(device_t self, int phy, int reg)
   9546 {
   9547 	struct wm_softc *sc = device_private(self);
   9548 	uint32_t i2ccmd;
   9549 	int i, rv;
   9550 
   9551 	if (sc->phy.acquire(sc)) {
   9552 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9553 		    __func__);
   9554 		return 0;
   9555 	}
   9556 
   9557 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9558 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9559 	    | I2CCMD_OPCODE_READ;
   9560 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9561 
   9562 	/* Poll the ready bit */
   9563 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9564 		delay(50);
   9565 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9566 		if (i2ccmd & I2CCMD_READY)
   9567 			break;
   9568 	}
   9569 	if ((i2ccmd & I2CCMD_READY) == 0)
   9570 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9571 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9572 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9573 
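         	/* Swap the data bytes returned by the I2C interface */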
   9574 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9575 
   9576 	sc->phy.release(sc);
   9577 	return rv;
   9578 }
   9579 
   9580 /*
   9581  * wm_sgmii_writereg:	[mii interface function]
   9582  *
   9583  *	Write a PHY register on the SGMII.
   9584  * This could be handled by the PHY layer if we didn't have to lock the
    9585  * resource ...
   9586  */
   9587 static void
   9588 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9589 {
   9590 	struct wm_softc *sc = device_private(self);
   9591 	uint32_t i2ccmd;
   9592 	int i;
   9593 	int val_swapped;
   9594 
   9595 	if (sc->phy.acquire(sc) != 0) {
   9596 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9597 		    __func__);
   9598 		return;
   9599 	}
   9600 	/* Swap the data bytes for the I2C interface */
   9601 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9602 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9603 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9604 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9605 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9606 
   9607 	/* Poll the ready bit */
   9608 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9609 		delay(50);
   9610 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9611 		if (i2ccmd & I2CCMD_READY)
   9612 			break;
   9613 	}
   9614 	if ((i2ccmd & I2CCMD_READY) == 0)
   9615 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9616 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9617 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9618 
   9619 	sc->phy.release(sc);
   9620 }
   9621 
   9622 /* TBI related */
   9623 
   9624 /*
   9625  * wm_tbi_mediainit:
   9626  *
   9627  *	Initialize media for use on 1000BASE-X devices.
   9628  */
   9629 static void
   9630 wm_tbi_mediainit(struct wm_softc *sc)
   9631 {
   9632 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9633 	const char *sep = "";
   9634 
   9635 	if (sc->sc_type < WM_T_82543)
   9636 		sc->sc_tipg = TIPG_WM_DFLT;
   9637 	else
   9638 		sc->sc_tipg = TIPG_LG_DFLT;
   9639 
   9640 	sc->sc_tbi_serdes_anegticks = 5;
   9641 
   9642 	/* Initialize our media structures */
   9643 	sc->sc_mii.mii_ifp = ifp;
   9644 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9645 
   9646 	if ((sc->sc_type >= WM_T_82575)
   9647 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9648 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9649 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9650 	else
   9651 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9652 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9653 
   9654 	/*
   9655 	 * SWD Pins:
   9656 	 *
   9657 	 *	0 = Link LED (output)
   9658 	 *	1 = Loss Of Signal (input)
   9659 	 */
   9660 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9661 
   9662 	/* XXX Perhaps this is only for TBI */
   9663 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9664 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9665 
   9666 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9667 		sc->sc_ctrl &= ~CTRL_LRST;
   9668 
   9669 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9670 
   9671 #define	ADD(ss, mm, dd)							\
   9672 do {									\
   9673 	aprint_normal("%s%s", sep, ss);					\
   9674 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9675 	sep = ", ";							\
   9676 } while (/*CONSTCOND*/0)
   9677 
   9678 	aprint_normal_dev(sc->sc_dev, "");
   9679 
   9680 	/* Only 82545 is LX */
   9681 	if (sc->sc_type == WM_T_82545) {
   9682 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9683 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9684 	} else {
   9685 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9686 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9687 	}
   9688 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9689 	aprint_normal("\n");
   9690 
   9691 #undef ADD
   9692 
   9693 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9694 }
   9695 
   9696 /*
   9697  * wm_tbi_mediachange:	[ifmedia interface function]
   9698  *
   9699  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9700  */
   9701 static int
   9702 wm_tbi_mediachange(struct ifnet *ifp)
   9703 {
   9704 	struct wm_softc *sc = ifp->if_softc;
   9705 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9706 	uint32_t status;
   9707 	int i;
   9708 
   9709 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9710 		/* XXX need some work for >= 82571 and < 82575 */
   9711 		if (sc->sc_type < WM_T_82575)
   9712 			return 0;
   9713 	}
   9714 
   9715 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9716 	    || (sc->sc_type >= WM_T_82575))
   9717 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9718 
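         	/*
         	 * Build the TXCW autonegotiation advertisement: enable
         	 * autoneg, advertise the selected duplex mode(s), and
         	 * advertise pause if flow control is configured.
         	 */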
   9719 	sc->sc_ctrl &= ~CTRL_LRST;
   9720 	sc->sc_txcw = TXCW_ANE;
   9721 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9722 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9723 	else if (ife->ifm_media & IFM_FDX)
   9724 		sc->sc_txcw |= TXCW_FD;
   9725 	else
   9726 		sc->sc_txcw |= TXCW_HD;
   9727 
   9728 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9729 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9730 
   9731 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9732 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9733 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9734 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9735 	CSR_WRITE_FLUSH(sc);
   9736 	delay(1000);
   9737 
   9738 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9739 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9740 
   9741 	/*
    9742 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set if
    9743 	 * the optics detect a signal.  A reading of 0 is also accepted,
    9744 	 * since older chips cannot report signal presence this way.
   9744 	 */
   9745 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9746 		/* Have signal; wait for the link to come up. */
   9747 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9748 			delay(10000);
   9749 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9750 				break;
   9751 		}
   9752 
   9753 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9754 			    device_xname(sc->sc_dev),i));
   9755 
   9756 		status = CSR_READ(sc, WMREG_STATUS);
   9757 		DPRINTF(WM_DEBUG_LINK,
   9758 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9759 			device_xname(sc->sc_dev),status, STATUS_LU));
   9760 		if (status & STATUS_LU) {
   9761 			/* Link is up. */
   9762 			DPRINTF(WM_DEBUG_LINK,
   9763 			    ("%s: LINK: set media -> link up %s\n",
   9764 			    device_xname(sc->sc_dev),
   9765 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9766 
   9767 			/*
    9768 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9769 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   9770 			 */
   9771 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9772 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9773 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9774 			if (status & STATUS_FD)
   9775 				sc->sc_tctl |=
   9776 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9777 			else
   9778 				sc->sc_tctl |=
   9779 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9780 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9781 				sc->sc_fcrtl |= FCRTL_XONE;
   9782 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9783 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9784 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9785 				      sc->sc_fcrtl);
   9786 			sc->sc_tbi_linkup = 1;
   9787 		} else {
   9788 			if (i == WM_LINKUP_TIMEOUT)
   9789 				wm_check_for_link(sc);
   9790 			/* Link is down. */
   9791 			DPRINTF(WM_DEBUG_LINK,
   9792 			    ("%s: LINK: set media -> link down\n",
   9793 			    device_xname(sc->sc_dev)));
   9794 			sc->sc_tbi_linkup = 0;
   9795 		}
   9796 	} else {
   9797 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9798 		    device_xname(sc->sc_dev)));
   9799 		sc->sc_tbi_linkup = 0;
   9800 	}
   9801 
   9802 	wm_tbi_serdes_set_linkled(sc);
   9803 
   9804 	return 0;
   9805 }
   9806 
   9807 /*
   9808  * wm_tbi_mediastatus:	[ifmedia interface function]
   9809  *
   9810  *	Get the current interface media status on a 1000BASE-X device.
   9811  */
   9812 static void
   9813 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9814 {
   9815 	struct wm_softc *sc = ifp->if_softc;
   9816 	uint32_t ctrl, status;
   9817 
   9818 	ifmr->ifm_status = IFM_AVALID;
   9819 	ifmr->ifm_active = IFM_ETHER;
   9820 
   9821 	status = CSR_READ(sc, WMREG_STATUS);
   9822 	if ((status & STATUS_LU) == 0) {
   9823 		ifmr->ifm_active |= IFM_NONE;
   9824 		return;
   9825 	}
   9826 
   9827 	ifmr->ifm_status |= IFM_ACTIVE;
   9828 	/* Only 82545 is LX */
   9829 	if (sc->sc_type == WM_T_82545)
   9830 		ifmr->ifm_active |= IFM_1000_LX;
   9831 	else
   9832 		ifmr->ifm_active |= IFM_1000_SX;
   9833 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9834 		ifmr->ifm_active |= IFM_FDX;
   9835 	else
   9836 		ifmr->ifm_active |= IFM_HDX;
   9837 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9838 	if (ctrl & CTRL_RFCE)
   9839 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9840 	if (ctrl & CTRL_TFCE)
   9841 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9842 }
   9843 
   9844 /* XXX TBI only */
   9845 static int
   9846 wm_check_for_link(struct wm_softc *sc)
   9847 {
   9848 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9849 	uint32_t rxcw;
   9850 	uint32_t ctrl;
   9851 	uint32_t status;
   9852 	uint32_t sig;
   9853 
   9854 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9855 		/* XXX need some work for >= 82571 */
   9856 		if (sc->sc_type >= WM_T_82571) {
   9857 			sc->sc_tbi_linkup = 1;
   9858 			return 0;
   9859 		}
   9860 	}
   9861 
   9862 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9863 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9864 	status = CSR_READ(sc, WMREG_STATUS);
   9865 
   9866 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9867 
   9868 	DPRINTF(WM_DEBUG_LINK,
   9869 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9870 		device_xname(sc->sc_dev), __func__,
   9871 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9872 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9873 
   9874 	/*
   9875 	 * SWDPIN   LU RXCW
   9876 	 *      0    0    0
   9877 	 *      0    0    1	(should not happen)
   9878 	 *      0    1    0	(should not happen)
   9879 	 *      0    1    1	(should not happen)
    9880 	 *      1    0    0	Disable autonegotiation and force link up
    9881 	 *      1    0    1	got /C/ but no link yet
    9882 	 *      1    1    0	(link up)
    9883 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
   9884 	 *
   9885 	 */
   9886 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9887 	    && ((status & STATUS_LU) == 0)
   9888 	    && ((rxcw & RXCW_C) == 0)) {
   9889 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9890 			__func__));
   9891 		sc->sc_tbi_linkup = 0;
   9892 		/* Disable auto-negotiation in the TXCW register */
   9893 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9894 
   9895 		/*
   9896 		 * Force link-up and also force full-duplex.
   9897 		 *
    9898 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
    9899 		 * so update sc->sc_ctrl from the value just read.
   9900 		 */
   9901 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9902 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9903 	} else if (((status & STATUS_LU) != 0)
   9904 	    && ((rxcw & RXCW_C) != 0)
   9905 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9906 		sc->sc_tbi_linkup = 1;
   9907 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9908 			__func__));
   9909 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9910 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9911 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9912 	    && ((rxcw & RXCW_C) != 0)) {
   9913 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9914 	} else {
   9915 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9916 			status));
   9917 	}
   9918 
   9919 	return 0;
   9920 }
   9921 
   9922 /*
   9923  * wm_tbi_tick:
   9924  *
   9925  *	Check the link on TBI devices.
   9926  *	This function acts as mii_tick().
   9927  */
   9928 static void
   9929 wm_tbi_tick(struct wm_softc *sc)
   9930 {
   9931 	struct mii_data *mii = &sc->sc_mii;
   9932 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9933 	uint32_t status;
   9934 
   9935 	KASSERT(WM_CORE_LOCKED(sc));
   9936 
   9937 	status = CSR_READ(sc, WMREG_STATUS);
   9938 
   9939 	/* XXX is this needed? */
   9940 	(void)CSR_READ(sc, WMREG_RXCW);
   9941 	(void)CSR_READ(sc, WMREG_CTRL);
   9942 
   9943 	/* set link status */
   9944 	if ((status & STATUS_LU) == 0) {
   9945 		DPRINTF(WM_DEBUG_LINK,
   9946 		    ("%s: LINK: checklink -> down\n",
   9947 			device_xname(sc->sc_dev)));
   9948 		sc->sc_tbi_linkup = 0;
   9949 	} else if (sc->sc_tbi_linkup == 0) {
   9950 		DPRINTF(WM_DEBUG_LINK,
   9951 		    ("%s: LINK: checklink -> up %s\n",
   9952 			device_xname(sc->sc_dev),
   9953 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9954 		sc->sc_tbi_linkup = 1;
   9955 		sc->sc_tbi_serdes_ticks = 0;
   9956 	}
   9957 
   9958 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9959 		goto setled;
   9960 
   9961 	if ((status & STATUS_LU) == 0) {
   9962 		sc->sc_tbi_linkup = 0;
   9963 		/* If the timer expired, retry autonegotiation */
   9964 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9965 		    && (++sc->sc_tbi_serdes_ticks
   9966 			>= sc->sc_tbi_serdes_anegticks)) {
   9967 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9968 			sc->sc_tbi_serdes_ticks = 0;
   9969 			/*
   9970 			 * Reset the link, and let autonegotiation do
   9971 			 * its thing
   9972 			 */
   9973 			sc->sc_ctrl |= CTRL_LRST;
   9974 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9975 			CSR_WRITE_FLUSH(sc);
   9976 			delay(1000);
   9977 			sc->sc_ctrl &= ~CTRL_LRST;
   9978 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9979 			CSR_WRITE_FLUSH(sc);
   9980 			delay(1000);
   9981 			CSR_WRITE(sc, WMREG_TXCW,
   9982 			    sc->sc_txcw & ~TXCW_ANE);
   9983 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9984 		}
   9985 	}
   9986 
   9987 setled:
   9988 	wm_tbi_serdes_set_linkled(sc);
   9989 }
   9990 
   9991 /* SERDES related */
   9992 static void
   9993 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9994 {
   9995 	uint32_t reg;
   9996 
   9997 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9998 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9999 		return;
   10000 
   10001 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10002 	reg |= PCS_CFG_PCS_EN;
   10003 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10004 
   10005 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10006 	reg &= ~CTRL_EXT_SWDPIN(3);
   10007 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10008 	CSR_WRITE_FLUSH(sc);
   10009 }
   10010 
   10011 static int
   10012 wm_serdes_mediachange(struct ifnet *ifp)
   10013 {
   10014 	struct wm_softc *sc = ifp->if_softc;
   10015 	bool pcs_autoneg = true; /* XXX */
   10016 	uint32_t ctrl_ext, pcs_lctl, reg;
   10017 
   10018 	/* XXX Currently, this function is not called on 8257[12] */
   10019 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10020 	    || (sc->sc_type >= WM_T_82575))
   10021 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10022 
   10023 	wm_serdes_power_up_link_82575(sc);
   10024 
   10025 	sc->sc_ctrl |= CTRL_SLU;
   10026 
   10027 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10028 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10029 
   10030 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10031 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
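          	/*
          	 * Choose between PCS autonegotiation and forced mode from the
          	 * link-mode field of CTRL_EXT: SGMII always autonegotiates,
          	 * 1000BASE-KX is always forced, and for the remaining modes the
          	 * MAC speed/duplex is forced to 1000/full while PCS autoneg
          	 * stays enabled unless the NVM disabled it
          	 * (WM_F_PCS_DIS_AUTONEGO).
          	 */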
   10032 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10033 	case CTRL_EXT_LINK_MODE_SGMII:
   10034 		pcs_autoneg = true;
   10035 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10036 		break;
   10037 	case CTRL_EXT_LINK_MODE_1000KX:
   10038 		pcs_autoneg = false;
   10039 		/* FALLTHROUGH */
   10040 	default:
   10041 		if ((sc->sc_type == WM_T_82575)
   10042 		    || (sc->sc_type == WM_T_82576)) {
   10043 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10044 				pcs_autoneg = false;
   10045 		}
   10046 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10047 		    | CTRL_FRCFDX;
   10048 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10049 	}
   10050 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10051 
   10052 	if (pcs_autoneg) {
   10053 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10054 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10055 
   10056 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10057 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10058 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10059 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10060 	} else
   10061 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10062 
   10063 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10064 
   10066 	return 0;
   10067 }
   10068 
   10069 static void
   10070 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10071 {
   10072 	struct wm_softc *sc = ifp->if_softc;
   10073 	struct mii_data *mii = &sc->sc_mii;
   10074 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10075 	uint32_t pcs_adv, pcs_lpab, reg;
   10076 
   10077 	ifmr->ifm_status = IFM_AVALID;
   10078 	ifmr->ifm_active = IFM_ETHER;
   10079 
   10080 	/* Check PCS */
   10081 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10082 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10083 		ifmr->ifm_active |= IFM_NONE;
   10084 		sc->sc_tbi_linkup = 0;
   10085 		goto setled;
   10086 	}
   10087 
   10088 	sc->sc_tbi_linkup = 1;
   10089 	ifmr->ifm_status |= IFM_ACTIVE;
   10090 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10091 	if ((reg & PCS_LSTS_FDX) != 0)
   10092 		ifmr->ifm_active |= IFM_FDX;
   10093 	else
   10094 		ifmr->ifm_active |= IFM_HDX;
   10095 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10096 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10097 		/* Check flow */
   10098 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10099 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10100 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10101 			goto setled;
   10102 		}
   10103 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10104 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10105 		DPRINTF(WM_DEBUG_LINK,
   10106 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
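          		/*
          		 * Resolve flow control from the advertised (PCS_ANADV)
          		 * and link partner (PCS_LPAB) pause bits, following the
          		 * usual 802.3 annex 28B rules: both symmetric -> TX and
          		 * RX pause; local asym-only with partner sym+asym -> TX
          		 * pause; local sym+asym with partner asym-only -> RX
          		 * pause.
          		 */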
   10107 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10108 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10109 			mii->mii_media_active |= IFM_FLOW
   10110 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10111 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10112 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10113 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10114 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10115 			mii->mii_media_active |= IFM_FLOW
   10116 			    | IFM_ETH_TXPAUSE;
   10117 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10118 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10119 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10120 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10121 			mii->mii_media_active |= IFM_FLOW
   10122 			    | IFM_ETH_RXPAUSE;
   10123 		} else {
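          			/* No pause combination matched; leave flow control off. */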
   10124 		}
   10125 	}
   10126 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10127 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10128 setled:
   10129 	wm_tbi_serdes_set_linkled(sc);
   10130 }
   10131 
   10132 /*
   10133  * wm_serdes_tick:
   10134  *
   10135  *	Check the link on serdes devices.
   10136  */
   10137 static void
   10138 wm_serdes_tick(struct wm_softc *sc)
   10139 {
   10140 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10141 	struct mii_data *mii = &sc->sc_mii;
   10142 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10143 	uint32_t reg;
   10144 
   10145 	KASSERT(WM_CORE_LOCKED(sc));
   10146 
   10147 	mii->mii_media_status = IFM_AVALID;
   10148 	mii->mii_media_active = IFM_ETHER;
   10149 
   10150 	/* Check PCS */
   10151 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10152 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10153 		mii->mii_media_status |= IFM_ACTIVE;
   10154 		sc->sc_tbi_linkup = 1;
   10155 		sc->sc_tbi_serdes_ticks = 0;
   10156 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10157 		if ((reg & PCS_LSTS_FDX) != 0)
   10158 			mii->mii_media_active |= IFM_FDX;
   10159 		else
   10160 			mii->mii_media_active |= IFM_HDX;
   10161 	} else {
   10162 		mii->mii_media_status |= IFM_NONE;
   10163 		sc->sc_tbi_linkup = 0;
    10164 		/* If the timer expired, retry autonegotiation */
   10165 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10166 		    && (++sc->sc_tbi_serdes_ticks
   10167 			>= sc->sc_tbi_serdes_anegticks)) {
   10168 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10169 			sc->sc_tbi_serdes_ticks = 0;
   10170 			/* XXX */
   10171 			wm_serdes_mediachange(ifp);
   10172 		}
   10173 	}
   10174 
   10175 	wm_tbi_serdes_set_linkled(sc);
   10176 }
   10177 
   10178 /* SFP related */
   10179 
   10180 static int
   10181 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10182 {
   10183 	uint32_t i2ccmd;
   10184 	int i;
   10185 
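          	/*
          	 * Start a one-byte read at 'offset' over the controller's I2C
          	 * interface; once I2CCMD_READY is set, the result is latched in
          	 * the low byte of the I2CCMD register.
          	 */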
   10186 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10187 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10188 
   10189 	/* Poll the ready bit */
   10190 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10191 		delay(50);
   10192 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10193 		if (i2ccmd & I2CCMD_READY)
   10194 			break;
   10195 	}
   10196 	if ((i2ccmd & I2CCMD_READY) == 0)
   10197 		return -1;
   10198 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10199 		return -1;
   10200 
   10201 	*data = i2ccmd & 0x00ff;
   10202 
   10203 	return 0;
   10204 }
   10205 
   10206 static uint32_t
   10207 wm_sfp_get_media_type(struct wm_softc *sc)
   10208 {
   10209 	uint32_t ctrl_ext;
   10210 	uint8_t val = 0;
   10211 	int timeout = 3;
   10212 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10213 	int rv = -1;
   10214 
   10215 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10216 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10217 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10218 	CSR_WRITE_FLUSH(sc);
   10219 
   10220 	/* Read SFP module data */
   10221 	while (timeout) {
   10222 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10223 		if (rv == 0)
   10224 			break;
   10225 		delay(100*1000); /* XXX too big */
   10226 		timeout--;
   10227 	}
   10228 	if (rv != 0)
   10229 		goto out;
   10230 	switch (val) {
   10231 	case SFF_SFP_ID_SFF:
   10232 		aprint_normal_dev(sc->sc_dev,
   10233 		    "Module/Connector soldered to board\n");
   10234 		break;
   10235 	case SFF_SFP_ID_SFP:
   10236 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10237 		break;
   10238 	case SFF_SFP_ID_UNKNOWN:
   10239 		goto out;
   10240 	default:
   10241 		break;
   10242 	}
   10243 
   10244 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10245 	if (rv != 0) {
   10246 		goto out;
   10247 	}
   10248 
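          	/*
          	 * Map the module's SFF Ethernet compliance codes to a media
          	 * type: 1000BASE-SX/LX modules run over SERDES, while
          	 * 1000BASE-T and 100BASE-FX modules are driven through SGMII.
          	 */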
   10249 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10250 		mediatype = WM_MEDIATYPE_SERDES;
    10251 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   10252 		sc->sc_flags |= WM_F_SGMII;
   10253 		mediatype = WM_MEDIATYPE_COPPER;
    10254 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10255 		sc->sc_flags |= WM_F_SGMII;
   10256 		mediatype = WM_MEDIATYPE_SERDES;
   10257 	}
   10258 
   10259 out:
   10260 	/* Restore I2C interface setting */
   10261 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10262 
   10263 	return mediatype;
   10264 }
   10265 
   10266 /*
   10267  * NVM related.
   10268  * Microwire, SPI (w/wo EERD) and Flash.
   10269  */
   10270 
   10271 /* Both spi and uwire */
   10272 
   10273 /*
   10274  * wm_eeprom_sendbits:
   10275  *
   10276  *	Send a series of bits to the EEPROM.
   10277  */
   10278 static void
   10279 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10280 {
   10281 	uint32_t reg;
   10282 	int x;
   10283 
   10284 	reg = CSR_READ(sc, WMREG_EECD);
   10285 
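          	/*
          	 * Bit-bang the value MSB first: place each bit on DI, then
          	 * pulse the serial clock (SK) with short setup/hold delays.
          	 */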
   10286 	for (x = nbits; x > 0; x--) {
   10287 		if (bits & (1U << (x - 1)))
   10288 			reg |= EECD_DI;
   10289 		else
   10290 			reg &= ~EECD_DI;
   10291 		CSR_WRITE(sc, WMREG_EECD, reg);
   10292 		CSR_WRITE_FLUSH(sc);
   10293 		delay(2);
   10294 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10295 		CSR_WRITE_FLUSH(sc);
   10296 		delay(2);
   10297 		CSR_WRITE(sc, WMREG_EECD, reg);
   10298 		CSR_WRITE_FLUSH(sc);
   10299 		delay(2);
   10300 	}
   10301 }
   10302 
   10303 /*
   10304  * wm_eeprom_recvbits:
   10305  *
   10306  *	Receive a series of bits from the EEPROM.
   10307  */
   10308 static void
   10309 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10310 {
   10311 	uint32_t reg, val;
   10312 	int x;
   10313 
   10314 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10315 
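          	/*
          	 * Clock the value in MSB first: raise SK, sample DO, then
          	 * lower SK again for each bit.
          	 */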
   10316 	val = 0;
   10317 	for (x = nbits; x > 0; x--) {
   10318 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10319 		CSR_WRITE_FLUSH(sc);
   10320 		delay(2);
   10321 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10322 			val |= (1U << (x - 1));
   10323 		CSR_WRITE(sc, WMREG_EECD, reg);
   10324 		CSR_WRITE_FLUSH(sc);
   10325 		delay(2);
   10326 	}
   10327 	*valp = val;
   10328 }
   10329 
   10330 /* Microwire */
   10331 
   10332 /*
   10333  * wm_nvm_read_uwire:
   10334  *
   10335  *	Read a word from the EEPROM using the MicroWire protocol.
   10336  */
   10337 static int
   10338 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10339 {
   10340 	uint32_t reg, val;
   10341 	int i;
   10342 
   10343 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10344 		device_xname(sc->sc_dev), __func__));
   10345 
   10346 	for (i = 0; i < wordcnt; i++) {
   10347 		/* Clear SK and DI. */
   10348 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10349 		CSR_WRITE(sc, WMREG_EECD, reg);
   10350 
   10351 		/*
    10352 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
    10353 		 * and in Xen.
    10354 		 *
    10355 		 * We apply this workaround only to the 82540 because
    10356 		 * qemu's e1000 emulation acts as an 82540.
   10357 		 */
   10358 		if (sc->sc_type == WM_T_82540) {
   10359 			reg |= EECD_SK;
   10360 			CSR_WRITE(sc, WMREG_EECD, reg);
   10361 			reg &= ~EECD_SK;
   10362 			CSR_WRITE(sc, WMREG_EECD, reg);
   10363 			CSR_WRITE_FLUSH(sc);
   10364 			delay(2);
   10365 		}
   10366 		/* XXX: end of workaround */
   10367 
   10368 		/* Set CHIP SELECT. */
   10369 		reg |= EECD_CS;
   10370 		CSR_WRITE(sc, WMREG_EECD, reg);
   10371 		CSR_WRITE_FLUSH(sc);
   10372 		delay(2);
   10373 
   10374 		/* Shift in the READ command. */
   10375 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10376 
   10377 		/* Shift in address. */
   10378 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10379 
   10380 		/* Shift out the data. */
   10381 		wm_eeprom_recvbits(sc, &val, 16);
   10382 		data[i] = val & 0xffff;
   10383 
   10384 		/* Clear CHIP SELECT. */
   10385 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10386 		CSR_WRITE(sc, WMREG_EECD, reg);
   10387 		CSR_WRITE_FLUSH(sc);
   10388 		delay(2);
   10389 	}
   10390 
   10391 	return 0;
   10392 }
   10393 
   10394 /* SPI */
   10395 
   10396 /*
   10397  * Set SPI and FLASH related information from the EECD register.
   10398  * For 82541 and 82547, the word size is taken from EEPROM.
   10399  */
   10400 static int
   10401 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10402 {
   10403 	int size;
   10404 	uint32_t reg;
   10405 	uint16_t data;
   10406 
   10407 	reg = CSR_READ(sc, WMREG_EECD);
   10408 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10409 
   10410 	/* Read the size of NVM from EECD by default */
   10411 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10412 	switch (sc->sc_type) {
   10413 	case WM_T_82541:
   10414 	case WM_T_82541_2:
   10415 	case WM_T_82547:
   10416 	case WM_T_82547_2:
    10417 		/* Set a dummy word size so the EEPROM can be read */
   10418 		sc->sc_nvm_wordsize = 64;
   10419 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10420 		reg = data;
   10421 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10422 		if (size == 0)
   10423 			size = 6; /* 64 word size */
   10424 		else
   10425 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10426 		break;
   10427 	case WM_T_80003:
   10428 	case WM_T_82571:
   10429 	case WM_T_82572:
   10430 	case WM_T_82573: /* SPI case */
   10431 	case WM_T_82574: /* SPI case */
   10432 	case WM_T_82583: /* SPI case */
   10433 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10434 		if (size > 14)
   10435 			size = 14;
   10436 		break;
   10437 	case WM_T_82575:
   10438 	case WM_T_82576:
   10439 	case WM_T_82580:
   10440 	case WM_T_I350:
   10441 	case WM_T_I354:
   10442 	case WM_T_I210:
   10443 	case WM_T_I211:
   10444 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10445 		if (size > 15)
   10446 			size = 15;
   10447 		break;
   10448 	default:
   10449 		aprint_error_dev(sc->sc_dev,
   10450 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    10451 		return -1;
   10453 	}
   10454 
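          	/*
          	 * At this point 'size' is the log2 of the word count, so e.g.
          	 * size 6 means 64 words and size 8 means 256 words.
          	 */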
   10455 	sc->sc_nvm_wordsize = 1 << size;
   10456 
   10457 	return 0;
   10458 }
   10459 
   10460 /*
   10461  * wm_nvm_ready_spi:
   10462  *
   10463  *	Wait for a SPI EEPROM to be ready for commands.
   10464  */
   10465 static int
   10466 wm_nvm_ready_spi(struct wm_softc *sc)
   10467 {
   10468 	uint32_t val;
   10469 	int usec;
   10470 
   10471 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10472 		device_xname(sc->sc_dev), __func__));
   10473 
   10474 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10475 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10476 		wm_eeprom_recvbits(sc, &val, 8);
   10477 		if ((val & SPI_SR_RDY) == 0)
   10478 			break;
   10479 	}
   10480 	if (usec >= SPI_MAX_RETRIES) {
    10481 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10482 		return 1;
   10483 	}
   10484 	return 0;
   10485 }
   10486 
   10487 /*
   10488  * wm_nvm_read_spi:
   10489  *
    10490  *	Read a word from the EEPROM using the SPI protocol.
   10491  */
   10492 static int
   10493 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10494 {
   10495 	uint32_t reg, val;
   10496 	int i;
   10497 	uint8_t opc;
   10498 
   10499 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10500 		device_xname(sc->sc_dev), __func__));
   10501 
   10502 	/* Clear SK and CS. */
   10503 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10504 	CSR_WRITE(sc, WMREG_EECD, reg);
   10505 	CSR_WRITE_FLUSH(sc);
   10506 	delay(2);
   10507 
   10508 	if (wm_nvm_ready_spi(sc))
   10509 		return 1;
   10510 
   10511 	/* Toggle CS to flush commands. */
   10512 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10513 	CSR_WRITE_FLUSH(sc);
   10514 	delay(2);
   10515 	CSR_WRITE(sc, WMREG_EECD, reg);
   10516 	CSR_WRITE_FLUSH(sc);
   10517 	delay(2);
   10518 
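          	/*
          	 * Parts with 8-bit addressing carry the 9th byte-address bit
          	 * in the opcode (A8); word 128 is byte address 256, where that
          	 * bit first becomes set.
          	 */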
   10519 	opc = SPI_OPC_READ;
   10520 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10521 		opc |= SPI_OPC_A8;
   10522 
   10523 	wm_eeprom_sendbits(sc, opc, 8);
   10524 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10525 
   10526 	for (i = 0; i < wordcnt; i++) {
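          		/* The word arrives most-significant byte first; byte-swap it. */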
   10527 		wm_eeprom_recvbits(sc, &val, 16);
   10528 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10529 	}
   10530 
   10531 	/* Raise CS and clear SK. */
   10532 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10533 	CSR_WRITE(sc, WMREG_EECD, reg);
   10534 	CSR_WRITE_FLUSH(sc);
   10535 	delay(2);
   10536 
   10537 	return 0;
   10538 }
   10539 
    10540 /* Reading with EERD */
   10541 
   10542 static int
   10543 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10544 {
   10545 	uint32_t attempts = 100000;
   10546 	uint32_t i, reg = 0;
   10547 	int32_t done = -1;
   10548 
   10549 	for (i = 0; i < attempts; i++) {
   10550 		reg = CSR_READ(sc, rw);
   10551 
   10552 		if (reg & EERD_DONE) {
   10553 			done = 0;
   10554 			break;
   10555 		}
   10556 		delay(5);
   10557 	}
   10558 
   10559 	return done;
   10560 }
   10561 
   10562 static int
   10563 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10564     uint16_t *data)
   10565 {
   10566 	int i, eerd = 0;
   10567 	int error = 0;
   10568 
   10569 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10570 		device_xname(sc->sc_dev), __func__));
   10571 
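          	/*
          	 * For each word: program the address with the START bit set,
          	 * poll for DONE, then pull the result out of the data field of
          	 * the EERD register.
          	 */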
   10572 	for (i = 0; i < wordcnt; i++) {
   10573 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10574 
   10575 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10576 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10577 		if (error != 0)
   10578 			break;
   10579 
   10580 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10581 	}
   10582 
   10583 	return error;
   10584 }
   10585 
   10586 /* Flash */
   10587 
   10588 static int
   10589 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10590 {
   10591 	uint32_t eecd;
   10592 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10593 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10594 	uint8_t sig_byte = 0;
   10595 
   10596 	switch (sc->sc_type) {
   10597 	case WM_T_PCH_SPT:
   10598 		/*
   10599 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10600 		 * sector valid bits from the NVM.
   10601 		 */
   10602 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10603 		if ((*bank == 0) || (*bank == 1)) {
   10604 			aprint_error_dev(sc->sc_dev,
   10605 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10606 				*bank);
   10607 			return -1;
   10608 		} else {
   10609 			*bank = *bank - 2;
   10610 			return 0;
   10611 		}
   10612 	case WM_T_ICH8:
   10613 	case WM_T_ICH9:
   10614 		eecd = CSR_READ(sc, WMREG_EECD);
   10615 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10616 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10617 			return 0;
   10618 		}
   10619 		/* FALLTHROUGH */
   10620 	default:
   10621 		/* Default to 0 */
   10622 		*bank = 0;
   10623 
   10624 		/* Check bank 0 */
   10625 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10626 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10627 			*bank = 0;
   10628 			return 0;
   10629 		}
   10630 
   10631 		/* Check bank 1 */
   10632 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10633 		    &sig_byte);
   10634 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10635 			*bank = 1;
   10636 			return 0;
   10637 		}
   10638 	}
   10639 
   10640 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10641 		device_xname(sc->sc_dev)));
   10642 	return -1;
   10643 }
   10644 
   10645 /******************************************************************************
   10646  * This function does initial flash setup so that a new read/write/erase cycle
   10647  * can be started.
   10648  *
   10649  * sc - The pointer to the hw structure
   10650  ****************************************************************************/
   10651 static int32_t
   10652 wm_ich8_cycle_init(struct wm_softc *sc)
   10653 {
   10654 	uint16_t hsfsts;
   10655 	int32_t error = 1;
   10656 	int32_t i     = 0;
   10657 
   10658 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10659 
    10660 	/* Check the Flash Descriptor Valid bit in HW status */
   10661 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10662 		return error;
   10663 	}
   10664 
    10665 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
    10666 	/* (both bits are write-one-to-clear) */
   10667 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10668 
   10669 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10670 
   10671 	/*
    10672 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10673 	 * check before starting a new cycle, or the FDONE bit should be
    10674 	 * set to 1 by the hardware after reset, so that it can be used to
    10675 	 * tell whether a cycle is in progress or has completed.  We should
    10676 	 * also have a software semaphore guarding FDONE or the
    10677 	 * cycle-in-progress bit, so that accesses by two threads are
    10678 	 * serialized and they cannot start a cycle at the same time.
   10680 	 */
   10681 
   10682 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10683 		/*
   10684 		 * There is no cycle running at present, so we can start a
   10685 		 * cycle
   10686 		 */
   10687 
   10688 		/* Begin by setting Flash Cycle Done. */
   10689 		hsfsts |= HSFSTS_DONE;
   10690 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10691 		error = 0;
   10692 	} else {
   10693 		/*
    10694 		 * Otherwise poll for some time so the current cycle has a
   10695 		 * chance to end before giving up.
   10696 		 */
   10697 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10698 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10699 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10700 				error = 0;
   10701 				break;
   10702 			}
   10703 			delay(1);
   10704 		}
   10705 		if (error == 0) {
   10706 			/*
    10707 			 * The previous cycle completed within the timeout;
    10708 			 * now set Flash Cycle Done.
   10709 			 */
   10710 			hsfsts |= HSFSTS_DONE;
   10711 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10712 		}
   10713 	}
   10714 	return error;
   10715 }
   10716 
   10717 /******************************************************************************
   10718  * This function starts a flash cycle and waits for its completion
   10719  *
   10720  * sc - The pointer to the hw structure
   10721  ****************************************************************************/
   10722 static int32_t
   10723 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10724 {
   10725 	uint16_t hsflctl;
   10726 	uint16_t hsfsts;
   10727 	int32_t error = 1;
   10728 	uint32_t i = 0;
   10729 
   10730 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10731 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10732 	hsflctl |= HSFCTL_GO;
   10733 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10734 
   10735 	/* Wait till FDONE bit is set to 1 */
   10736 	do {
   10737 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10738 		if (hsfsts & HSFSTS_DONE)
   10739 			break;
   10740 		delay(1);
   10741 		i++;
   10742 	} while (i < timeout);
   10743 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   10744 		error = 0;
   10745 
   10746 	return error;
   10747 }
   10748 
   10749 /******************************************************************************
   10750  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10751  *
   10752  * sc - The pointer to the hw structure
   10753  * index - The index of the byte or word to read.
   10754  * size - Size of data to read, 1=byte 2=word, 4=dword
   10755  * data - Pointer to the word to store the value read.
   10756  *****************************************************************************/
   10757 static int32_t
   10758 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10759     uint32_t size, uint32_t *data)
   10760 {
   10761 	uint16_t hsfsts;
   10762 	uint16_t hsflctl;
   10763 	uint32_t flash_linear_address;
   10764 	uint32_t flash_data = 0;
   10765 	int32_t error = 1;
   10766 	int32_t count = 0;
   10767 
   10768 	if (size < 1  || size > 4 || data == 0x0 ||
   10769 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10770 		return error;
   10771 
   10772 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10773 	    sc->sc_ich8_flash_base;
   10774 
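          	/*
          	 * Retry the init/program/read sequence until it succeeds or
          	 * ICH_FLASH_CYCLE_REPEAT_COUNT attempts have failed.
          	 */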
   10775 	do {
   10776 		delay(1);
   10777 		/* Steps */
   10778 		error = wm_ich8_cycle_init(sc);
   10779 		if (error)
   10780 			break;
   10781 
   10782 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10783 		/* The byte count field is size - 1 (0 = 1 byte ... 3 = 4 bytes) */
   10784 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10785 		    & HSFCTL_BCOUNT_MASK;
   10786 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10787 		if (sc->sc_type == WM_T_PCH_SPT) {
   10788 			/*
   10789 			 * In SPT, This register is in Lan memory space, not
   10790 			 * flash. Therefore, only 32 bit access is supported.
   10791 			 */
   10792 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10793 			    (uint32_t)hsflctl);
   10794 		} else
   10795 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10796 
   10797 		/*
   10798 		 * Write the last 24 bits of index into Flash Linear address
   10799 		 * field in Flash Address
   10800 		 */
    10801 		/* TODO: maybe check the index against the size of the flash */
   10802 
   10803 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10804 
   10805 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10806 
   10807 		/*
    10808 		 * If FCERR is set, clear it and retry the whole sequence a
    10809 		 * few more times; otherwise read the result (shifted in least
    10810 		 * significant byte first) from Flash Data0.
   10812 		 */
   10813 		if (error == 0) {
   10814 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10815 			if (size == 1)
   10816 				*data = (uint8_t)(flash_data & 0x000000FF);
   10817 			else if (size == 2)
   10818 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10819 			else if (size == 4)
   10820 				*data = (uint32_t)flash_data;
   10821 			break;
   10822 		} else {
   10823 			/*
   10824 			 * If we've gotten here, then things are probably
   10825 			 * completely hosed, but if the error condition is
   10826 			 * detected, it won't hurt to give it another try...
   10827 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10828 			 */
   10829 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10830 			if (hsfsts & HSFSTS_ERR) {
   10831 				/* Repeat for some time before giving up. */
   10832 				continue;
   10833 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10834 				break;
   10835 		}
   10836 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10837 
   10838 	return error;
   10839 }
   10840 
   10841 /******************************************************************************
   10842  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10843  *
   10844  * sc - pointer to wm_hw structure
   10845  * index - The index of the byte to read.
   10846  * data - Pointer to a byte to store the value read.
   10847  *****************************************************************************/
   10848 static int32_t
   10849 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10850 {
   10851 	int32_t status;
   10852 	uint32_t word = 0;
   10853 
   10854 	status = wm_read_ich8_data(sc, index, 1, &word);
   10855 	if (status == 0)
   10856 		*data = (uint8_t)word;
   10857 	else
   10858 		*data = 0;
   10859 
   10860 	return status;
   10861 }
   10862 
   10863 /******************************************************************************
   10864  * Reads a word from the NVM using the ICH8 flash access registers.
   10865  *
   10866  * sc - pointer to wm_hw structure
   10867  * index - The starting byte index of the word to read.
   10868  * data - Pointer to a word to store the value read.
   10869  *****************************************************************************/
   10870 static int32_t
   10871 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10872 {
   10873 	int32_t status;
   10874 	uint32_t word = 0;
   10875 
   10876 	status = wm_read_ich8_data(sc, index, 2, &word);
   10877 	if (status == 0)
   10878 		*data = (uint16_t)word;
   10879 	else
   10880 		*data = 0;
   10881 
   10882 	return status;
   10883 }
   10884 
   10885 /******************************************************************************
   10886  * Reads a dword from the NVM using the ICH8 flash access registers.
   10887  *
   10888  * sc - pointer to wm_hw structure
   10889  * index - The starting byte index of the word to read.
   10890  * data - Pointer to a word to store the value read.
   10891  *****************************************************************************/
   10892 static int32_t
   10893 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10894 {
   10895 	int32_t status;
   10896 
   10897 	status = wm_read_ich8_data(sc, index, 4, data);
   10898 	return status;
   10899 }
   10900 
   10901 /******************************************************************************
   10902  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10903  * register.
   10904  *
   10905  * sc - Struct containing variables accessed by shared code
   10906  * offset - offset of word in the EEPROM to read
   10907  * data - word read from the EEPROM
   10908  * words - number of words to read
   10909  *****************************************************************************/
   10910 static int
   10911 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10912 {
   10913 	int32_t  error = 0;
   10914 	uint32_t flash_bank = 0;
   10915 	uint32_t act_offset = 0;
   10916 	uint32_t bank_offset = 0;
   10917 	uint16_t word = 0;
   10918 	uint16_t i = 0;
   10919 
   10920 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10921 		device_xname(sc->sc_dev), __func__));
   10922 
   10923 	/*
   10924 	 * We need to know which is the valid flash bank.  In the event
   10925 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10926 	 * managing flash_bank.  So it cannot be trusted and needs
   10927 	 * to be updated with each read.
   10928 	 */
   10929 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10930 	if (error) {
   10931 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10932 			device_xname(sc->sc_dev)));
   10933 		flash_bank = 0;
   10934 	}
   10935 
   10936 	/*
   10937 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10938 	 * size
   10939 	 */
   10940 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10941 
   10942 	error = wm_get_swfwhw_semaphore(sc);
   10943 	if (error) {
   10944 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10945 		    __func__);
   10946 		return error;
   10947 	}
   10948 
   10949 	for (i = 0; i < words; i++) {
   10950 		/* The NVM part needs a byte offset, hence * 2 */
   10951 		act_offset = bank_offset + ((offset + i) * 2);
   10952 		error = wm_read_ich8_word(sc, act_offset, &word);
   10953 		if (error) {
   10954 			aprint_error_dev(sc->sc_dev,
   10955 			    "%s: failed to read NVM\n", __func__);
   10956 			break;
   10957 		}
   10958 		data[i] = word;
   10959 	}
   10960 
   10961 	wm_put_swfwhw_semaphore(sc);
   10962 	return error;
   10963 }
   10964 
   10965 /******************************************************************************
   10966  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10967  * register.
   10968  *
   10969  * sc - Struct containing variables accessed by shared code
   10970  * offset - offset of word in the EEPROM to read
   10971  * data - word read from the EEPROM
   10972  * words - number of words to read
   10973  *****************************************************************************/
   10974 static int
   10975 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10976 {
   10977 	int32_t  error = 0;
   10978 	uint32_t flash_bank = 0;
   10979 	uint32_t act_offset = 0;
   10980 	uint32_t bank_offset = 0;
   10981 	uint32_t dword = 0;
   10982 	uint16_t i = 0;
   10983 
   10984 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10985 		device_xname(sc->sc_dev), __func__));
   10986 
   10987 	/*
   10988 	 * We need to know which is the valid flash bank.  In the event
   10989 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10990 	 * managing flash_bank.  So it cannot be trusted and needs
   10991 	 * to be updated with each read.
   10992 	 */
   10993 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10994 	if (error) {
   10995 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10996 			device_xname(sc->sc_dev)));
   10997 		flash_bank = 0;
   10998 	}
   10999 
   11000 	/*
   11001 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11002 	 * size
   11003 	 */
   11004 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11005 
   11006 	error = wm_get_swfwhw_semaphore(sc);
   11007 	if (error) {
   11008 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11009 		    __func__);
   11010 		return error;
   11011 	}
   11012 
   11013 	for (i = 0; i < words; i++) {
   11014 		/* The NVM part needs a byte offset, hence * 2 */
   11015 		act_offset = bank_offset + ((offset + i) * 2);
   11016 		/* but we must read dword aligned, so mask ... */
   11017 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11018 		if (error) {
   11019 			aprint_error_dev(sc->sc_dev,
   11020 			    "%s: failed to read NVM\n", __func__);
   11021 			break;
   11022 		}
   11023 		/* ... and pick out low or high word */
   11024 		if ((act_offset & 0x2) == 0)
   11025 			data[i] = (uint16_t)(dword & 0xFFFF);
   11026 		else
   11027 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11028 	}
   11029 
   11030 	wm_put_swfwhw_semaphore(sc);
   11031 	return error;
   11032 }
   11033 
   11034 /* iNVM */
   11035 
   11036 static int
   11037 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11038 {
    11039 	int32_t  rv = -1;	/* default: word not found */
   11040 	uint32_t invm_dword;
   11041 	uint16_t i;
   11042 	uint8_t record_type, word_address;
   11043 
   11044 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11045 		device_xname(sc->sc_dev), __func__));
   11046 
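          	/*
          	 * Walk the iNVM records: each dword carries a record type, and
          	 * CSR- and RSA-autoload structures are skipped by advancing
          	 * past their payload.  A word-autoload record whose address
          	 * matches supplies the data.
          	 */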
   11047 	for (i = 0; i < INVM_SIZE; i++) {
   11048 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11049 		/* Get record type */
   11050 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11051 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11052 			break;
   11053 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11054 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11055 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11056 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11057 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11058 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11059 			if (word_address == address) {
   11060 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11061 				rv = 0;
   11062 				break;
   11063 			}
   11064 		}
   11065 	}
   11066 
   11067 	return rv;
   11068 }
   11069 
   11070 static int
   11071 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11072 {
   11073 	int rv = 0;
   11074 	int i;
   11075 
   11076 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11077 		device_xname(sc->sc_dev), __func__));
   11078 
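          	/*
          	 * Words missing from the iNVM image fall back to conservative
          	 * defaults below; the image typically programs only a few
          	 * words.
          	 */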
   11079 	for (i = 0; i < words; i++) {
   11080 		switch (offset + i) {
   11081 		case NVM_OFF_MACADDR:
   11082 		case NVM_OFF_MACADDR1:
   11083 		case NVM_OFF_MACADDR2:
   11084 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11085 			if (rv != 0) {
   11086 				data[i] = 0xffff;
   11087 				rv = -1;
   11088 			}
   11089 			break;
   11090 		case NVM_OFF_CFG2:
   11091 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11092 			if (rv != 0) {
   11093 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11094 				rv = 0;
   11095 			}
   11096 			break;
   11097 		case NVM_OFF_CFG4:
   11098 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11099 			if (rv != 0) {
   11100 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11101 				rv = 0;
   11102 			}
   11103 			break;
   11104 		case NVM_OFF_LED_1_CFG:
   11105 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11106 			if (rv != 0) {
   11107 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11108 				rv = 0;
   11109 			}
   11110 			break;
   11111 		case NVM_OFF_LED_0_2_CFG:
   11112 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11113 			if (rv != 0) {
   11114 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11115 				rv = 0;
   11116 			}
   11117 			break;
   11118 		case NVM_OFF_ID_LED_SETTINGS:
   11119 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11120 			if (rv != 0) {
   11121 				*data = ID_LED_RESERVED_FFFF;
   11122 				rv = 0;
   11123 			}
   11124 			break;
   11125 		default:
   11126 			DPRINTF(WM_DEBUG_NVM,
   11127 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11128 			*data = NVM_RESERVED_WORD;
   11129 			break;
   11130 		}
   11131 	}
   11132 
   11133 	return rv;
   11134 }
   11135 
   11136 /* Lock, detecting NVM type, validate checksum, version and read */
   11137 
   11138 /*
   11139  * wm_nvm_acquire:
   11140  *
   11141  *	Perform the EEPROM handshake required on some chips.
   11142  */
   11143 static int
   11144 wm_nvm_acquire(struct wm_softc *sc)
   11145 {
   11146 	uint32_t reg;
   11147 	int x;
   11148 	int ret = 0;
   11149 
   11150 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11151 		device_xname(sc->sc_dev), __func__));
   11152 
   11153 	if (sc->sc_type >= WM_T_ICH8) {
   11154 		ret = wm_get_nvm_ich8lan(sc);
   11155 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11156 		ret = wm_get_swfwhw_semaphore(sc);
   11157 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11158 		/* This will also do wm_get_swsm_semaphore() if needed */
   11159 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11160 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11161 		ret = wm_get_swsm_semaphore(sc);
   11162 	}
   11163 
   11164 	if (ret) {
   11165 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11166 			__func__);
   11167 		return 1;
   11168 	}
   11169 
   11170 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11171 		reg = CSR_READ(sc, WMREG_EECD);
   11172 
   11173 		/* Request EEPROM access. */
   11174 		reg |= EECD_EE_REQ;
   11175 		CSR_WRITE(sc, WMREG_EECD, reg);
   11176 
   11177 		/* ..and wait for it to be granted. */
   11178 		for (x = 0; x < 1000; x++) {
   11179 			reg = CSR_READ(sc, WMREG_EECD);
   11180 			if (reg & EECD_EE_GNT)
   11181 				break;
   11182 			delay(5);
   11183 		}
   11184 		if ((reg & EECD_EE_GNT) == 0) {
   11185 			aprint_error_dev(sc->sc_dev,
   11186 			    "could not acquire EEPROM GNT\n");
   11187 			reg &= ~EECD_EE_REQ;
   11188 			CSR_WRITE(sc, WMREG_EECD, reg);
   11189 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11190 				wm_put_swfwhw_semaphore(sc);
   11191 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11192 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11193 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11194 				wm_put_swsm_semaphore(sc);
   11195 			return 1;
   11196 		}
   11197 	}
   11198 
   11199 	return 0;
   11200 }
   11201 
   11202 /*
   11203  * wm_nvm_release:
   11204  *
   11205  *	Release the EEPROM mutex.
   11206  */
   11207 static void
   11208 wm_nvm_release(struct wm_softc *sc)
   11209 {
   11210 	uint32_t reg;
   11211 
   11212 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11213 		device_xname(sc->sc_dev), __func__));
   11214 
   11215 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11216 		reg = CSR_READ(sc, WMREG_EECD);
   11217 		reg &= ~EECD_EE_REQ;
   11218 		CSR_WRITE(sc, WMREG_EECD, reg);
   11219 	}
   11220 
   11221 	if (sc->sc_type >= WM_T_ICH8) {
   11222 		wm_put_nvm_ich8lan(sc);
   11223 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11224 		wm_put_swfwhw_semaphore(sc);
   11225 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11226 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11227 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11228 		wm_put_swsm_semaphore(sc);
   11229 }
   11230 
   11231 static int
   11232 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11233 {
   11234 	uint32_t eecd = 0;
   11235 
   11236 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11237 	    || sc->sc_type == WM_T_82583) {
   11238 		eecd = CSR_READ(sc, WMREG_EECD);
   11239 
   11240 		/* Isolate bits 15 & 16 */
   11241 		eecd = ((eecd >> 15) & 0x03);
   11242 
   11243 		/* If both bits are set, device is Flash type */
   11244 		if (eecd == 0x03)
   11245 			return 0;
   11246 	}
   11247 	return 1;
   11248 }
   11249 
   11250 static int
   11251 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11252 {
   11253 	uint32_t eec;
   11254 
   11255 	eec = CSR_READ(sc, WMREG_EEC);
   11256 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11257 		return 1;
   11258 
   11259 	return 0;
   11260 }
   11261 
   11262 /*
   11263  * wm_nvm_validate_checksum
   11264  *
   11265  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11266  */
   11267 static int
   11268 wm_nvm_validate_checksum(struct wm_softc *sc)
   11269 {
   11270 	uint16_t checksum;
   11271 	uint16_t eeprom_data;
   11272 #ifdef WM_DEBUG
   11273 	uint16_t csum_wordaddr, valid_checksum;
   11274 #endif
   11275 	int i;
   11276 
   11277 	checksum = 0;
   11278 
   11279 	/* Don't check for I211 */
   11280 	if (sc->sc_type == WM_T_I211)
   11281 		return 0;
   11282 
   11283 #ifdef WM_DEBUG
   11284 	if (sc->sc_type == WM_T_PCH_LPT) {
   11285 		csum_wordaddr = NVM_OFF_COMPAT;
   11286 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11287 	} else {
   11288 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11289 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11290 	}
   11291 
   11292 	/* Dump EEPROM image for debug */
   11293 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11294 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11295 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11296 		/* XXX PCH_SPT? */
   11297 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11298 		if ((eeprom_data & valid_checksum) == 0) {
   11299 			DPRINTF(WM_DEBUG_NVM,
   11300 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11301 				device_xname(sc->sc_dev), eeprom_data,
   11302 				    valid_checksum));
   11303 		}
   11304 	}
   11305 
   11306 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11307 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11308 		for (i = 0; i < NVM_SIZE; i++) {
   11309 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11310 				printf("XXXX ");
   11311 			else
   11312 				printf("%04hx ", eeprom_data);
   11313 			if (i % 8 == 7)
   11314 				printf("\n");
   11315 		}
   11316 	}
   11317 
   11318 #endif /* WM_DEBUG */
   11319 
   11320 	for (i = 0; i < NVM_SIZE; i++) {
   11321 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11322 			return 1;
   11323 		checksum += eeprom_data;
   11324 	}
   11325 
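          	/*
          	 * The 64-word sum should equal NVM_CHECKSUM.  Note that a
          	 * mismatch is only reported when built with WM_DEBUG; it is
          	 * not treated as fatal here.
          	 */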
   11326 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11327 #ifdef WM_DEBUG
   11328 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11329 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11330 #endif
   11331 	}
   11332 
   11333 	return 0;
   11334 }
   11335 
   11336 static void
   11337 wm_nvm_version_invm(struct wm_softc *sc)
   11338 {
   11339 	uint32_t dword;
   11340 
   11341 	/*
    11342 	 * Linux's code to decode the version is very strange, so we don't
    11343 	 * follow that algorithm and just use word 61 as the document
    11344 	 * describes.  Perhaps it's not perfect though...
   11345 	 *
   11346 	 * Example:
   11347 	 *
   11348 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11349 	 */
   11350 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11351 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11352 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11353 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11354 }
   11355 
   11356 static void
   11357 wm_nvm_version(struct wm_softc *sc)
   11358 {
   11359 	uint16_t major, minor, build, patch;
   11360 	uint16_t uid0, uid1;
   11361 	uint16_t nvm_data;
   11362 	uint16_t off;
   11363 	bool check_version = false;
   11364 	bool check_optionrom = false;
   11365 	bool have_build = false;
   11366 
   11367 	/*
   11368 	 * Version format:
   11369 	 *
   11370 	 * XYYZ
   11371 	 * X0YZ
   11372 	 * X0YY
   11373 	 *
   11374 	 * Example:
   11375 	 *
   11376 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11377 	 *	82571	0x50a6	5.10.6?
   11378 	 *	82572	0x506a	5.6.10?
   11379 	 *	82572EI	0x5069	5.6.9?
   11380 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11381 	 *		0x2013	2.1.3?
    11382 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11383 	 */
   11384 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11385 	switch (sc->sc_type) {
   11386 	case WM_T_82571:
   11387 	case WM_T_82572:
   11388 	case WM_T_82574:
   11389 	case WM_T_82583:
   11390 		check_version = true;
   11391 		check_optionrom = true;
   11392 		have_build = true;
   11393 		break;
   11394 	case WM_T_82575:
   11395 	case WM_T_82576:
   11396 	case WM_T_82580:
   11397 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11398 			check_version = true;
   11399 		break;
   11400 	case WM_T_I211:
   11401 		wm_nvm_version_invm(sc);
   11402 		goto printver;
   11403 	case WM_T_I210:
   11404 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11405 			wm_nvm_version_invm(sc);
   11406 			goto printver;
   11407 		}
   11408 		/* FALLTHROUGH */
   11409 	case WM_T_I350:
   11410 	case WM_T_I354:
   11411 		check_version = true;
   11412 		check_optionrom = true;
   11413 		break;
   11414 	default:
   11415 		return;
   11416 	}
   11417 	if (check_version) {
   11418 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11419 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11420 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11421 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11422 			build = nvm_data & NVM_BUILD_MASK;
   11423 			have_build = true;
   11424 		} else
   11425 			minor = nvm_data & 0x00ff;
   11426 
		/*
		 * The raw minor value is hex-coded decimal, e.g. 0x10
		 * decodes to 10, so nvm_data 0x50a2 prints as 5.10.2
		 * (matching the 82571 example in the table above).
		 */
   11428 		minor = (minor / 16) * 10 + (minor % 16);
   11429 		sc->sc_nvm_ver_major = major;
   11430 		sc->sc_nvm_ver_minor = minor;
   11431 
   11432 printver:
   11433 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11434 		    sc->sc_nvm_ver_minor);
   11435 		if (have_build) {
   11436 			sc->sc_nvm_ver_build = build;
   11437 			aprint_verbose(".%d", build);
   11438 		}
   11439 	}
   11440 	if (check_optionrom) {
   11441 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11442 		/* Option ROM Version */
   11443 		if ((off != 0x0000) && (off != 0xffff)) {
   11444 			off += NVM_COMBO_VER_OFF;
   11445 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11446 			wm_nvm_read(sc, off, 1, &uid0);
   11447 			if ((uid0 != 0) && (uid0 != 0xffff)
   11448 			    && (uid1 != 0) && (uid1 != 0xffff)) {
				/*
				 * Two 16-bit words: major is the high byte
				 * of uid0, build spans the low byte of uid0
				 * and the high byte of uid1, and patch is
				 * the low byte of uid1.
				 */
   11450 				major = uid0 >> 8;
   11451 				build = (uid0 << 8) | (uid1 >> 8);
   11452 				patch = uid1 & 0x00ff;
   11453 				aprint_verbose(", option ROM Version %d.%d.%d",
   11454 				    major, build, patch);
   11455 			}
   11456 		}
   11457 	}
   11458 
   11459 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11460 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11461 }
   11462 
   11463 /*
   11464  * wm_nvm_read:
   11465  *
   11466  *	Read data from the serial EEPROM.
   11467  */
   11468 static int
   11469 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11470 {
   11471 	int rv;
   11472 
   11473 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11474 		device_xname(sc->sc_dev), __func__));
   11475 
   11476 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11477 		return 1;
   11478 
   11479 	if (wm_nvm_acquire(sc))
   11480 		return 1;
   11481 
   11482 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11483 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11484 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11485 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11486 	else if (sc->sc_type == WM_T_PCH_SPT)
   11487 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11488 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11489 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11490 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11491 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11492 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11493 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11494 	else
   11495 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11496 
   11497 	wm_nvm_release(sc);
   11498 	return rv;
   11499 }
   11500 
   11501 /*
   11502  * Hardware semaphores.
 * Very complex...
   11504  */
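
/*
 * A rough sketch of the locking layers implemented by the get/put pairs
 * below: SWSM provides a two-stage hardware semaphore (SMBI taken by
 * software, then SWESMBI shared with firmware), SW_FW_SYNC arbitrates
 * individual resources between driver and firmware on the 8257[56]-class
 * devices, and the EXTCNFCTR MDIO ownership bit (plus kernel mutexes)
 * covers the ICH/PCH family.
 */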
   11505 
   11506 static int
   11507 wm_get_null(struct wm_softc *sc)
   11508 {
   11509 
   11510 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11511 		device_xname(sc->sc_dev), __func__));
   11512 	return 0;
   11513 }
   11514 
   11515 static void
   11516 wm_put_null(struct wm_softc *sc)
   11517 {
   11518 
   11519 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11520 		device_xname(sc->sc_dev), __func__));
   11521 	return;
   11522 }
   11523 
   11524 /*
   11525  * Get hardware semaphore.
   11526  * Same as e1000_get_hw_semaphore_generic()
   11527  */
   11528 static int
   11529 wm_get_swsm_semaphore(struct wm_softc *sc)
   11530 {
   11531 	int32_t timeout;
   11532 	uint32_t swsm;
   11533 
   11534 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11535 		device_xname(sc->sc_dev), __func__));
   11536 	KASSERT(sc->sc_nvm_wordsize > 0);
   11537 
   11538 	/* Get the SW semaphore. */
   11539 	timeout = sc->sc_nvm_wordsize + 1;
   11540 	while (timeout) {
   11541 		swsm = CSR_READ(sc, WMREG_SWSM);
   11542 
   11543 		if ((swsm & SWSM_SMBI) == 0)
   11544 			break;
   11545 
   11546 		delay(50);
   11547 		timeout--;
   11548 	}
   11549 
   11550 	if (timeout == 0) {
   11551 		aprint_error_dev(sc->sc_dev,
   11552 		    "could not acquire SWSM SMBI\n");
   11553 		return 1;
   11554 	}
   11555 
   11556 	/* Get the FW semaphore. */
   11557 	timeout = sc->sc_nvm_wordsize + 1;
   11558 	while (timeout) {
   11559 		swsm = CSR_READ(sc, WMREG_SWSM);
   11560 		swsm |= SWSM_SWESMBI;
   11561 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11562 		/* If we managed to set the bit we got the semaphore. */
   11563 		swsm = CSR_READ(sc, WMREG_SWSM);
   11564 		if (swsm & SWSM_SWESMBI)
   11565 			break;
   11566 
   11567 		delay(50);
   11568 		timeout--;
   11569 	}
   11570 
   11571 	if (timeout == 0) {
   11572 		aprint_error_dev(sc->sc_dev,
   11573 		    "could not acquire SWSM SWESMBI\n");
   11574 		/* Release semaphores */
   11575 		wm_put_swsm_semaphore(sc);
   11576 		return 1;
   11577 	}
   11578 	return 0;
   11579 }
   11580 
   11581 /*
   11582  * Put hardware semaphore.
   11583  * Same as e1000_put_hw_semaphore_generic()
   11584  */
   11585 static void
   11586 wm_put_swsm_semaphore(struct wm_softc *sc)
   11587 {
   11588 	uint32_t swsm;
   11589 
   11590 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11591 		device_xname(sc->sc_dev), __func__));
   11592 
   11593 	swsm = CSR_READ(sc, WMREG_SWSM);
   11594 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11595 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11596 }
   11597 
   11598 /*
   11599  * Get SW/FW semaphore.
   11600  * Same as e1000_acquire_swfw_sync_82575().
   11601  */
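/*
 * The mask selects the resource to lock: the same bit pattern is checked
 * against both the software half (<< SWFW_SOFT_SHIFT) and the firmware
 * half (<< SWFW_FIRM_SHIFT) of SW_FW_SYNC, so the resource is taken only
 * when neither agent holds it.  For example, the PHY get/put wrappers
 * below pass swfwphysem[sc->sc_funcid] as the mask.
 */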
   11602 static int
   11603 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11604 {
   11605 	uint32_t swfw_sync;
   11606 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11607 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   11609 
   11610 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11611 		device_xname(sc->sc_dev), __func__));
   11612 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11613 
   11614 	for (timeout = 0; timeout < 200; timeout++) {
   11615 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11616 			if (wm_get_swsm_semaphore(sc)) {
   11617 				aprint_error_dev(sc->sc_dev,
   11618 				    "%s: failed to get semaphore\n",
   11619 				    __func__);
   11620 				return 1;
   11621 			}
   11622 		}
   11623 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11624 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11625 			swfw_sync |= swmask;
   11626 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11627 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11628 				wm_put_swsm_semaphore(sc);
   11629 			return 0;
   11630 		}
   11631 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11632 			wm_put_swsm_semaphore(sc);
   11633 		delay(5000);
   11634 	}
   11635 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11636 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11637 	return 1;
   11638 }
   11639 
   11640 static void
   11641 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11642 {
   11643 	uint32_t swfw_sync;
   11644 
   11645 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11646 		device_xname(sc->sc_dev), __func__));
   11647 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11648 
   11649 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11650 		while (wm_get_swsm_semaphore(sc) != 0)
   11651 			continue;
   11652 	}
   11653 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11654 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11655 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11656 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11657 		wm_put_swsm_semaphore(sc);
   11658 }
   11659 
   11660 static int
   11661 wm_get_phy_82575(struct wm_softc *sc)
   11662 {
   11663 
   11664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11665 		device_xname(sc->sc_dev), __func__));
   11666 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11667 }
   11668 
   11669 static void
   11670 wm_put_phy_82575(struct wm_softc *sc)
   11671 {
   11672 
   11673 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11674 		device_xname(sc->sc_dev), __func__));
   11675 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11676 }
   11677 
   11678 static int
   11679 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11680 {
   11681 	uint32_t ext_ctrl;
	int timeout;
   11683 
   11684 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11685 		device_xname(sc->sc_dev), __func__));
   11686 
   11687 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11688 	for (timeout = 0; timeout < 200; timeout++) {
   11689 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11690 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11691 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11692 
   11693 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11694 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11695 			return 0;
   11696 		delay(5000);
   11697 	}
   11698 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11699 	    device_xname(sc->sc_dev), ext_ctrl);
   11700 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11701 	return 1;
   11702 }
   11703 
   11704 static void
   11705 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11706 {
   11707 	uint32_t ext_ctrl;
   11708 
   11709 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11710 		device_xname(sc->sc_dev), __func__));
   11711 
   11712 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11713 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11714 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11715 
   11716 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11717 }
   11718 
   11719 static int
   11720 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11721 {
   11722 	uint32_t ext_ctrl;
   11723 	int timeout;
   11724 
   11725 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11726 		device_xname(sc->sc_dev), __func__));
   11727 	mutex_enter(sc->sc_ich_phymtx);
   11728 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11729 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11730 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11731 			break;
   11732 		delay(1000);
   11733 	}
   11734 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11735 		printf("%s: SW has already locked the resource\n",
   11736 		    device_xname(sc->sc_dev));
   11737 		goto out;
   11738 	}
   11739 
   11740 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11741 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11742 	for (timeout = 0; timeout < 1000; timeout++) {
   11743 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11744 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11745 			break;
   11746 		delay(1000);
   11747 	}
   11748 	if (timeout >= 1000) {
   11749 		printf("%s: failed to acquire semaphore\n",
   11750 		    device_xname(sc->sc_dev));
   11751 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11752 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11753 		goto out;
   11754 	}
   11755 	return 0;
   11756 
   11757 out:
   11758 	mutex_exit(sc->sc_ich_phymtx);
   11759 	return 1;
   11760 }
   11761 
   11762 static void
   11763 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11764 {
   11765 	uint32_t ext_ctrl;
   11766 
   11767 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11768 		device_xname(sc->sc_dev), __func__));
   11769 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11770 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11771 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11772 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11773 	} else {
   11774 		printf("%s: Semaphore unexpectedly released\n",
   11775 		    device_xname(sc->sc_dev));
   11776 	}
   11777 
   11778 	mutex_exit(sc->sc_ich_phymtx);
   11779 }
   11780 
   11781 static int
   11782 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11783 {
   11784 
   11785 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11786 		device_xname(sc->sc_dev), __func__));
   11787 	mutex_enter(sc->sc_ich_nvmmtx);
   11788 
   11789 	return 0;
   11790 }
   11791 
   11792 static void
   11793 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11794 {
   11795 
   11796 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11797 		device_xname(sc->sc_dev), __func__));
   11798 	mutex_exit(sc->sc_ich_nvmmtx);
   11799 }
   11800 
   11801 static int
   11802 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11803 {
   11804 	int i = 0;
   11805 	uint32_t reg;
   11806 
   11807 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11808 		device_xname(sc->sc_dev), __func__));
   11809 
   11810 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11811 	do {
   11812 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11813 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11814 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11815 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11816 			break;
   11817 		delay(2*1000);
   11818 		i++;
   11819 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11820 
   11821 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11822 		wm_put_hw_semaphore_82573(sc);
   11823 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11824 		    device_xname(sc->sc_dev));
   11825 		return -1;
   11826 	}
   11827 
   11828 	return 0;
   11829 }
   11830 
   11831 static void
   11832 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11833 {
   11834 	uint32_t reg;
   11835 
   11836 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11837 		device_xname(sc->sc_dev), __func__));
   11838 
   11839 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11840 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11841 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11842 }
   11843 
   11844 /*
   11845  * Management mode and power management related subroutines.
   11846  * BMC, AMT, suspend/resume and EEE.
   11847  */
   11848 
   11849 #ifdef WM_WOL
   11850 static int
   11851 wm_check_mng_mode(struct wm_softc *sc)
   11852 {
   11853 	int rv;
   11854 
   11855 	switch (sc->sc_type) {
   11856 	case WM_T_ICH8:
   11857 	case WM_T_ICH9:
   11858 	case WM_T_ICH10:
   11859 	case WM_T_PCH:
   11860 	case WM_T_PCH2:
   11861 	case WM_T_PCH_LPT:
   11862 	case WM_T_PCH_SPT:
   11863 		rv = wm_check_mng_mode_ich8lan(sc);
   11864 		break;
   11865 	case WM_T_82574:
   11866 	case WM_T_82583:
   11867 		rv = wm_check_mng_mode_82574(sc);
   11868 		break;
   11869 	case WM_T_82571:
   11870 	case WM_T_82572:
   11871 	case WM_T_82573:
   11872 	case WM_T_80003:
   11873 		rv = wm_check_mng_mode_generic(sc);
   11874 		break;
   11875 	default:
		/* nothing to do */
   11877 		rv = 0;
   11878 		break;
   11879 	}
   11880 
   11881 	return rv;
   11882 }
   11883 
   11884 static int
   11885 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11886 {
   11887 	uint32_t fwsm;
   11888 
   11889 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11890 
   11891 	if (((fwsm & FWSM_FW_VALID) != 0)
   11892 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11893 		return 1;
   11894 
   11895 	return 0;
   11896 }
   11897 
   11898 static int
   11899 wm_check_mng_mode_82574(struct wm_softc *sc)
   11900 {
   11901 	uint16_t data;
   11902 
   11903 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11904 
   11905 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11906 		return 1;
   11907 
   11908 	return 0;
   11909 }
   11910 
   11911 static int
   11912 wm_check_mng_mode_generic(struct wm_softc *sc)
   11913 {
   11914 	uint32_t fwsm;
   11915 
   11916 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11917 
   11918 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11919 		return 1;
   11920 
   11921 	return 0;
   11922 }
   11923 #endif /* WM_WOL */
   11924 
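/*
 * Check whether management pass-through should be enabled, i.e. whether
 * packets received for the BMC must also be passed up to the host: this
 * requires ASF/ARC firmware to be present, TCO receive to be enabled in
 * MANC, and the firmware (per FWSM, or the NVM's CFG2 word on
 * 82574/82583) to be in pass-through rather than ASF mode.
 */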
   11925 static int
   11926 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11927 {
   11928 	uint32_t manc, fwsm, factps;
   11929 
   11930 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11931 		return 0;
   11932 
   11933 	manc = CSR_READ(sc, WMREG_MANC);
   11934 
   11935 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11936 		device_xname(sc->sc_dev), manc));
   11937 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11938 		return 0;
   11939 
   11940 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11941 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11942 		factps = CSR_READ(sc, WMREG_FACTPS);
   11943 		if (((factps & FACTPS_MNGCG) == 0)
   11944 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11945 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11947 		uint16_t data;
   11948 
   11949 		factps = CSR_READ(sc, WMREG_FACTPS);
   11950 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11951 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11952 			device_xname(sc->sc_dev), factps, data));
   11953 		if (((factps & FACTPS_MNGCG) == 0)
   11954 		    && ((data & NVM_CFG2_MNGM_MASK)
   11955 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11956 			return 1;
   11957 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11958 	    && ((manc & MANC_ASF_EN) == 0))
   11959 		return 1;
   11960 
   11961 	return 0;
   11962 }
   11963 
   11964 static bool
   11965 wm_phy_resetisblocked(struct wm_softc *sc)
   11966 {
   11967 	bool blocked = false;
   11968 	uint32_t reg;
   11969 	int i = 0;
   11970 
   11971 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11972 		device_xname(sc->sc_dev), __func__));
   11973 
   11974 	switch (sc->sc_type) {
   11975 	case WM_T_ICH8:
   11976 	case WM_T_ICH9:
   11977 	case WM_T_ICH10:
   11978 	case WM_T_PCH:
   11979 	case WM_T_PCH2:
   11980 	case WM_T_PCH_LPT:
   11981 	case WM_T_PCH_SPT:
   11982 		do {
   11983 			reg = CSR_READ(sc, WMREG_FWSM);
   11984 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11985 				blocked = true;
   11986 				delay(10*1000);
   11987 				continue;
   11988 			}
   11989 			blocked = false;
   11990 		} while (blocked && (i++ < 30));
   11991 		return blocked;
   11993 	case WM_T_82571:
   11994 	case WM_T_82572:
   11995 	case WM_T_82573:
   11996 	case WM_T_82574:
   11997 	case WM_T_82583:
   11998 	case WM_T_80003:
   11999 		reg = CSR_READ(sc, WMREG_MANC);
   12000 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12001 			return true;
   12002 		else
   12003 			return false;
   12005 	default:
   12006 		/* no problem */
   12007 		break;
   12008 	}
   12009 
   12010 	return false;
   12011 }
   12012 
   12013 static void
   12014 wm_get_hw_control(struct wm_softc *sc)
   12015 {
   12016 	uint32_t reg;
   12017 
   12018 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12019 		device_xname(sc->sc_dev), __func__));
   12020 
   12021 	if (sc->sc_type == WM_T_82573) {
   12022 		reg = CSR_READ(sc, WMREG_SWSM);
   12023 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12024 	} else if (sc->sc_type >= WM_T_82571) {
   12025 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12026 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12027 	}
   12028 }
   12029 
   12030 static void
   12031 wm_release_hw_control(struct wm_softc *sc)
   12032 {
   12033 	uint32_t reg;
   12034 
   12035 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12036 		device_xname(sc->sc_dev), __func__));
   12037 
   12038 	if (sc->sc_type == WM_T_82573) {
   12039 		reg = CSR_READ(sc, WMREG_SWSM);
   12040 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12041 	} else if (sc->sc_type >= WM_T_82571) {
   12042 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12043 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12044 	}
   12045 }
   12046 
   12047 static void
   12048 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12049 {
   12050 	uint32_t reg;
   12051 
   12052 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12053 		device_xname(sc->sc_dev), __func__));
   12054 
   12055 	if (sc->sc_type < WM_T_PCH2)
   12056 		return;
   12057 
   12058 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12059 
   12060 	if (gate)
   12061 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12062 	else
   12063 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12064 
   12065 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12066 }
   12067 
   12068 static void
   12069 wm_smbustopci(struct wm_softc *sc)
   12070 {
   12071 	uint32_t fwsm, reg;
   12072 	int rv = 0;
   12073 
   12074 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12075 		device_xname(sc->sc_dev), __func__));
   12076 
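	/*
	 * In outline (a sketch of the intent, mirroring e1000's init
	 * path): move the PHY's management interface from SMBus back to
	 * MDIO after a power transition, by forcing SMBus mode, toggling
	 * LANPHYPC, and/or clearing CTRL_EXT_FORCE_SMBUS as each MAC
	 * generation requires, then reset the PHY unless reset is blocked.
	 */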
   12077 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12078 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12079 
   12080 	/* Disable ULP */
   12081 	wm_ulp_disable(sc);
   12082 
   12083 	/* Acquire PHY semaphore */
   12084 	sc->phy.acquire(sc);
   12085 
   12086 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12087 	switch (sc->sc_type) {
   12088 	case WM_T_PCH_LPT:
   12089 	case WM_T_PCH_SPT:
   12090 		if (wm_phy_is_accessible_pchlan(sc))
   12091 			break;
   12092 
   12093 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12094 		reg |= CTRL_EXT_FORCE_SMBUS;
   12095 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12096 #if 0
   12097 		/* XXX Isn't this required??? */
   12098 		CSR_WRITE_FLUSH(sc);
   12099 #endif
   12100 		delay(50 * 1000);
   12101 		/* FALLTHROUGH */
   12102 	case WM_T_PCH2:
   12103 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12104 			break;
   12105 		/* FALLTHROUGH */
   12106 	case WM_T_PCH:
   12107 		if (sc->sc_type == WM_T_PCH)
   12108 			if ((fwsm & FWSM_FW_VALID) != 0)
   12109 				break;
   12110 
   12111 		if (wm_phy_resetisblocked(sc) == true) {
   12112 			printf("XXX reset is blocked(3)\n");
   12113 			break;
   12114 		}
   12115 
   12116 		wm_toggle_lanphypc_pch_lpt(sc);
   12117 
   12118 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12119 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12120 				break;
   12121 
   12122 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12123 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12124 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12125 
   12126 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12127 				break;
   12128 			rv = -1;
   12129 		}
   12130 		break;
   12131 	default:
   12132 		break;
   12133 	}
   12134 
   12135 	/* Release semaphore */
   12136 	sc->phy.release(sc);
   12137 
   12138 	if (rv == 0) {
   12139 		if (wm_phy_resetisblocked(sc)) {
   12140 			printf("XXX reset is blocked(4)\n");
   12141 			goto out;
   12142 		}
   12143 		wm_reset_phy(sc);
   12144 		if (wm_phy_resetisblocked(sc))
   12145 			printf("XXX reset is blocked(4)\n");
   12146 	}
   12147 
   12148 out:
   12149 	/*
   12150 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12151 	 */
   12152 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12153 		delay(10*1000);
   12154 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12155 	}
   12156 }
   12157 
   12158 static void
   12159 wm_init_manageability(struct wm_softc *sc)
   12160 {
   12161 
   12162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12163 		device_xname(sc->sc_dev), __func__));
   12164 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12165 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12166 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12167 
   12168 		/* Disable hardware interception of ARP */
   12169 		manc &= ~MANC_ARP_EN;
   12170 
   12171 		/* Enable receiving management packets to the host */
   12172 		if (sc->sc_type >= WM_T_82571) {
   12173 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12175 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12176 		}
   12177 
   12178 		CSR_WRITE(sc, WMREG_MANC, manc);
   12179 	}
   12180 }
   12181 
   12182 static void
   12183 wm_release_manageability(struct wm_softc *sc)
   12184 {
   12185 
   12186 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12187 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12188 
   12189 		manc |= MANC_ARP_EN;
   12190 		if (sc->sc_type >= WM_T_82571)
   12191 			manc &= ~MANC_EN_MNG2HOST;
   12192 
   12193 		CSR_WRITE(sc, WMREG_MANC, manc);
   12194 	}
   12195 }
   12196 
   12197 static void
   12198 wm_get_wakeup(struct wm_softc *sc)
   12199 {
   12200 
   12201 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12202 	switch (sc->sc_type) {
   12203 	case WM_T_82573:
   12204 	case WM_T_82583:
   12205 		sc->sc_flags |= WM_F_HAS_AMT;
   12206 		/* FALLTHROUGH */
   12207 	case WM_T_80003:
   12208 	case WM_T_82575:
   12209 	case WM_T_82576:
   12210 	case WM_T_82580:
   12211 	case WM_T_I350:
   12212 	case WM_T_I354:
   12213 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12214 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12215 		/* FALLTHROUGH */
   12216 	case WM_T_82541:
   12217 	case WM_T_82541_2:
   12218 	case WM_T_82547:
   12219 	case WM_T_82547_2:
   12220 	case WM_T_82571:
   12221 	case WM_T_82572:
   12222 	case WM_T_82574:
   12223 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12224 		break;
   12225 	case WM_T_ICH8:
   12226 	case WM_T_ICH9:
   12227 	case WM_T_ICH10:
   12228 	case WM_T_PCH:
   12229 	case WM_T_PCH2:
   12230 	case WM_T_PCH_LPT:
   12231 	case WM_T_PCH_SPT:
   12232 		sc->sc_flags |= WM_F_HAS_AMT;
   12233 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12234 		break;
   12235 	default:
   12236 		break;
   12237 	}
   12238 
   12239 	/* 1: HAS_MANAGE */
   12240 	if (wm_enable_mng_pass_thru(sc) != 0)
   12241 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12242 
   12243 #ifdef WM_DEBUG
   12244 	printf("\n");
   12245 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12246 		printf("HAS_AMT,");
   12247 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12248 		printf("ARC_SUBSYS_VALID,");
   12249 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12250 		printf("ASF_FIRMWARE_PRES,");
   12251 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12252 		printf("HAS_MANAGE,");
   12253 	printf("\n");
   12254 #endif
   12255 	/*
	 * Note that the WOL flags are set after the EEPROM stuff has
	 * been reset.
   12258 	 */
   12259 }
   12260 
   12261 /*
   12262  * Unconfigure Ultra Low Power mode.
   12263  * Only for I217 and newer (see below).
   12264  */
   12265 static void
   12266 wm_ulp_disable(struct wm_softc *sc)
   12267 {
   12268 	uint32_t reg;
   12269 	int i = 0;
   12270 
   12271 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12272 		device_xname(sc->sc_dev), __func__));
   12273 	/* Exclude old devices */
   12274 	if ((sc->sc_type < WM_T_PCH_LPT)
   12275 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12276 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12277 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12278 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12279 		return;
   12280 
   12281 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12282 		/* Request ME un-configure ULP mode in the PHY */
   12283 		reg = CSR_READ(sc, WMREG_H2ME);
   12284 		reg &= ~H2ME_ULP;
   12285 		reg |= H2ME_ENFORCE_SETTINGS;
   12286 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12287 
   12288 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12289 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12290 			if (i++ == 30) {
   12291 				printf("%s timed out\n", __func__);
   12292 				return;
   12293 			}
   12294 			delay(10 * 1000);
   12295 		}
   12296 		reg = CSR_READ(sc, WMREG_H2ME);
   12297 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12298 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12299 
   12300 		return;
   12301 	}
   12302 
   12303 	/* Acquire semaphore */
   12304 	sc->phy.acquire(sc);
   12305 
   12306 	/* Toggle LANPHYPC */
   12307 	wm_toggle_lanphypc_pch_lpt(sc);
   12308 
   12309 	/* Unforce SMBus mode in PHY */
   12310 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12311 	if (reg == 0x0000 || reg == 0xffff) {
   12312 		uint32_t reg2;
   12313 
   12314 		printf("%s: Force SMBus first.\n", __func__);
   12315 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12316 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12317 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12318 		delay(50 * 1000);
   12319 
   12320 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12321 	}
   12322 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12323 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12324 
   12325 	/* Unforce SMBus mode in MAC */
   12326 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12327 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12328 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12329 
   12330 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12331 	reg |= HV_PM_CTRL_K1_ENA;
   12332 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12333 
   12334 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12335 	reg &= ~(I218_ULP_CONFIG1_IND
   12336 	    | I218_ULP_CONFIG1_STICKY_ULP
   12337 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12338 	    | I218_ULP_CONFIG1_WOL_HOST
   12339 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12340 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12341 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12342 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12343 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12344 	reg |= I218_ULP_CONFIG1_START;
   12345 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12346 
   12347 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12348 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12349 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12350 
   12351 	/* Release semaphore */
   12352 	sc->phy.release(sc);
   12353 	wm_gmii_reset(sc);
   12354 	delay(50 * 1000);
   12355 }
   12356 
   12357 /* WOL in the newer chipset interfaces (pchlan) */
   12358 static void
   12359 wm_enable_phy_wakeup(struct wm_softc *sc)
   12360 {
   12361 #if 0
   12362 	uint16_t preg;
   12363 
   12364 	/* Copy MAC RARs to PHY RARs */
   12365 
   12366 	/* Copy MAC MTA to PHY MTA */
   12367 
   12368 	/* Configure PHY Rx Control register */
   12369 
   12370 	/* Enable PHY wakeup in MAC register */
   12371 
   12372 	/* Configure and enable PHY wakeup in PHY registers */
   12373 
   12374 	/* Activate PHY wakeup */
   12375 
   12376 	/* XXX */
   12377 #endif
   12378 }
   12379 
   12380 /* Power down workaround on D3 */
   12381 static void
   12382 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12383 {
   12384 	uint32_t reg;
   12385 	int i;
   12386 
   12387 	for (i = 0; i < 2; i++) {
   12388 		/* Disable link */
   12389 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12390 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12391 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12392 
   12393 		/*
   12394 		 * Call gig speed drop workaround on Gig disable before
   12395 		 * accessing any PHY registers
   12396 		 */
   12397 		if (sc->sc_type == WM_T_ICH8)
   12398 			wm_gig_downshift_workaround_ich8lan(sc);
   12399 
   12400 		/* Write VR power-down enable */
   12401 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12402 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12403 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12404 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12405 
   12406 		/* Read it back and test */
   12407 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12408 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12409 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12410 			break;
   12411 
   12412 		/* Issue PHY reset and repeat at most one more time */
   12413 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12414 	}
   12415 }
   12416 
   12417 static void
   12418 wm_enable_wakeup(struct wm_softc *sc)
   12419 {
   12420 	uint32_t reg, pmreg;
   12421 	pcireg_t pmode;
   12422 
   12423 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12424 		device_xname(sc->sc_dev), __func__));
   12425 
   12426 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12427 		&pmreg, NULL) == 0)
   12428 		return;
   12429 
   12430 	/* Advertise the wakeup capability */
   12431 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12432 	    | CTRL_SWDPIN(3));
   12433 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12434 
   12435 	/* ICH workaround */
   12436 	switch (sc->sc_type) {
   12437 	case WM_T_ICH8:
   12438 	case WM_T_ICH9:
   12439 	case WM_T_ICH10:
   12440 	case WM_T_PCH:
   12441 	case WM_T_PCH2:
   12442 	case WM_T_PCH_LPT:
   12443 	case WM_T_PCH_SPT:
   12444 		/* Disable gig during WOL */
   12445 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12446 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12447 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12448 		if (sc->sc_type == WM_T_PCH)
   12449 			wm_gmii_reset(sc);
   12450 
   12451 		/* Power down workaround */
   12452 		if (sc->sc_phytype == WMPHY_82577) {
   12453 			struct mii_softc *child;
   12454 
   12455 			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
   12460 		}
   12461 		break;
   12462 	default:
   12463 		break;
   12464 	}
   12465 
   12466 	/* Keep the laser running on fiber adapters */
   12467 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12468 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12469 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12470 		reg |= CTRL_EXT_SWDPIN(3);
   12471 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12472 	}
   12473 
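	/*
	 * WUFC selects which received packets may wake the chip; we
	 * always arm the magic packet filter (WUFC_MAG) here.
	 */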
   12474 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12475 #if 0	/* for the multicast packet */
   12476 	reg |= WUFC_MC;
   12477 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12478 #endif
   12479 
   12480 	if (sc->sc_type >= WM_T_PCH)
   12481 		wm_enable_phy_wakeup(sc);
   12482 	else {
   12483 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12484 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12485 	}
   12486 
   12487 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12488 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12489 		|| (sc->sc_type == WM_T_PCH2))
   12490 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12491 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12492 
   12493 	/* Request PME */
   12494 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12495 #if 0
   12496 	/* Disable WOL */
   12497 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12498 #else
   12499 	/* For WOL */
   12500 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12501 #endif
   12502 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12503 }
   12504 
   12505 /* LPLU */
   12506 
   12507 static void
   12508 wm_lplu_d0_disable(struct wm_softc *sc)
   12509 {
   12510 	uint32_t reg;
   12511 
   12512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12513 		device_xname(sc->sc_dev), __func__));
   12514 
   12515 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12516 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12517 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12518 }
   12519 
   12520 static void
   12521 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12522 {
   12523 	uint32_t reg;
   12524 
   12525 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12526 		device_xname(sc->sc_dev), __func__));
   12527 
   12528 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12529 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12530 	reg |= HV_OEM_BITS_ANEGNOW;
   12531 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12532 }
   12533 
   12534 /* EEE */
   12535 
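/*
 * Energy Efficient Ethernet on the I350 family: IPCNFG chooses which
 * speeds advertise/auto-negotiate EEE, while EEER actually enables
 * entering Tx/Rx low-power idle (including LPI during flow control).
 */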
   12536 static void
   12537 wm_set_eee_i350(struct wm_softc *sc)
   12538 {
   12539 	uint32_t ipcnfg, eeer;
   12540 
   12541 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12542 	eeer = CSR_READ(sc, WMREG_EEER);
   12543 
   12544 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12545 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12546 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12547 		    | EEER_LPI_FC);
   12548 	} else {
   12549 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12550 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12551 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12552 		    | EEER_LPI_FC);
   12553 	}
   12554 
   12555 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12556 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12557 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12558 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12559 }
   12560 
   12561 /*
   12562  * Workarounds (mainly PHY related).
   12563  * Basically, PHY's workarounds are in the PHY drivers.
   12564  */
   12565 
   12566 /* Work-around for 82566 Kumeran PCS lock loss */
   12567 static void
   12568 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12569 {
   12570 #if 0
   12571 	int miistatus, active, i;
   12572 	int reg;
   12573 
   12574 	miistatus = sc->sc_mii.mii_media_status;
   12575 
   12576 	/* If the link is not up, do nothing */
   12577 	if ((miistatus & IFM_ACTIVE) == 0)
   12578 		return;
   12579 
   12580 	active = sc->sc_mii.mii_media_active;
   12581 
   12582 	/* Nothing to do if the link is other than 1Gbps */
   12583 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12584 		return;
   12585 
   12586 	for (i = 0; i < 10; i++) {
   12587 		/* read twice */
   12588 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12589 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12590 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12591 			goto out;	/* GOOD! */
   12592 
   12593 		/* Reset the PHY */
   12594 		wm_gmii_reset(sc);
   12595 		delay(5*1000);
   12596 	}
   12597 
   12598 	/* Disable GigE link negotiation */
   12599 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12600 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12601 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12602 
   12603 	/*
   12604 	 * Call gig speed drop workaround on Gig disable before accessing
   12605 	 * any PHY registers.
   12606 	 */
   12607 	wm_gig_downshift_workaround_ich8lan(sc);
   12608 
   12609 out:
   12610 	return;
   12611 #endif
   12612 }
   12613 
   12614 /* WOL from S5 stops working */
   12615 static void
   12616 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12617 {
   12618 	uint16_t kmrn_reg;
   12619 
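	/*
	 * The workaround itself is just a pulse: set and then clear the
	 * Kumeran near-end loopback diagnostic bit, which (mirroring
	 * e1000's equivalent routine) keeps WOL from S5 functional after
	 * a gigabit downshift.
	 */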
   12620 	/* Only for igp3 */
   12621 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12622 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12623 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12624 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12625 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12626 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12627 	}
   12628 }
   12629 
   12630 /*
   12631  * Workaround for pch's PHYs
   12632  * XXX should be moved to new PHY driver?
   12633  */
   12634 static void
   12635 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12636 {
   12637 
   12638 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12639 		device_xname(sc->sc_dev), __func__));
   12640 	KASSERT(sc->sc_type == WM_T_PCH);
   12641 
   12642 	if (sc->sc_phytype == WMPHY_82577)
   12643 		wm_set_mdio_slow_mode_hv(sc);
   12644 
   12645 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12646 
   12647 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12648 
   12649 	/* 82578 */
   12650 	if (sc->sc_phytype == WMPHY_82578) {
   12651 		struct mii_softc *child;
   12652 
   12653 		/*
   12654 		 * Return registers to default by doing a soft reset then
   12655 		 * writing 0x3140 to the control register
   12656 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12657 		 */
   12658 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12659 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12660 			PHY_RESET(child);
   12661 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12662 			    0x3140);
   12663 		}
   12664 	}
   12665 
   12666 	/* Select page 0 */
   12667 	sc->phy.acquire(sc);
   12668 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12669 	sc->phy.release(sc);
   12670 
   12671 	/*
   12672 	 * Configure the K1 Si workaround during phy reset assuming there is
   12673 	 * link so that it disables K1 if link is in 1Gbps.
   12674 	 */
   12675 	wm_k1_gig_workaround_hv(sc, 1);
   12676 }
   12677 
   12678 static void
   12679 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12680 {
   12681 
   12682 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12683 		device_xname(sc->sc_dev), __func__));
   12684 	KASSERT(sc->sc_type == WM_T_PCH2);
   12685 
   12686 	wm_set_mdio_slow_mode_hv(sc);
   12687 }
   12688 
   12689 static int
   12690 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12691 {
   12692 	int k1_enable = sc->sc_nvm_k1_enabled;
   12693 
   12694 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12695 		device_xname(sc->sc_dev), __func__));
   12696 
   12697 	if (sc->phy.acquire(sc) != 0)
   12698 		return -1;
   12699 
   12700 	if (link) {
   12701 		k1_enable = 0;
   12702 
   12703 		/* Link stall fix for link up */
   12704 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12705 	} else {
   12706 		/* Link stall fix for link down */
   12707 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12708 	}
   12709 
   12710 	wm_configure_k1_ich8lan(sc, k1_enable);
   12711 	sc->phy.release(sc);
   12712 
   12713 	return 0;
   12714 }
   12715 
   12716 static void
   12717 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12718 {
   12719 	uint32_t reg;
   12720 
   12721 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12722 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12723 	    reg | HV_KMRN_MDIO_SLOW);
   12724 }
   12725 
   12726 static void
   12727 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12728 {
   12729 	uint32_t ctrl, ctrl_ext, tmp;
   12730 	uint16_t kmrn_reg;
   12731 
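	/*
	 * K1 is a Kumeran-link power-saving state.  After writing the new
	 * enable bit, the speed-force/SPD_BYPS dance below briefly bumps
	 * the MAC so the K1 setting takes effect, then restores the
	 * original CTRL/CTRL_EXT values (cf. e1000_configure_k1_ich8lan()).
	 */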
   12732 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12733 
   12734 	if (k1_enable)
   12735 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12736 	else
   12737 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12738 
   12739 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12740 
   12741 	delay(20);
   12742 
   12743 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12744 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12745 
   12746 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12747 	tmp |= CTRL_FRCSPD;
   12748 
   12749 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12750 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12751 	CSR_WRITE_FLUSH(sc);
   12752 	delay(20);
   12753 
   12754 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12755 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12756 	CSR_WRITE_FLUSH(sc);
   12757 	delay(20);
   12758 }
   12759 
/* Special case - for 82575 - manual init is needed ... */
   12761 static void
   12762 wm_reset_init_script_82575(struct wm_softc *sc)
   12763 {
   12764 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   12767 	 */
   12768 
   12769 	/* SerDes configuration via SERDESCTRL */
   12770 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12771 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12772 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12773 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12774 
   12775 	/* CCM configuration via CCMCTL register */
   12776 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12777 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12778 
   12779 	/* PCIe lanes configuration */
   12780 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12781 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12782 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12784 
   12785 	/* PCIe PLL Configuration */
   12786 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12787 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12788 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12789 }
   12790 
   12791 static void
   12792 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12793 {
   12794 	uint32_t reg;
   12795 	uint16_t nvmword;
   12796 	int rv;
   12797 
   12798 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12799 		return;
   12800 
   12801 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12802 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12803 	if (rv != 0) {
   12804 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12805 		    __func__);
   12806 		return;
   12807 	}
   12808 
   12809 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12810 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12811 		reg |= MDICNFG_DEST;
   12812 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12813 		reg |= MDICNFG_COM_MDIO;
   12814 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12815 }
   12816 
   12817 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   12818 
   12819 static bool
   12820 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   12821 {
   12822 	int i;
   12823 	uint32_t reg;
   12824 	uint16_t id1, id2;
   12825 
   12826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12827 		device_xname(sc->sc_dev), __func__));
   12828 	id1 = id2 = 0xffff;
   12829 	for (i = 0; i < 2; i++) {
   12830 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   12831 		if (MII_INVALIDID(id1))
   12832 			continue;
   12833 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   12834 		if (MII_INVALIDID(id2))
   12835 			continue;
   12836 		break;
   12837 	}
   12838 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   12839 		goto out;
   12840 	}
   12841 
   12842 	if (sc->sc_type < WM_T_PCH_LPT) {
   12843 		sc->phy.release(sc);
   12844 		wm_set_mdio_slow_mode_hv(sc);
   12845 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   12846 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   12847 		sc->phy.acquire(sc);
   12848 	}
   12849 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   12850 		printf("XXX return with false\n");
   12851 		return false;
   12852 	}
   12853 out:
   12854 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   12855 		/* Only unforce SMBus if ME is not active */
   12856 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   12857 			/* Unforce SMBus mode in PHY */
   12858 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   12859 			    CV_SMB_CTRL);
   12860 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12861 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   12862 			    CV_SMB_CTRL, reg);
   12863 
   12864 			/* Unforce SMBus mode in MAC */
   12865 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12866 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12867 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12868 		}
   12869 	}
   12870 	return true;
   12871 }
   12872 
   12873 static void
   12874 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   12875 {
   12876 	uint32_t reg;
   12877 	int i;
   12878 
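	/*
	 * Asserting CTRL_LANPHYPC_OVERRIDE with CTRL_LANPHYPC_VALUE
	 * cleared requests a LANPHYPC power cycle of the PHY; the wait
	 * afterwards (a fixed 50ms, or polling CTRL_EXT_LPCD on LPT and
	 * newer) gives the PHY time to come back up.
	 */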
   12879 	/* Set PHY Config Counter to 50msec */
   12880 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   12881 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   12882 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   12883 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   12884 
   12885 	/* Toggle LANPHYPC */
   12886 	reg = CSR_READ(sc, WMREG_CTRL);
   12887 	reg |= CTRL_LANPHYPC_OVERRIDE;
   12888 	reg &= ~CTRL_LANPHYPC_VALUE;
   12889 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12890 	CSR_WRITE_FLUSH(sc);
   12891 	delay(1000);
   12892 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   12893 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12894 	CSR_WRITE_FLUSH(sc);
   12895 
   12896 	if (sc->sc_type < WM_T_PCH_LPT)
   12897 		delay(50 * 1000);
   12898 	else {
   12899 		i = 20;
   12900 
   12901 		do {
   12902 			delay(5 * 1000);
   12903 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   12904 		    && i--);
   12905 
   12906 		delay(30 * 1000);
   12907 	}
   12908 }
   12909 
   12910 static int
   12911 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   12912 {
   12913 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   12914 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   12915 	uint32_t rxa;
   12916 	uint16_t scale = 0, lat_enc = 0;
   12917 	int64_t lat_ns, value;
   12918 
   12919 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12920 		device_xname(sc->sc_dev), __func__));
   12921 
   12922 	if (link) {
   12923 		pcireg_t preg;
   12924 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   12925 
   12926 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   12927 
   12928 		/*
   12929 		 * Determine the maximum latency tolerated by the device.
   12930 		 *
   12931 		 * Per the PCIe spec, the tolerated latencies are encoded as
   12932 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   12933 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   12934 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   12935 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   12936 		 */
   12937 		lat_ns = ((int64_t)rxa * 1024 -
   12938 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   12939 		if (lat_ns < 0)
   12940 			lat_ns = 0;
   12941 		else {
   12942 			uint32_t status;
   12943 			uint16_t speed;
   12944 
   12945 			status = CSR_READ(sc, WMREG_STATUS);
   12946 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   12947 			case STATUS_SPEED_10:
   12948 				speed = 10;
   12949 				break;
   12950 			case STATUS_SPEED_100:
   12951 				speed = 100;
   12952 				break;
   12953 			case STATUS_SPEED_1000:
   12954 				speed = 1000;
   12955 				break;
   12956 			default:
   12957 				printf("%s: Unknown speed (status = %08x)\n",
   12958 				    device_xname(sc->sc_dev), status);
   12959 				return -1;
   12960 			}
   12961 			lat_ns /= speed;
   12962 		}
   12963 		value = lat_ns;
   12964 
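		/*
		 * Encode the latency as value * 2^(5 * scale).  A worked
		 * example: lat_ns = 100000 gives scale 2 and value 98
		 * (howmany(howmany(100000, 32), 32)), i.e. 98 * 1024 ns =
		 * 100352 ns, which covers the requirement.
		 */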
   12965 		while (value > LTRV_VALUE) {
			scale++;
   12967 			value = howmany(value, __BIT(5));
   12968 		}
   12969 		if (scale > LTRV_SCALE_MAX) {
   12970 			printf("%s: Invalid LTR latency scale %d\n",
   12971 			    device_xname(sc->sc_dev), scale);
   12972 			return -1;
   12973 		}
   12974 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   12975 
   12976 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12977 		    WM_PCI_LTR_CAP_LPT);
   12978 		max_snoop = preg & 0xffff;
   12979 		max_nosnoop = preg >> 16;
   12980 
   12981 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   12982 
   12983 		if (lat_enc > max_ltr_enc) {
   12984 			lat_enc = max_ltr_enc;
   12985 		}
   12986 	}
	/* Snoop and No-Snoop latencies are the same */
   12988 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   12989 	CSR_WRITE(sc, WMREG_LTRV, reg);
   12990 
   12991 	return 0;
   12992 }
   12993 
   12994 /*
   12995  * I210 Errata 25 and I211 Errata 10
   12996  * Slow System Clock.
   12997  */
   12998 static void
   12999 wm_pll_workaround_i210(struct wm_softc *sc)
   13000 {
   13001 	uint32_t mdicnfg, wuc;
   13002 	uint32_t reg;
   13003 	pcireg_t pcireg;
   13004 	uint32_t pmreg;
   13005 	uint16_t nvmword, tmp_nvmword;
   13006 	int phyval;
   13007 	bool wa_done = false;
   13008 	int i;
   13009 
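	/*
	 * On I210/I211 with a slow system clock the internal PHY's PLL
	 * can come up unconfigured.  The loop below reads the PLL
	 * frequency register via GS40G and, while it still reads as
	 * unconfigured, resets the PHY with a modified iNVM autoload
	 * word and bounces the device through D3hot/D0 so the PLL can
	 * relock (up to WM_MAX_PLL_TRIES attempts).
	 */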
   13010 	/* Save WUC and MDICNFG registers */
   13011 	wuc = CSR_READ(sc, WMREG_WUC);
   13012 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13013 
   13014 	reg = mdicnfg & ~MDICNFG_DEST;
   13015 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13016 
   13017 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13018 		nvmword = INVM_DEFAULT_AL;
   13019 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13020 
   13021 	/* Get Power Management cap offset */
   13022 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13023 		&pmreg, NULL) == 0)
   13024 		return;
   13025 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13026 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13027 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13028 
   13029 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13030 			break; /* OK */
   13031 		}
   13032 
   13033 		wa_done = true;
   13034 		/* Directly reset the internal PHY */
   13035 		reg = CSR_READ(sc, WMREG_CTRL);
   13036 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13037 
   13038 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13039 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13040 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13041 
   13042 		CSR_WRITE(sc, WMREG_WUC, 0);
   13043 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13044 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13045 
   13046 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13047 		    pmreg + PCI_PMCSR);
   13048 		pcireg |= PCI_PMCSR_STATE_D3;
   13049 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13050 		    pmreg + PCI_PMCSR, pcireg);
   13051 		delay(1000);
   13052 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13053 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13054 		    pmreg + PCI_PMCSR, pcireg);
   13055 
   13056 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13057 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13058 
   13059 		/* Restore WUC register */
   13060 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13061 	}
   13062 
   13063 	/* Restore MDICNFG setting */
   13064 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13065 	if (wa_done)
   13066 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13067 }
   13068